Diffstat (limited to 'drivers/vdpa/vdpa_user')
 drivers/vdpa/vdpa_user/iova_domain.c | 134
 drivers/vdpa/vdpa_user/iova_domain.h |   7
 drivers/vdpa/vdpa_user/vduse_dev.c   |  79
 3 files changed, 137 insertions(+), 83 deletions(-)
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 58116f89d8da..4352b5cf74f0 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -103,19 +103,38 @@ void vduse_domain_clear_map(struct vduse_iova_domain *domain,
 static int vduse_domain_map_bounce_page(struct vduse_iova_domain *domain,
 					u64 iova, u64 size, u64 paddr)
 {
-	struct vduse_bounce_map *map;
+	struct vduse_bounce_map *map, *head_map;
+	struct page *tmp_page;
 	u64 last = iova + size - 1;
 
 	while (iova <= last) {
-		map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+		/*
+		 * When PAGE_SIZE is larger than 4KB, multiple adjacent bounce_maps will
+		 * point to the same memory page of PAGE_SIZE. Since bounce_maps originate
+		 * from IO requests, we may not be able to guarantee that the orig_phys
+		 * values of all IO requests within the same 64KB memory page are contiguous.
+		 * Therefore, we need to store them separately.
+		 *
+		 * Bounce pages are allocated on demand. As a result, it may occur that
+		 * multiple bounce pages corresponding to the same 64KB memory page attempt
+		 * to allocate memory simultaneously, so we use cmpxchg to handle this
+		 * concurrency.
+		 */
+		map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
 		if (!map->bounce_page) {
-			map->bounce_page = alloc_page(GFP_ATOMIC);
-			if (!map->bounce_page)
-				return -ENOMEM;
+			head_map = &domain->bounce_maps[(iova & PAGE_MASK) >> BOUNCE_MAP_SHIFT];
+			if (!head_map->bounce_page) {
+				tmp_page = alloc_page(GFP_ATOMIC);
+				if (!tmp_page)
+					return -ENOMEM;
+				if (cmpxchg(&head_map->bounce_page, NULL, tmp_page))
+					__free_page(tmp_page);
+			}
+			map->bounce_page = head_map->bounce_page;
 		}
 		map->orig_phys = paddr;
-		paddr += PAGE_SIZE;
-		iova += PAGE_SIZE;
+		paddr += BOUNCE_MAP_SIZE;
+		iova += BOUNCE_MAP_SIZE;
 	}
 	return 0;
 }
@@ -127,12 +146,17 @@ static void vduse_domain_unmap_bounce_page(struct vduse_iova_domain *domain,
 	u64 last = iova + size - 1;
 
 	while (iova <= last) {
-		map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+		map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
 		map->orig_phys = INVALID_PHYS_ADDR;
-		iova += PAGE_SIZE;
+		iova += BOUNCE_MAP_SIZE;
 	}
 }
 
+static unsigned int offset_in_bounce_page(dma_addr_t addr)
+{
+	return (addr & ~BOUNCE_MAP_MASK);
+}
+
 static void do_bounce(phys_addr_t orig, void *addr, size_t size,
 		      enum dma_data_direction dir)
 {
@@ -163,7 +187,7 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
 {
 	struct vduse_bounce_map *map;
 	struct page *page;
-	unsigned int offset;
+	unsigned int offset, head_offset;
 	void *addr;
 	size_t sz;
 
@@ -171,9 +195,10 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
 		return;
 
 	while (size) {
-		map = &domain->bounce_maps[iova >> PAGE_SHIFT];
-		offset = offset_in_page(iova);
-		sz = min_t(size_t, PAGE_SIZE - offset, size);
+		map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
+		head_offset = offset_in_page(iova);
+		offset = offset_in_bounce_page(iova);
+		sz = min_t(size_t, BOUNCE_MAP_SIZE - offset, size);
 
 		if (WARN_ON(!map->bounce_page ||
 			    map->orig_phys == INVALID_PHYS_ADDR))
@@ -183,7 +208,7 @@ static void vduse_domain_bounce(struct vduse_iova_domain *domain,
 			map->user_bounce_page : map->bounce_page;
 
 		addr = kmap_local_page(page);
-		do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
+		do_bounce(map->orig_phys + offset, addr + head_offset, sz, dir);
 		kunmap_local(addr);
 		size -= sz;
 		iova += sz;
@@ -218,7 +243,7 @@ vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
 	struct page *page = NULL;
 
 	read_lock(&domain->bounce_lock);
-	map = &domain->bounce_maps[iova >> PAGE_SHIFT];
+	map = &domain->bounce_maps[iova >> BOUNCE_MAP_SHIFT];
 	if (domain->user_bounce_pages || !map->bounce_page)
 		goto out;
@@ -236,7 +261,7 @@ vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
 	struct vduse_bounce_map *map;
 	unsigned long pfn, bounce_pfns;
 
-	bounce_pfns = domain->bounce_size >> PAGE_SHIFT;
+	bounce_pfns = domain->bounce_size >> BOUNCE_MAP_SHIFT;
 
 	for (pfn = 0; pfn < bounce_pfns; pfn++) {
 		map = &domain->bounce_maps[pfn];
@@ -246,7 +271,8 @@ vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
 		if (!map->bounce_page)
 			continue;
 
-		__free_page(map->bounce_page);
+		if (!((pfn << BOUNCE_MAP_SHIFT) & ~PAGE_MASK))
+			__free_page(map->bounce_page);
 		map->bounce_page = NULL;
 	}
 }
@@ -254,8 +280,12 @@ vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
 int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
 				       struct page **pages, int count)
 {
-	struct vduse_bounce_map *map;
-	int i, ret;
+	struct vduse_bounce_map *map, *head_map;
+	int i, j, ret;
+	int inner_pages = PAGE_SIZE / BOUNCE_MAP_SIZE;
+	int bounce_pfns = domain->bounce_size >> BOUNCE_MAP_SHIFT;
+	struct page *head_page = NULL;
+	bool need_copy;
 
 	/* Now we don't support partial mapping */
 	if (count != (domain->bounce_size >> PAGE_SHIFT))
@@ -267,16 +297,23 @@ int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
 		goto out;
 
 	for (i = 0; i < count; i++) {
-		map = &domain->bounce_maps[i];
-		if (map->bounce_page) {
+		need_copy = false;
+		head_map = &domain->bounce_maps[(i * inner_pages)];
+		head_page = head_map->bounce_page;
+		for (j = 0; j < inner_pages; j++) {
+			if ((i * inner_pages + j) >= bounce_pfns)
+				break;
+			map = &domain->bounce_maps[(i * inner_pages + j)];
 			/* Copy kernel page to user page if it's in use */
-			if (map->orig_phys != INVALID_PHYS_ADDR)
-				memcpy_to_page(pages[i], 0,
-					       page_address(map->bounce_page),
-					       PAGE_SIZE);
+			if ((head_page) && (map->orig_phys != INVALID_PHYS_ADDR))
+				need_copy = true;
+			map->user_bounce_page = pages[i];
 		}
-		map->user_bounce_page = pages[i];
 		get_page(pages[i]);
+		if ((head_page) && (need_copy))
+			memcpy_to_page(pages[i], 0,
+				       page_address(head_page),
+				       PAGE_SIZE);
 	}
 	domain->user_bounce_pages = true;
 	ret = 0;
@@ -288,8 +325,12 @@ out:
 
 void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
 {
-	struct vduse_bounce_map *map;
-	unsigned long i, count;
+	struct vduse_bounce_map *map, *head_map;
+	unsigned long i, j, count;
+	int inner_pages = PAGE_SIZE / BOUNCE_MAP_SIZE;
+	int bounce_pfns = domain->bounce_size >> BOUNCE_MAP_SHIFT;
+	struct page *head_page = NULL;
+	bool need_copy;
 
 	write_lock(&domain->bounce_lock);
 	if (!domain->user_bounce_pages)
@@ -297,20 +338,27 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
 
 	count = domain->bounce_size >> PAGE_SHIFT;
 	for (i = 0; i < count; i++) {
-		struct page *page = NULL;
-
-		map = &domain->bounce_maps[i];
-		if (WARN_ON(!map->user_bounce_page))
+		need_copy = false;
+		head_map = &domain->bounce_maps[(i * inner_pages)];
+		if (WARN_ON(!head_map->user_bounce_page))
 			continue;
-
-		/* Copy user page to kernel page if it's in use */
-		if (map->orig_phys != INVALID_PHYS_ADDR) {
-			page = map->bounce_page;
-			memcpy_from_page(page_address(page),
-					 map->user_bounce_page, 0, PAGE_SIZE);
+		head_page = head_map->user_bounce_page;
+
+		for (j = 0; j < inner_pages; j++) {
+			if ((i * inner_pages + j) >= bounce_pfns)
+				break;
+			map = &domain->bounce_maps[(i * inner_pages + j)];
+			if (WARN_ON(!map->user_bounce_page))
+				continue;
+			/* Copy user page to kernel page if it's in use */
+			if ((map->orig_phys != INVALID_PHYS_ADDR) && (head_map->bounce_page))
+				need_copy = true;
+			map->user_bounce_page = NULL;
 		}
-		put_page(map->user_bounce_page);
-		map->user_bounce_page = NULL;
+		if (need_copy)
+			memcpy_from_page(page_address(head_map->bounce_page),
+					 head_page, 0, PAGE_SIZE);
+		put_page(head_page);
 	}
 	domain->user_bounce_pages = false;
 out:
@@ -447,7 +495,7 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
 
 void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
 				  size_t size, dma_addr_t *dma_addr,
-				  gfp_t flag, unsigned long attrs)
+				  gfp_t flag)
 {
 	struct iova_domain *iovad = &domain->consistent_iovad;
 	unsigned long limit = domain->iova_limit;
@@ -581,7 +629,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
 	unsigned long pfn, bounce_pfns;
 	int ret;
 
-	bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT;
+	bounce_pfns = PAGE_ALIGN(bounce_size) >> BOUNCE_MAP_SHIFT;
 	if (iova_limit <= bounce_size)
 		return NULL;
 
@@ -613,7 +661,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
 	rwlock_init(&domain->bounce_lock);
 	spin_lock_init(&domain->iotlb_lock);
 	init_iova_domain(&domain->stream_iovad,
-			 PAGE_SIZE, IOVA_START_PFN);
+			 BOUNCE_MAP_SIZE, IOVA_START_PFN);
 	ret = iova_domain_init_rcaches(&domain->stream_iovad);
 	if (ret)
 		goto err_iovad_stream;
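The comment in vduse_domain_map_bounce_page() above describes an allocate-once scheme: every 4KB bounce map inside one larger base page shares a single backing page, published with cmpxchg() so concurrent faulters neither leak nor double-assign an allocation. Below is a minimal userspace sketch of that pattern, using C11 atomics in place of the kernel's cmpxchg() and assuming a 64KB base page; all sketch_* names are illustrative, not part of the patch.

/*
 * Userspace sketch (not kernel code): several 4KB bounce maps share one
 * backing page of SKETCH_PAGE_SIZE, and a compare-and-swap publishes the
 * single allocation so racing mappers never leak or double-assign it.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define BOUNCE_MAP_SHIFT 12
#define BOUNCE_MAP_SIZE  (1UL << BOUNCE_MAP_SHIFT)
#define SKETCH_PAGE_SIZE 65536UL              /* assumed 64KB base page */
#define SKETCH_PAGE_MASK (~(SKETCH_PAGE_SIZE - 1))

struct sketch_map {
	_Atomic(void *) bounce_page;          /* shared backing page */
};

/* Map one 4KB granule at @iova, allocating the head page on first use. */
static int sketch_map_granule(struct sketch_map *maps, unsigned long iova)
{
	struct sketch_map *map  = &maps[iova >> BOUNCE_MAP_SHIFT];
	struct sketch_map *head = &maps[(iova & SKETCH_PAGE_MASK) >> BOUNCE_MAP_SHIFT];

	if (!atomic_load(&map->bounce_page)) {
		if (!atomic_load(&head->bounce_page)) {
			void *tmp = aligned_alloc(SKETCH_PAGE_SIZE, SKETCH_PAGE_SIZE);
			void *expected = NULL;

			if (!tmp)
				return -1;
			/* Only one racing caller publishes its page; losers free theirs. */
			if (!atomic_compare_exchange_strong(&head->bounce_page,
							    &expected, tmp))
				free(tmp);
		}
		atomic_store(&map->bounce_page, atomic_load(&head->bounce_page));
	}
	return 0;
}

int main(void)
{
	struct sketch_map maps[SKETCH_PAGE_SIZE / BOUNCE_MAP_SIZE] = { 0 };

	/* Two granules inside the same 64KB page share one allocation. */
	sketch_map_granule(maps, 0x0000);
	sketch_map_granule(maps, 0x1000);
	printf("shared: %d\n",
	       atomic_load(&maps[0].bounce_page) == atomic_load(&maps[1].bounce_page));
	free(atomic_load(&maps[0].bounce_page));
	return 0;
}

The same shape explains the changed free path above: only the head granule of each base page (the one where pfn << BOUNCE_MAP_SHIFT is PAGE_SIZE aligned) actually frees the page; the others merely drop their pointer.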
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 7f3f0928ec78..775cad5238f3 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -19,6 +19,11 @@
 
 #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
 
+#define BOUNCE_MAP_SHIFT 12
+#define BOUNCE_MAP_SIZE (1 << BOUNCE_MAP_SHIFT)
+#define BOUNCE_MAP_MASK (~(BOUNCE_MAP_SIZE - 1))
+#define BOUNCE_MAP_ALIGN(addr) (((addr) + BOUNCE_MAP_SIZE - 1) & ~(BOUNCE_MAP_SIZE - 1))
+
 struct vduse_bounce_map {
 	struct page *bounce_page;
 	struct page *user_bounce_page;
@@ -64,7 +69,7 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
 
 void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
 				  size_t size, dma_addr_t *dma_addr,
-				  gfp_t flag, unsigned long attrs);
+				  gfp_t flag);
 
 void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
 				void *vaddr, dma_addr_t dma_addr,
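The new BOUNCE_MAP_* macros fix the bounce granule at 4KB (1 << 12) independent of PAGE_SIZE. A small standalone check of how they split an address, using an example IOVA of 0x12345 (the asserts only restate the macros above, nothing beyond the header):

#include <assert.h>
#include <stdint.h>

#define BOUNCE_MAP_SHIFT 12
#define BOUNCE_MAP_SIZE (1 << BOUNCE_MAP_SHIFT)
#define BOUNCE_MAP_MASK (~(BOUNCE_MAP_SIZE - 1))
#define BOUNCE_MAP_ALIGN(addr) (((addr) + BOUNCE_MAP_SIZE - 1) & ~(BOUNCE_MAP_SIZE - 1))

int main(void)
{
	uint64_t iova = 0x12345;

	assert((iova >> BOUNCE_MAP_SHIFT) == 0x12);  /* bounce_maps[] index     */
	assert((iova & ~BOUNCE_MAP_MASK) == 0x345);  /* offset_in_bounce_page() */
	assert(BOUNCE_MAP_ALIGN(iova) == 0x13000);   /* next 4KB boundary       */
	return 0;
}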
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 04620bb77203..e7bced0b5542 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -814,59 +814,53 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
 	.free			= vduse_vdpa_free,
 };
 
-static void vduse_dev_sync_single_for_device(struct device *dev,
+static void vduse_dev_sync_single_for_device(union virtio_map token,
 					     dma_addr_t dma_addr, size_t size,
 					     enum dma_data_direction dir)
 {
-	struct vduse_dev *vdev = dev_to_vduse(dev);
-	struct vduse_iova_domain *domain = vdev->domain;
+	struct vduse_iova_domain *domain = token.iova_domain;
 
 	vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
 }
 
-static void vduse_dev_sync_single_for_cpu(struct device *dev,
+static void vduse_dev_sync_single_for_cpu(union virtio_map token,
 					  dma_addr_t dma_addr, size_t size,
 					  enum dma_data_direction dir)
 {
-	struct vduse_dev *vdev = dev_to_vduse(dev);
-	struct vduse_iova_domain *domain = vdev->domain;
+	struct vduse_iova_domain *domain = token.iova_domain;
 
 	vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
 }
 
-static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
+static dma_addr_t vduse_dev_map_page(union virtio_map token, struct page *page,
 				     unsigned long offset, size_t size,
 				     enum dma_data_direction dir,
 				     unsigned long attrs)
 {
-	struct vduse_dev *vdev = dev_to_vduse(dev);
-	struct vduse_iova_domain *domain = vdev->domain;
+	struct vduse_iova_domain *domain = token.iova_domain;
 
 	return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
 }
 
-static void vduse_dev_unmap_page(struct device *dev, dma_addr_t dma_addr,
-				 size_t size, enum dma_data_direction dir,
-				 unsigned long attrs)
+static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
+				 size_t size, enum dma_data_direction dir,
+				 unsigned long attrs)
 {
-	struct vduse_dev *vdev = dev_to_vduse(dev);
-	struct vduse_iova_domain *domain = vdev->domain;
+	struct vduse_iova_domain *domain = token.iova_domain;
 
 	return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
 }
 
-static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t *dma_addr, gfp_t flag,
-				      unsigned long attrs)
+static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size,
+				      dma_addr_t *dma_addr, gfp_t flag)
 {
-	struct vduse_dev *vdev = dev_to_vduse(dev);
-	struct vduse_iova_domain *domain = vdev->domain;
+	struct vduse_iova_domain *domain = token.iova_domain;
 	unsigned long iova;
 	void *addr;
 
 	*dma_addr = DMA_MAPPING_ERROR;
 	addr = vduse_domain_alloc_coherent(domain, size,
-					   (dma_addr_t *)&iova, flag, attrs);
+					   (dma_addr_t *)&iova, flag);
 	if (!addr)
 		return NULL;
 
@@ -875,31 +869,45 @@ static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
 	return addr;
 }
 
-static void vduse_dev_free_coherent(struct device *dev, size_t size,
-				    void *vaddr, dma_addr_t dma_addr,
-				    unsigned long attrs)
+static void vduse_dev_free_coherent(union virtio_map token, size_t size,
+				    void *vaddr, dma_addr_t dma_addr,
+				    unsigned long attrs)
 {
-	struct vduse_dev *vdev = dev_to_vduse(dev);
-	struct vduse_iova_domain *domain = vdev->domain;
+	struct vduse_iova_domain *domain = token.iova_domain;
 
 	vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
 }
 
-static size_t vduse_dev_max_mapping_size(struct device *dev)
+static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr)
 {
-	struct vduse_dev *vdev = dev_to_vduse(dev);
-	struct vduse_iova_domain *domain = vdev->domain;
+	struct vduse_iova_domain *domain = token.iova_domain;
+
+	return dma_addr < domain->bounce_size;
+}
+
+static int vduse_dev_mapping_error(union virtio_map token, dma_addr_t dma_addr)
+{
+	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
+		return -ENOMEM;
+	return 0;
+}
+
+static size_t vduse_dev_max_mapping_size(union virtio_map token)
+{
+	struct vduse_iova_domain *domain = token.iova_domain;
 
 	return domain->bounce_size;
 }
 
-static const struct dma_map_ops vduse_dev_dma_ops = {
+static const struct virtio_map_ops vduse_map_ops = {
 	.sync_single_for_device = vduse_dev_sync_single_for_device,
 	.sync_single_for_cpu = vduse_dev_sync_single_for_cpu,
 	.map_page = vduse_dev_map_page,
 	.unmap_page = vduse_dev_unmap_page,
 	.alloc = vduse_dev_alloc_coherent,
 	.free = vduse_dev_free_coherent,
+	.need_sync = vduse_dev_need_sync,
+	.mapping_error = vduse_dev_mapping_error,
 	.max_mapping_size = vduse_dev_max_mapping_size,
 };
 
@@ -2003,26 +2011,18 @@ static struct vduse_mgmt_dev *vduse_mgmt;
 static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
 {
 	struct vduse_vdpa *vdev;
-	int ret;
 
 	if (dev->vdev)
 		return -EEXIST;
 
 	vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
-				 &vduse_vdpa_config_ops, 1, 1, name, true);
+				 &vduse_vdpa_config_ops, &vduse_map_ops,
+				 1, 1, name, true);
 	if (IS_ERR(vdev))
 		return PTR_ERR(vdev);
 
 	dev->vdev = vdev;
 	vdev->dev = dev;
-	vdev->vdpa.dev.dma_mask = &vdev->vdpa.dev.coherent_dma_mask;
-	ret = dma_set_mask_and_coherent(&vdev->vdpa.dev, DMA_BIT_MASK(64));
-	if (ret) {
-		put_device(&vdev->vdpa.dev);
-		return ret;
-	}
-	set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
-	vdev->vdpa.dma_dev = &vdev->vdpa.dev;
 	vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev;
 
 	return 0;
@@ -2055,6 +2055,7 @@ static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
 		return -ENOMEM;
 	}
 
+	dev->vdev->vdpa.vmap.iova_domain = dev->domain;
 	ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
 	if (ret) {
 		put_device(&dev->vdev->vdpa.dev);
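Taken together, the vduse_dev.c changes drop the struct device based dma_map_ops path (dev_to_vduse(), set_dma_ops(), the DMA mask setup) in favor of a virtio_map_ops table keyed by a union virtio_map token whose iova_domain member is filled in before registration. A simplified, self-contained sketch of that token plumbing, with sketch_* stand-ins for the kernel types (not the real virtio_map definitions):

#include <stddef.h>
#include <stdio.h>

struct sketch_iova_domain {
	size_t bounce_size;
};

/* The token the core hands back to every mapping callback. */
union sketch_virtio_map {
	struct sketch_iova_domain *iova_domain;
};

struct sketch_map_ops {
	size_t (*max_mapping_size)(union sketch_virtio_map token);
};

static size_t sketch_max_mapping_size(union sketch_virtio_map token)
{
	/* Mirrors vduse_dev_max_mapping_size(): no device lookup needed. */
	return token.iova_domain->bounce_size;
}

static const struct sketch_map_ops sketch_ops = {
	.max_mapping_size = sketch_max_mapping_size,
};

int main(void)
{
	struct sketch_iova_domain domain = { .bounce_size = 64 << 20 };
	/* Filled in before registration, as vdpa_dev_add() does with
	 * dev->vdev->vdpa.vmap.iova_domain = dev->domain above. */
	union sketch_virtio_map token = { .iova_domain = &domain };

	printf("max mapping size: %zu\n", sketch_ops.max_mapping_size(token));
	return 0;
}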