Diffstat (limited to 'drivers/gpu/drm/xe/xe_bo.c')
-rw-r--r--	drivers/gpu/drm/xe/xe_bo.c	431
1 file changed, 403 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 3f5391d416d4..64f9c936eea0 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -6,15 +6,19 @@
 #include "xe_bo.h"
 
 #include <linux/dma-buf.h>
+#include <linux/nospec.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_gem_ttm_helper.h>
 #include <drm/drm_managed.h>
+#include <drm/ttm/ttm_backup.h>
 #include <drm/ttm/ttm_device.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_tt.h>
 #include <uapi/drm/xe_drm.h>
 
+#include <kunit/static_stub.h>
+
 #include "xe_device.h"
 #include "xe_dma_buf.h"
 #include "xe_drm_client.h"
@@ -24,7 +28,9 @@
 #include "xe_migrate.h"
 #include "xe_pm.h"
 #include "xe_preempt_fence.h"
+#include "xe_pxp.h"
 #include "xe_res_cursor.h"
+#include "xe_shrinker.h"
 #include "xe_trace_bo.h"
 #include "xe_ttm_stolen_mgr.h"
 #include "xe_vm.h"
@@ -124,6 +130,22 @@ bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
 		GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270;
 }
 
+/**
+ * xe_bo_is_vm_bound - check if BO has any mappings through VM_BIND
+ * @bo: The BO
+ *
+ * Check if a given bo is bound through VM_BIND. This requires the
+ * reservation lock for the BO to be held.
+ *
+ * Returns: boolean
+ */
+bool xe_bo_is_vm_bound(struct xe_bo *bo)
+{
+	xe_bo_assert_held(bo);
+
+	return !list_empty(&bo->ttm.base.gpuva.list);
+}
+
 static bool xe_bo_is_user(struct xe_bo *bo)
 {
 	return bo->flags & XE_BO_FLAG_USER;
@@ -139,14 +161,17 @@ mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
 	return tile->migrate;
 }
 
-static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
+static struct xe_vram_region *res_to_mem_region(struct ttm_resource *res)
 {
 	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
 	struct ttm_resource_manager *mgr;
+	struct xe_ttm_vram_mgr *vram_mgr;
 
 	xe_assert(xe, resource_is_vram(res));
 	mgr = ttm_manager_type(&xe->ttm, res->mem_type);
-	return to_xe_ttm_vram_mgr(mgr)->vram;
+	vram_mgr = to_xe_ttm_vram_mgr(mgr);
+
+	return container_of(vram_mgr, struct xe_vram_region, ttm);
 }
 
 static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
@@ -175,12 +200,15 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
 		     struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
 {
 	struct ttm_place place = { .mem_type = mem_type };
-	struct xe_mem_region *vram;
+	struct ttm_resource_manager *mgr = ttm_manager_type(&xe->ttm, mem_type);
+	struct xe_ttm_vram_mgr *vram_mgr = to_xe_ttm_vram_mgr(mgr);
+
+	struct xe_vram_region *vram;
 	u64 io_size;
 
 	xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
 
-	vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram;
+	vram = container_of(vram_mgr, struct xe_vram_region, ttm);
 	xe_assert(xe, vram && vram->usable_size);
 	io_size = vram->io_size;
 
@@ -253,6 +281,8 @@ int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
 static void xe_evict_flags(struct ttm_buffer_object *tbo,
 			   struct ttm_placement *placement)
 {
+	struct xe_bo *bo;
+
 	if (!xe_bo_is_xe_bo(tbo)) {
 		/* Don't handle scatter gather BOs */
 		if (tbo->type == ttm_bo_type_sg) {
@@ -264,6 +294,12 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
 		return;
 	}
 
+	bo = ttm_to_xe_bo(tbo);
+	if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) {
+		*placement = sys_placement;
+		return;
+	}
+
 	/*
 	 * For xe, sg bos that are evicted to system just triggers a
 	 * rebind of the sg list upon subsequent validation to XE_PL_TT.
@@ -281,9 +317,11 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
 	}
 }
 
+/* struct xe_ttm_tt - Subclassed ttm_tt for xe */
 struct xe_ttm_tt {
 	struct ttm_tt ttm;
-	struct device *dev;
+	/** @xe - The xe device */
+	struct xe_device *xe;
 	struct sg_table sgt;
 	struct sg_table *sg;
 	/** @purgeable: Whether the content of the pages of @ttm is purgeable. */
@@ -296,7 +334,8 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
 	unsigned long num_pages = tt->num_pages;
 	int ret;
 
-	XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
+	XE_WARN_ON((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+		   !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE));
 
 	if (xe_tt->sg)
 		return 0;
@@ -304,13 +343,13 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
 	ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
 						num_pages, 0,
 						(u64)num_pages << PAGE_SHIFT,
-						xe_sg_segment_size(xe_tt->dev),
+						xe_sg_segment_size(xe_tt->xe->drm.dev),
 						GFP_KERNEL);
 	if (ret)
 		return ret;
 
 	xe_tt->sg = &xe_tt->sgt;
-	ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL,
+	ret = dma_map_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
 			      DMA_ATTR_SKIP_CPU_SYNC);
 	if (ret) {
 		sg_free_table(xe_tt->sg);
@@ -326,7 +365,7 @@ static void xe_tt_unmap_sg(struct ttm_tt *tt)
 	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
 
 	if (xe_tt->sg) {
-		dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
+		dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
 				  DMA_BIDIRECTIONAL, 0);
 		sg_free_table(xe_tt->sg);
 		xe_tt->sg = NULL;
@@ -341,21 +380,47 @@ struct sg_table *xe_bo_sg(struct xe_bo *bo)
 	return xe_tt->sg;
 }
 
+/*
+ * Account ttm pages against the device shrinker's shrinkable and
+ * purgeable counts.
+ */
+static void xe_ttm_tt_account_add(struct ttm_tt *tt)
+{
+	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+
+	if (xe_tt->purgeable)
+		xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, tt->num_pages);
+	else
+		xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, tt->num_pages, 0);
+}
+
+static void xe_ttm_tt_account_subtract(struct ttm_tt *tt)
+{
+	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+
+	if (xe_tt->purgeable)
+		xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, -(long)tt->num_pages);
+	else
+		xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, -(long)tt->num_pages, 0);
+}
+
 static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
 				       u32 page_flags)
 {
 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
 	struct xe_device *xe = xe_bo_device(bo);
-	struct xe_ttm_tt *tt;
+	struct xe_ttm_tt *xe_tt;
+	struct ttm_tt *tt;
 	unsigned long extra_pages;
 	enum ttm_caching caching = ttm_cached;
 	int err;
 
-	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
-	if (!tt)
+	xe_tt = kzalloc(sizeof(*xe_tt), GFP_KERNEL);
+	if (!xe_tt)
 		return NULL;
 
-	tt->dev = xe->drm.dev;
+	tt = &xe_tt->ttm;
+	xe_tt->xe = xe;
 
 	extra_pages = 0;
 	if (xe_bo_needs_ccs_pages(bo))
@@ -401,42 +466,66 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
 		caching = ttm_uncached;
 	}
 
-	err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
+	if (ttm_bo->type != ttm_bo_type_sg)
+		page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+	err = ttm_tt_init(tt, &bo->ttm, page_flags, caching, extra_pages);
 	if (err) {
-		kfree(tt);
+		kfree(xe_tt);
 		return NULL;
 	}
 
-	return &tt->ttm;
+	if (ttm_bo->type != ttm_bo_type_sg) {
+		err = ttm_tt_setup_backup(tt);
+		if (err) {
+			ttm_tt_fini(tt);
+			kfree(xe_tt);
+			return NULL;
+		}
+	}
+
+	return tt;
 }
 
 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
 			      struct ttm_operation_ctx *ctx)
 {
+	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
 	int err;
 
 	/*
 	 * dma-bufs are not populated with pages, and the dma-
	 * addresses are set up when moved to XE_PL_TT.
 	 */
-	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
+	if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+	    !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
 		return 0;
 
-	err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
+	if (ttm_tt_is_backed_up(tt) && !xe_tt->purgeable) {
+		err = ttm_tt_restore(ttm_dev, tt, ctx);
+	} else {
+		ttm_tt_clear_backed_up(tt);
+		err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
+	}
 	if (err)
 		return err;
 
-	return err;
+	xe_tt->purgeable = false;
+	xe_ttm_tt_account_add(tt);
+
+	return 0;
 }
 
 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
 {
-	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
+	if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+	    !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
 		return;
 
 	xe_tt_unmap_sg(tt);
 
-	return ttm_pool_free(&ttm_dev->pool, tt);
+	ttm_pool_free(&ttm_dev->pool, tt);
+	xe_ttm_tt_account_subtract(tt);
 }
 
 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
@@ -464,7 +553,7 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
 		return 0;
 	case XE_PL_VRAM0:
 	case XE_PL_VRAM1: {
-		struct xe_mem_region *vram = res_to_mem_region(mem);
+		struct xe_vram_region *vram = res_to_mem_region(mem);
 
 		if (!xe_ttm_resource_visible(mem))
 			return -EINVAL;
@@ -708,11 +797,40 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 		goto out;
 	}
 
+	if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) &&
+	    new_mem->mem_type == XE_PL_SYSTEM) {
+		ret = xe_svm_bo_evict(bo);
+		if (!ret) {
+			drm_dbg(&xe->drm, "Evict system allocator BO success\n");
+			ttm_bo_move_null(ttm_bo, new_mem);
+		} else {
+			drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n",
+				ERR_PTR(ret));
+		}
+
+		goto out;
+	}
+
 	if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
 		ttm_bo_move_null(ttm_bo, new_mem);
 		goto out;
 	}
 
+	/* Reject BO eviction if BO is bound to current VM. */
+	if (evict && ctx->resv) {
+		struct drm_gpuvm_bo *vm_bo;
+
+		drm_gem_for_each_gpuvm_bo(vm_bo, &bo->ttm.base) {
+			struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
+
+			if (xe_vm_resv(vm) == ctx->resv &&
+			    xe_vm_in_preempt_fence_mode(vm)) {
+				ret = -EBUSY;
+				goto out;
+			}
+		}
+	}
+
 	/*
 	 * Failed multi-hop where the old_mem is still marked as
 	 * TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
@@ -796,7 +914,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 
 	/* Create a new VMAP once kernel BO back in VRAM */
 	if (!ret && resource_is_vram(new_mem)) {
-		struct xe_mem_region *vram = res_to_mem_region(new_mem);
+		struct xe_vram_region *vram = res_to_mem_region(new_mem);
 		void __iomem *new_addr = vram->mapping +
 			(new_mem->start << PAGE_SHIFT);
 
@@ -871,6 +989,111 @@ out:
 	return ret;
 }
 
+static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
+			       struct ttm_buffer_object *bo,
+			       unsigned long *scanned)
+{
+	long lret;
+
+	/* Fake move to system, without copying data. */
+	if (bo->resource->mem_type != XE_PL_SYSTEM) {
+		struct ttm_resource *new_resource;
+
+		lret = ttm_bo_wait_ctx(bo, ctx);
+		if (lret)
+			return lret;
+
+		lret = ttm_bo_mem_space(bo, &sys_placement, &new_resource, ctx);
+		if (lret)
+			return lret;
+
+		xe_tt_unmap_sg(bo->ttm);
+		ttm_bo_move_null(bo, new_resource);
+	}
+
+	*scanned += bo->ttm->num_pages;
+	lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
+			     {.purge = true,
+			      .writeback = false,
+			      .allow_move = false});
+
+	if (lret > 0)
+		xe_ttm_tt_account_subtract(bo->ttm);
+
+	return lret;
+}
+
+/**
+ * xe_bo_shrink() - Try to shrink an xe bo.
+ * @ctx: The struct ttm_operation_ctx used for shrinking.
+ * @bo: The TTM buffer object whose pages to shrink.
+ * @flags: Flags governing the shrink behaviour.
+ * @scanned: Pointer to a counter of the number of pages
+ * attempted to shrink.
+ *
+ * Try to shrink- or purge a bo, and if it succeeds, unmap dma.
+ * Note that we need to be able to handle also non xe bos
+ * (ghost bos), but only if the struct ttm_tt is embedded in
+ * a struct xe_ttm_tt. When the function attempts to shrink
+ * the pages of a buffer object, The value pointed to by @scanned
+ * is updated.
+ *
+ * Return: The number of pages shrunken or purged, or negative error
+ * code on failure.
+ */
+long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+		  const struct xe_bo_shrink_flags flags,
+		  unsigned long *scanned)
+{
+	struct ttm_tt *tt = bo->ttm;
+	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+	struct ttm_place place = {.mem_type = bo->resource->mem_type};
+	struct xe_bo *xe_bo = ttm_to_xe_bo(bo);
+	struct xe_device *xe = xe_tt->xe;
+	bool needs_rpm;
+	long lret = 0L;
+
+	if (!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE) ||
+	    (flags.purge && !xe_tt->purgeable))
+		return -EBUSY;
+
+	if (!ttm_bo_eviction_valuable(bo, &place))
+		return -EBUSY;
+
+	if (!xe_bo_is_xe_bo(bo) || !xe_bo_get_unless_zero(xe_bo))
+		return xe_bo_shrink_purge(ctx, bo, scanned);
+
+	if (xe_tt->purgeable) {
+		if (bo->resource->mem_type != XE_PL_SYSTEM)
+			lret = xe_bo_move_notify(xe_bo, ctx);
+		if (!lret)
+			lret = xe_bo_shrink_purge(ctx, bo, scanned);
+		goto out_unref;
+	}
+
+	/* System CCS needs gpu copy when moving PL_TT -> PL_SYSTEM */
+	needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM &&
+		     xe_bo_needs_ccs_pages(xe_bo));
+	if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
+		goto out_unref;
+
+	*scanned += tt->num_pages;
+	lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
+			     {.purge = false,
+			      .writeback = flags.writeback,
+			      .allow_move = true});
+	if (needs_rpm)
+		xe_pm_runtime_put(xe);
+
+	if (lret > 0)
+		xe_ttm_tt_account_subtract(tt);
+
+out_unref:
+	xe_bo_put(xe_bo);
+
+	return lret;
+}
+
 /**
  * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
  * @bo: The buffer object to move.
@@ -1006,7 +1229,7 @@ static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
 {
 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
 	struct xe_res_cursor cursor;
-	struct xe_mem_region *vram;
+	struct xe_vram_region *vram;
 
 	if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
 		return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;
@@ -1146,7 +1369,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
 	struct iosys_map vmap;
 	struct xe_res_cursor cursor;
-	struct xe_mem_region *vram;
+	struct xe_vram_region *vram;
 	int bytes_left = len;
 
 	xe_bo_assert_held(bo);
@@ -1644,6 +1867,7 @@ __xe_bo_create_locked(struct xe_device *xe,
 		}
 	}
 
+	trace_xe_bo_create(bo);
 	return bo;
 
 err_unlock_put_bo:
@@ -1781,6 +2005,8 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
 	struct xe_bo *bo;
 	int ret;
 
+	KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);
+
 	bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
 	if (IS_ERR(bo))
 		return bo;
@@ -1885,6 +2111,8 @@ int xe_bo_pin_external(struct xe_bo *bo)
 	}
 
 	ttm_bo_pin(&bo->ttm);
+	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+		xe_ttm_tt_account_subtract(bo->ttm.ttm);
 
 	/*
 	 * FIXME: If we always use the reserve / unreserve functions for locking
@@ -1944,6 +2172,8 @@ int xe_bo_pin(struct xe_bo *bo)
 	}
 
 	ttm_bo_pin(&bo->ttm);
+	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+		xe_ttm_tt_account_subtract(bo->ttm.ttm);
 
 	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
@@ -1978,6 +2208,8 @@ void xe_bo_unpin_external(struct xe_bo *bo)
 	spin_unlock(&xe->pinned.lock);
 
 	ttm_bo_unpin(&bo->ttm);
+	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+		xe_ttm_tt_account_add(bo->ttm.ttm);
 
 	/*
 	 * FIXME: If we always use the reserve / unreserve functions for locking
@@ -2001,6 +2233,8 @@ void xe_bo_unpin(struct xe_bo *bo)
 		spin_unlock(&xe->pinned.lock);
 	}
 	ttm_bo_unpin(&bo->ttm);
+	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+		xe_ttm_tt_account_add(bo->ttm.ttm);
 }
 
 /**
@@ -2135,6 +2369,93 @@ void xe_bo_vunmap(struct xe_bo *bo)
 	__xe_bo_vunmap(bo);
 }
 
+static int gem_create_set_pxp_type(struct xe_device *xe, struct xe_bo *bo, u64 value)
+{
+	if (value == DRM_XE_PXP_TYPE_NONE)
+		return 0;
+
+	/* we only support DRM_XE_PXP_TYPE_HWDRM for now */
+	if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
+		return -EINVAL;
+
+	return xe_pxp_key_assign(xe->pxp, bo);
+}
+
+typedef int (*xe_gem_create_set_property_fn)(struct xe_device *xe,
+					     struct xe_bo *bo,
+					     u64 value);
+
+static const xe_gem_create_set_property_fn gem_create_set_property_funcs[] = {
+	[DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_set_pxp_type,
+};
+
+static int gem_create_user_ext_set_property(struct xe_device *xe,
+					     struct xe_bo *bo,
+					     u64 extension)
+{
+	u64 __user *address = u64_to_user_ptr(extension);
+	struct drm_xe_ext_set_property ext;
+	int err;
+	u32 idx;
+
+	err = __copy_from_user(&ext, address, sizeof(ext));
+	if (XE_IOCTL_DBG(xe, err))
+		return -EFAULT;
+
+	if (XE_IOCTL_DBG(xe, ext.property >=
+			 ARRAY_SIZE(gem_create_set_property_funcs)) ||
+	    XE_IOCTL_DBG(xe, ext.pad) ||
+	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY))
+		return -EINVAL;
+
+	idx = array_index_nospec(ext.property, ARRAY_SIZE(gem_create_set_property_funcs));
+	if (!gem_create_set_property_funcs[idx])
+		return -EINVAL;
+
+	return gem_create_set_property_funcs[idx](xe, bo, ext.value);
+}
+
+typedef int (*xe_gem_create_user_extension_fn)(struct xe_device *xe,
+					       struct xe_bo *bo,
+					       u64 extension);
+
+static const xe_gem_create_user_extension_fn gem_create_user_extension_funcs[] = {
+	[DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_user_ext_set_property,
+};
+
+#define MAX_USER_EXTENSIONS 16
+static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo,
+				      u64 extensions, int ext_number)
+{
+	u64 __user *address = u64_to_user_ptr(extensions);
+	struct drm_xe_user_extension ext;
+	int err;
+	u32 idx;
+
+	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
+		return -E2BIG;
+
+	err = __copy_from_user(&ext, address, sizeof(ext));
+	if (XE_IOCTL_DBG(xe, err))
+		return -EFAULT;
+
+	if (XE_IOCTL_DBG(xe, ext.pad) ||
+	    XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(gem_create_user_extension_funcs)))
+		return -EINVAL;
+
+	idx = array_index_nospec(ext.name,
+				 ARRAY_SIZE(gem_create_user_extension_funcs));
+	err = gem_create_user_extension_funcs[idx](xe, bo, extensions);
+	if (XE_IOCTL_DBG(xe, err))
+		return err;
+
+	if (ext.next_extension)
+		return gem_create_user_extensions(xe, bo, ext.next_extension,
+						  ++ext_number);
+
+	return 0;
+}
+
 int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file)
 {
@@ -2142,13 +2463,13 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 	struct xe_file *xef = to_xe_file(file);
 	struct drm_xe_gem_create *args = data;
 	struct xe_vm *vm = NULL;
+	ktime_t end = 0;
 	struct xe_bo *bo;
 	unsigned int bo_flags;
 	u32 handle;
 	int err;
 
-	if (XE_IOCTL_DBG(xe, args->extensions) ||
-	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
+	if (XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
 		return -EINVAL;
 
@@ -2214,6 +2535,10 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 		vm = xe_vm_lookup(xef, args->vm_id);
 		if (XE_IOCTL_DBG(xe, !vm))
 			return -ENOENT;
+	}
+
+retry:
+	if (vm) {
 		err = xe_vm_lock(vm, true);
 		if (err)
 			goto out_vm;
@@ -2227,9 +2552,17 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 
 	if (IS_ERR(bo)) {
 		err = PTR_ERR(bo);
+		if (xe_vm_validate_should_retry(NULL, err, &end))
+			goto retry;
 		goto out_vm;
 	}
 
+	if (args->extensions) {
+		err = gem_create_user_extensions(xe, bo, args->extensions, 0);
+		if (err)
+			goto out_bulk;
+	}
+
 	err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
 	if (err)
 		goto out_bulk;
@@ -2263,9 +2596,26 @@ int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
 	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
 		return -EINVAL;
 
-	if (XE_IOCTL_DBG(xe, args->flags))
+	if (XE_IOCTL_DBG(xe, args->flags &
+			 ~DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER))
 		return -EINVAL;
 
+	if (args->flags & DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER) {
+		if (XE_IOCTL_DBG(xe, !IS_DGFX(xe)))
+			return -EINVAL;
+
+		if (XE_IOCTL_DBG(xe, args->handle))
+			return -EINVAL;
+
+		if (XE_IOCTL_DBG(xe, PAGE_SIZE > SZ_4K))
+			return -EINVAL;
+
+		BUILD_BUG_ON(((XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT) +
+			      SZ_4K) >= DRM_FILE_PAGE_OFFSET_START);
+		args->offset = XE_PCI_BARRIER_MMAP_OFFSET;
+		return 0;
+	}
+
 	gem_obj = drm_gem_object_lookup(file, args->handle);
 	if (XE_IOCTL_DBG(xe, !gem_obj))
 		return -ENOENT;
@@ -2500,6 +2850,31 @@ void xe_bo_put_commit(struct llist_head *deferred)
 		drm_gem_object_free(&bo->ttm.base.refcount);
 }
 
+static void xe_bo_dev_work_func(struct work_struct *work)
+{
+	struct xe_bo_dev *bo_dev = container_of(work, typeof(*bo_dev), async_free);
+
+	xe_bo_put_commit(&bo_dev->async_list);
+}
+
+/**
+ * xe_bo_dev_init() - Initialize BO dev to manage async BO freeing
+ * @bo_dev: The BO dev structure
+ */
+void xe_bo_dev_init(struct xe_bo_dev *bo_dev)
+{
+	INIT_WORK(&bo_dev->async_free, xe_bo_dev_work_func);
+}
+
+/**
+ * xe_bo_dev_fini() - Finalize BO dev managing async BO freeing
+ * @bo_dev: The BO dev structure
+ */
+void xe_bo_dev_fini(struct xe_bo_dev *bo_dev)
+{
+	flush_work(&bo_dev->async_free);
+}
+
 void xe_bo_put(struct xe_bo *bo)
 {
 	struct xe_tile *tile;
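For reference, a minimal userspace sketch of how the extension chain parsed by the newly added gem_create_user_extensions() could be driven, requesting a PXP-protected BO at creation time. This is not part of the patch: the helper name xe_gem_create_pxp() is hypothetical, and the drm_xe_gem_create fields other than .extensions (size, placement, cpu_caching, handle) are assumed to come from the existing xe uapi header rather than from this diff; error handling is trimmed.

```c
/* Hypothetical userspace sketch, assuming libdrm and the xe uapi header. */
#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

static int xe_gem_create_pxp(int fd, __u64 size, __u32 placement, __u32 *handle)
{
	/* Single-entry extension chain: set the PXP type property. */
	struct drm_xe_ext_set_property pxp_ext = {
		.base.next_extension = 0,	/* last (and only) extension */
		.base.name = DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY,
		.property = DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY,
		.value = DRM_XE_PXP_TYPE_HWDRM,	/* only type gem_create_set_pxp_type() accepts */
	};
	struct drm_xe_gem_create create = {
		.extensions = (__u64)(uintptr_t)&pxp_ext,
		.size = size,
		.placement = placement,
		.cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
	};

	if (drmIoctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
		return -errno;

	*handle = create.handle;
	return 0;
}
```

The kernel side walks the same chain: ext.name selects the handler in gem_create_user_extension_funcs[], the set-property handler re-reads the full drm_xe_ext_set_property, and its value is passed to gem_create_set_pxp_type(), which rejects anything other than DRM_XE_PXP_TYPE_NONE or DRM_XE_PXP_TYPE_HWDRM.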