author     Matthew Brost <matthew.brost@intel.com>                2025-03-05 17:26:36 -0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>        2025-05-29 11:02:27 +0200
commit     623669ae7a8204264098c80cdc13086b7371bacf (patch)
tree       b7307a17d88eda287dd4f5dd9bf5b8e1188836f9
parent     a3642d2d73a97b39c50fcb6abb71c2488cda21ae (diff)
drm/xe: Nuke VM's mapping upon close
[ Upstream commit 074e40d9c2a84939fe28d7121d3469db50f34a3d ]

Clear root PT entry and invalidate entire VM's address space when closing
the VM. Will prevent the GPU from accessing any of the VM's memory after
closing.

v2:
 - s/vma/vm in kernel doc (CI)
 - Don't nuke migration VM as this occur at driver unload (CI)
v3:
 - Rebase and pull into SVM series (Thomas)
 - Wait for pending binds (Thomas)
v5:
 - Remove xe_gt_tlb_invalidation_fence_fini in error case (Matt Auld)
 - Drop local migration bool (Thomas)
v7:
 - Add drm_dev_enter/exit protecting invalidation (CI, Matt Auld)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250306012657.3505757-12-matthew.brost@intel.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
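For a quick read before the raw hunks, here is a condensed sketch of the new close path. It restates the xe_vm_close() change from the xe_vm.c hunk below; the function name xe_vm_close_sketch and the comments are illustrative only, while xe_pt_clear() and xe_gt_tlb_invalidation_vm() are the helpers this patch introduces.

/* Condensed sketch of the new xe_vm_close() behaviour (see xe_vm.c hunk). */
static void xe_vm_close_sketch(struct xe_vm *vm)
{
	struct xe_device *xe = vm->xe;
	bool bound;
	int idx;

	/* Skip the hardware access entirely if the device is already gone. */
	bound = drm_dev_enter(&xe->drm, &idx);

	down_write(&vm->lock);
	vm->size = 0;

	/* Migration VMs are only torn down at driver unload; leave them alone. */
	if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
		struct xe_tile *tile;
		struct xe_gt *gt;
		u8 id;

		/* Wait for pending binds so nothing is still writing the PTs. */
		dma_resv_wait_timeout(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP,
				      false, MAX_SCHEDULE_TIMEOUT);

		if (bound) {
			/* Zero each tile's root page table ... */
			for_each_tile(tile, xe, id)
				if (vm->pt_root[id])
					xe_pt_clear(xe, vm->pt_root[id]);

			/* ... then invalidate the whole VM address space per GT. */
			for_each_gt(gt, xe, id)
				xe_gt_tlb_invalidation_vm(gt, vm);
		}
	}

	up_write(&vm->lock);

	if (bound)
		drm_dev_exit(idx);
}

The drm_dev_enter()/drm_dev_exit() pair (v7) guards the hardware access against device unplug, and the dma_resv wait (v3) ensures no in-flight bind job is still updating the page tables being cleared.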
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c  22
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h   2
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c                    14
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.h                     3
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c                    32
5 files changed, 73 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 98a450271f5c..3155825fa46a 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -407,6 +407,28 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
}
/**
+ * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM
+ * @gt: graphics tile
+ * @vm: VM to invalidate
+ *
+ * Invalidate entire VM's address space
+ */
+void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
+{
+ struct xe_gt_tlb_invalidation_fence fence;
+ u64 range = 1ull << vm->xe->info.va_bits;
+ int ret;
+
+ xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
+
+ ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid);
+ if (ret < 0)
+ return;
+
+ xe_gt_tlb_invalidation_fence_wait(&fence);
+}
+
+/**
* xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
* @gt: graphics tile
* @fence: invalidation fence which will be signal on TLB invalidation
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index 672acfcdf0d7..abe9b03d543e 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -12,6 +12,7 @@
struct xe_gt;
struct xe_guc;
+struct xe_vm;
struct xe_vma;
int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
@@ -21,6 +22,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
struct xe_vma *vma);
+void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
u64 start, u64 end, u32 asid);
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 230cf47fb9c5..fb94ff55c736 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -218,6 +218,20 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
}
/**
+ * xe_pt_clear() - Clear a page-table.
+ * @xe: xe device.
+ * @pt: The page-table.
+ *
+ * Clears page-table by setting to zero.
+ */
+void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt)
+{
+ struct iosys_map *map = &pt->bo->vmap;
+
+ xe_map_memset(xe, map, 0, 0, SZ_4K);
+}
+
+/**
* DOC: Pagetable building
*
* Below we use the term "page-table" for both page-directories, containing
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
index 9ab386431cad..8e43912ae8e9 100644
--- a/drivers/gpu/drm/xe/xe_pt.h
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -13,6 +13,7 @@ struct dma_fence;
struct xe_bo;
struct xe_device;
struct xe_exec_queue;
+struct xe_svm_range;
struct xe_sync_entry;
struct xe_tile;
struct xe_vm;
@@ -35,6 +36,8 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
+void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt);
+
int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
struct xe_vma_ops *vops);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 872de052d670..de257a032225 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -8,6 +8,7 @@
#include <linux/dma-fence-array.h>
#include <linux/nospec.h>
+#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_execbuf_util.h>
@@ -1581,9 +1582,40 @@ err_no_resv:
static void xe_vm_close(struct xe_vm *vm)
{
+ struct xe_device *xe = vm->xe;
+ bool bound;
+ int idx;
+
+ bound = drm_dev_enter(&xe->drm, &idx);
+
down_write(&vm->lock);
+
vm->size = 0;
+
+ if (!((vm->flags & XE_VM_FLAG_MIGRATION))) {
+ struct xe_tile *tile;
+ struct xe_gt *gt;
+ u8 id;
+
+ /* Wait for pending binds */
+ dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ if (bound) {
+ for_each_tile(tile, xe, id)
+ if (vm->pt_root[id])
+ xe_pt_clear(xe, vm->pt_root[id]);
+
+ for_each_gt(gt, xe, id)
+ xe_gt_tlb_invalidation_vm(gt, vm);
+ }
+ }
+
up_write(&vm->lock);
+
+ if (bound)
+ drm_dev_exit(idx);
}
void xe_vm_close_and_put(struct xe_vm *vm)