Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 166
1 file changed, 107 insertions(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index a20ae783f244..664fb801c221 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -43,9 +43,53 @@ static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
return 0;
}
+static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close);
+
+static void detach_vm(struct drm_gem_object *obj, struct msm_gem_vm *vm)
+{
+ msm_gem_assert_locked(obj);
+
+ struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_find(&vm->base, obj);
+ if (vm_bo) {
+ struct drm_gpuva *vma;
+
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ if (vma->vm != &vm->base)
+ continue;
+ msm_gem_vma_purge(to_msm_vma(vma));
+ msm_gem_vma_close(to_msm_vma(vma));
+ break;
+ }
+
+ drm_gpuvm_bo_put(vm_bo);
+ }
+}
+
static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
{
+ struct msm_context *ctx = file->driver_priv;
+
update_ctx_mem(file, -obj->size);
+
+ /*
+ * If the VM isn't created yet, there is nothing to clean up. And in
+ * fact calling put_iova_spaces() with vm=NULL would be bad: it would
+ * tear down the mappings of shared buffers in other contexts.
+ */
+ if (!ctx->vm)
+ return;
+
+ /*
+ * TODO: we might need to kick this to a queue to avoid blocking
+ * in the CLOSE ioctl
+ */
+ dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, false,
+ msecs_to_jiffies(1000));
+
+ msm_gem_lock(obj);
+ put_iova_spaces(obj, &ctx->vm->base, true);
+ detach_vm(obj, ctx->vm);
+ msm_gem_unlock(obj);
}
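
A note on the drm_gpuvm helpers introduced above: drm_gpuvm_bo_find() returns the vm_bo for a given (VM, GEM object) pair with a reference held, or NULL if the object has no mapping in that VM, and the caller is responsible for dropping that reference with drm_gpuvm_bo_put(). A minimal sketch of the contract (the helper names are the real drm_gpuvm API; the body is illustrative):

	struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_find(&vm->base, obj);

	if (vm_bo) {
		/* ... inspect mappings via drm_gpuvm_bo_for_each_va() ... */
		drm_gpuvm_bo_put(vm_bo);	/* drop the _find() reference */
	}

This is why detach_vm() unconditionally calls drm_gpuvm_bo_put() once it is done walking the VAs.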
/*
@@ -167,6 +211,13 @@ static void put_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ /*
+ * Skip gpuvm in the object free path to avoid a WARN_ON() splat.
+ * See explanation in msm_gem_assert_locked()
+ */
+ if (kref_read(&obj->refcount))
+ drm_gpuvm_bo_gem_evict(obj, true);
+
if (msm_obj->pages) {
if (msm_obj->sgt) {
/* For non-cached buffers, ensure the new
@@ -334,16 +385,25 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
}
static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
- struct msm_gem_vm *vm)
+ struct msm_gem_vm *vm)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
+ struct drm_gpuvm_bo *vm_bo;
msm_gem_assert_locked(obj);
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- if (vma->vm == vm)
- return vma;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct drm_gpuva *vma;
+
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ if (vma->vm == &vm->base) {
+ /* lookup_vma() should only be used in paths
+ * with at most one vma per vm
+ */
+ GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva));
+
+ return to_msm_vma(vma);
+ }
+ }
}
return NULL;
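
lookup_vma() now returns msm's wrapper around the generic drm_gpuva. The to_msm_vma() upcast works because struct msm_gem_vma embeds the drm_gpuva as its base member, consistent with the vma->base.va.addr accesses later in this patch. A sketch of the shape, assuming the definitions in msm_gem.h look roughly like this:

	struct msm_gem_vma {
		struct drm_gpuva base;	/* embedded generic VA */
		bool mapped;
		/* ... */
	};

	static inline struct msm_gem_vma *to_msm_vma(struct drm_gpuva *vma)
	{
		return container_of(vma, struct msm_gem_vma, base);
	}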
@@ -356,33 +416,29 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
* mapping.
*/
static void
-put_iova_spaces(struct drm_gem_object *obj, bool close)
+put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma, *tmp;
+ struct drm_gpuvm_bo *vm_bo, *tmp;
msm_gem_assert_locked(obj);
- list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
- if (vma->vm) {
- msm_gem_vma_purge(vma);
- if (close)
- msm_gem_vma_close(vma);
- }
- }
-}
+ drm_gem_for_each_gpuvm_bo_safe (vm_bo, tmp, obj) {
+ struct drm_gpuva *vma, *vmatmp;
-/* Called with msm_obj locked */
-static void
-put_iova_vmas(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma, *tmp;
+ if (vm && vm_bo->vm != vm)
+ continue;
- msm_gem_assert_locked(obj);
+ drm_gpuvm_bo_get(vm_bo);
+
+ drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) {
+ struct msm_gem_vma *msm_vma = to_msm_vma(vma);
+
+ msm_gem_vma_purge(msm_vma);
+ if (close)
+ msm_gem_vma_close(msm_vma);
+ }
- list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
- msm_gem_vma_close(vma);
+ drm_gpuvm_bo_put(vm_bo);
}
}
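
With the vm argument added, put_iova_spaces() can either scan every VM the object is mapped in (vm == NULL) or restrict itself to a single VM. The three call sites in this patch cover the combinations, collected here for reference:

	/* purge/evict: unmap everywhere, but keep the VMAs themselves */
	put_iova_spaces(obj, NULL, false);

	/* object free: unmap and close in every VM */
	put_iova_spaces(obj, NULL, true);

	/* file close: only tear down this context's mappings */
	put_iova_spaces(obj, &ctx->vm->base, true);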
@@ -390,7 +446,6 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
struct msm_gem_vm *vm,
u64 range_start, u64 range_end)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
msm_gem_assert_locked(obj);
@@ -399,12 +454,9 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
if (!vma) {
vma = msm_gem_vma_new(vm, obj, range_start, range_end);
- if (IS_ERR(vma))
- return vma;
- list_add_tail(&vma->list, &msm_obj->vmas);
} else {
- GEM_WARN_ON(vma->iova < range_start);
- GEM_WARN_ON((vma->iova + obj->size) > range_end);
+ GEM_WARN_ON(vma->base.va.addr < range_start);
+ GEM_WARN_ON((vma->base.va.addr + obj->size) > range_end);
}
return vma;
@@ -484,7 +536,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
ret = msm_gem_pin_vma_locked(obj, vma);
if (!ret) {
- *iova = vma->iova;
+ *iova = vma->base.va.addr;
pin_obj_locked(obj);
}
@@ -530,7 +582,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
} else {
- *iova = vma->iova;
+ *iova = vma->base.va.addr;
}
msm_gem_unlock(obj);
@@ -571,7 +623,7 @@ int msm_gem_set_iova(struct drm_gem_object *obj,
vma = get_vma_locked(obj, vm, iova, iova + obj->size);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- } else if (GEM_WARN_ON(vma->iova != iova)) {
+ } else if (GEM_WARN_ON(vma->base.va.addr != iova)) {
clear_iova(obj, vm);
ret = -EBUSY;
}
@@ -593,9 +645,10 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj,
msm_gem_lock(obj);
vma = lookup_vma(obj, vm);
- if (!GEM_WARN_ON(!vma)) {
+ if (vma) {
msm_gem_unpin_locked(obj);
}
+ detach_vm(obj, vm);
msm_gem_unlock(obj);
}
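
For context, a hypothetical caller showing where the new detach_vm() call lands in the pin/unpin lifecycle (msm_gem_get_and_pin_iova() is the existing counterpart in this file; error handling elided):

	uint64_t iova;
	int ret = msm_gem_get_and_pin_iova(obj, vm, &iova);

	if (!ret) {
		/* ... hand 'iova' to the GPU ... */
		msm_gem_unpin_iova(obj, vm);	/* now also drops the VM_BO */
	}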
@@ -755,7 +808,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */
- put_iova_spaces(obj, false);
+ put_iova_spaces(obj, NULL, false);
msm_gem_vunmap(obj);
@@ -763,8 +816,6 @@ void msm_gem_purge(struct drm_gem_object *obj)
put_pages(obj);
- put_iova_vmas(obj);
-
mutex_lock(&priv->lru.lock);
/* A one-way transition: */
msm_obj->madv = __MSM_MADV_PURGED;
@@ -795,7 +846,7 @@ void msm_gem_evict(struct drm_gem_object *obj)
GEM_WARN_ON(is_unevictable(msm_obj));
/* Get rid of any iommu mapping(s): */
- put_iova_spaces(obj, false);
+ put_iova_spaces(obj, NULL, false);
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
@@ -861,7 +912,6 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_resv *robj = obj->resv;
- struct msm_gem_vma *vma;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
@@ -904,14 +954,17 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
- if (!list_empty(&msm_obj->vmas)) {
+ if (!list_empty(&obj->gpuva.list)) {
+ struct drm_gpuvm_bo *vm_bo;
seq_puts(m, " vmas:");
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- const char *name, *comm;
- if (vma->vm) {
- struct msm_gem_vm *vm = vma->vm;
+ drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
+ struct drm_gpuva *vma;
+
+ drm_gpuvm_bo_for_each_va (vma, vm_bo) {
+ const char *name, *comm;
+ struct msm_gem_vm *vm = to_msm_vm(vma->vm);
struct task_struct *task =
get_pid_task(vm->pid, PIDTYPE_PID);
if (task) {
@@ -920,15 +973,14 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
} else {
comm = NULL;
}
- name = vm->name;
- } else {
- name = comm = NULL;
+ name = vm->base.name;
+
+ seq_printf(m, " [%s%s%s: vm=%p, %08llx, %smapped]",
+ name, comm ? ":" : "", comm ? comm : "",
+ vma->vm, vma->va.addr,
+ to_msm_vma(vma)->mapped ? "" : "un");
+ kfree(comm);
}
- seq_printf(m, " [%s%s%s: vm=%p, %08llx,%s]",
- name, comm ? ":" : "", comm ? comm : "",
- vma->vm, vma->iova,
- vma->mapped ? "mapped" : "unmapped");
- kfree(comm);
}
seq_puts(m, "\n");
@@ -974,7 +1026,7 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
- put_iova_spaces(obj, true);
+ put_iova_spaces(obj, NULL, true);
if (drm_gem_is_imported(obj)) {
GEM_WARN_ON(msm_obj->vaddr);
@@ -984,13 +1036,10 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
*/
kvfree(msm_obj->pages);
- put_iova_vmas(obj);
-
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
msm_gem_vunmap(obj);
put_pages(obj);
- put_iova_vmas(obj);
}
drm_gem_object_release(obj);
@@ -1096,7 +1145,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->madv = MSM_MADV_WILLNEED;
INIT_LIST_HEAD(&msm_obj->node);
- INIT_LIST_HEAD(&msm_obj->vmas);
*obj = &msm_obj->base;
(*obj)->funcs = &msm_gem_object_funcs;
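
The dropped INIT_LIST_HEAD(&msm_obj->vmas) is the counterpart of the list removals above: mappings now live on the obj->gpuva.list that drm_gpuvm maintains. Presumably the driver relies on DRM core to initialize that list, which drm_gem_private_object_init() does for drivers declaring the DRIVER_GEM_GPUVA feature. An illustrative sketch (the feature flag is the real DRM one; the flag combination shown is not necessarily msm's actual set):

	static const struct drm_driver msm_driver = {
		.driver_features = DRIVER_GEM | DRIVER_RENDER | DRIVER_GEM_GPUVA,
		/* ... */
	};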