Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c     2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c            262
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c        40
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c          34
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c         82
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c   21
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c      231
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c             28
8 files changed, 440 insertions, 260 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 6ddc16f0fe2b3..d27691f2e4518 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -134,7 +134,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
agp_be->mem = NULL;
agp_be->bridge = bridge;
- if (ttm_tt_init(&agp_be->ttm, bo, page_flags, ttm_write_combined)) {
+ if (ttm_tt_init(&agp_be->ttm, bo, page_flags, ttm_write_combined, 0)) {
kfree(agp_be);
return NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index db3dc7ef5382c..75d308ec173d3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -69,107 +69,54 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
}
}
-static inline void ttm_bo_move_to_pinned(struct ttm_buffer_object *bo)
-{
- struct ttm_device *bdev = bo->bdev;
-
- list_move_tail(&bo->lru, &bdev->pinned);
-
- if (bdev->funcs->del_from_lru_notify)
- bdev->funcs->del_from_lru_notify(bo);
-}
-
-static inline void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
-{
- struct ttm_device *bdev = bo->bdev;
-
- list_del_init(&bo->lru);
-
- if (bdev->funcs->del_from_lru_notify)
- bdev->funcs->del_from_lru_notify(bo);
-}
-
-static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
- struct ttm_buffer_object *bo)
-{
- if (!pos->first)
- pos->first = bo;
- pos->last = bo;
-}
-
-void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
- struct ttm_resource *mem,
- struct ttm_lru_bulk_move *bulk)
+/**
+ * ttm_bo_move_to_lru_tail
+ *
+ * @bo: The buffer object.
+ *
+ * Move this BO to the tail of all lru lists used to look up and reserve an
+ * object. This function must be called with struct ttm_device::lru_lock
+ * held, and is used to make a BO less likely to be considered for eviction.
+ */
+void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
- struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man;
-
- if (!bo->deleted)
- dma_resv_assert_held(bo->base.resv);
-
- if (bo->pin_count) {
- ttm_bo_move_to_pinned(bo);
- return;
- }
-
- if (!mem)
- return;
-
- man = ttm_manager_type(bdev, mem->mem_type);
- list_move_tail(&bo->lru, &man->lru[bo->priority]);
-
- if (bdev->funcs->del_from_lru_notify)
- bdev->funcs->del_from_lru_notify(bo);
-
- if (bulk && !bo->pin_count) {
- switch (bo->resource->mem_type) {
- case TTM_PL_TT:
- ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
- break;
+ dma_resv_assert_held(bo->base.resv);
- case TTM_PL_VRAM:
- ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
- break;
- }
- }
+ if (bo->resource)
+ ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
-void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
+/**
+ * ttm_bo_set_bulk_move - update the BO's bulk move object
+ *
+ * @bo: The buffer object.
+ *
+ * Update the BO's bulk move object, making sure that resources are added/removed
+ * as well. A bulk move allows moving many resources on the LRU at once,
+ * resulting in much less overhead for maintaining the LRU.
+ * The only requirement is that the resources stay together on the LRU and are
+ * never separated. This is enforced by setting the bulk_move structure on a BO.
+ * ttm_lru_bulk_move_tail() should be used to move all resources to the tail of
+ * their LRU list.
+ */
+void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
+ struct ttm_lru_bulk_move *bulk)
{
- unsigned i;
-
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
- struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
- struct ttm_resource_manager *man;
-
- if (!pos->first)
- continue;
-
- dma_resv_assert_held(pos->first->base.resv);
- dma_resv_assert_held(pos->last->base.resv);
-
- man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
- list_bulk_move_tail(&man->lru[i], &pos->first->lru,
- &pos->last->lru);
- }
-
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
- struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
- struct ttm_resource_manager *man;
-
- if (!pos->first)
- continue;
+ dma_resv_assert_held(bo->base.resv);
- dma_resv_assert_held(pos->first->base.resv);
- dma_resv_assert_held(pos->last->base.resv);
+ if (bo->bulk_move == bulk)
+ return;
- man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
- list_bulk_move_tail(&man->lru[i], &pos->first->lru,
- &pos->last->lru);
- }
+ spin_lock(&bo->bdev->lru_lock);
+ if (bo->bulk_move && bo->resource)
+ ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
+ bo->bulk_move = bulk;
+ if (bo->bulk_move && bo->resource)
+ ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
+ spin_unlock(&bo->bdev->lru_lock);
}
-EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
+EXPORT_SYMBOL(ttm_bo_set_bulk_move);
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_resource *mem, bool evict,
@@ -204,6 +151,10 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
}
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
+ if (ret)
+ goto out_err;
+
ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
if (ret) {
if (ret == -EMULTIHOP)
@@ -272,7 +223,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_iter_begin(&cursor, resv, true);
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (!fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
@@ -301,7 +252,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
struct dma_resv *resv = &bo->base._resv;
int ret;
- if (dma_resv_test_signaled(resv, true))
+ if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
ret = 0;
else
ret = -EBUSY;
@@ -313,7 +264,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
dma_resv_unlock(bo->base.resv);
spin_unlock(&bo->bdev->lru_lock);
- lret = dma_resv_wait_timeout(resv, true, interruptible,
+ lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
+ interruptible,
30 * HZ);
if (lret < 0)
@@ -344,7 +296,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
return ret;
}
- ttm_bo_move_to_pinned(bo);
list_del_init(&bo->ddestroy);
spin_unlock(&bo->bdev->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
@@ -409,6 +360,7 @@ static void ttm_bo_release(struct kref *kref)
int ret;
WARN_ON_ONCE(bo->pin_count);
+ WARN_ON_ONCE(bo->bulk_move);
if (!bo->deleted) {
ret = ttm_bo_individualize_resv(bo);
@@ -416,7 +368,8 @@ static void ttm_bo_release(struct kref *kref)
/* Last resort, if we fail to allocate memory for the
* fences block for the BO to become idle
*/
- dma_resv_wait_timeout(bo->base.resv, true, false,
+ dma_resv_wait_timeout(bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP, false,
30 * HZ);
}
@@ -427,7 +380,7 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_free(bdev, bo->resource);
}
- if (!dma_resv_test_signaled(bo->base.resv, true) ||
+ if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
@@ -445,7 +398,7 @@ static void ttm_bo_release(struct kref *kref)
*/
if (bo->pin_count) {
bo->pin_count = 0;
- ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
+ ttm_resource_move_to_lru_tail(bo->resource);
}
kref_init(&bo->kref);
@@ -458,7 +411,6 @@ static void ttm_bo_release(struct kref *kref)
}
spin_lock(&bo->bdev->lru_lock);
- ttm_bo_del_from_lru(bo);
list_del(&bo->ddestroy);
spin_unlock(&bo->bdev->lru_lock);
@@ -466,7 +418,6 @@ static void ttm_bo_release(struct kref *kref)
dma_resv_unlock(bo->base.resv);
atomic_dec(&ttm_glob.bo_count);
- dma_fence_put(bo->moving);
bo->destroy(bo);
}
@@ -673,36 +624,29 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
struct ww_acquire_ctx *ticket)
{
struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
+ struct ttm_resource_cursor cursor;
+ struct ttm_resource *res;
bool locked = false;
- unsigned i;
int ret;
spin_lock(&bdev->lru_lock);
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
- list_for_each_entry(bo, &man->lru[i], lru) {
- bool busy;
-
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, place,
- &locked, &busy)) {
- if (busy && !busy_bo && ticket !=
- dma_resv_locking_ctx(bo->base.resv))
- busy_bo = bo;
- continue;
- }
-
- if (!ttm_bo_get_unless_zero(bo)) {
- if (locked)
- dma_resv_unlock(bo->base.resv);
- continue;
- }
- break;
+ ttm_resource_manager_for_each_res(man, &cursor, res) {
+ bool busy;
+
+ if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
+ &locked, &busy)) {
+ if (busy && !busy_bo && ticket !=
+ dma_resv_locking_ctx(res->bo->base.resv))
+ busy_bo = res->bo;
+ continue;
}
- /* If the inner loop terminated early, we have our candidate */
- if (&bo->lru != &man->lru[i])
+ if (ttm_bo_get_unless_zero(res->bo)) {
+ bo = res->bo;
break;
-
- bo = NULL;
+ }
+ if (locked)
+ dma_resv_unlock(res->bo->base.resv);
}
if (!bo) {
@@ -734,10 +678,43 @@ int ttm_mem_evict_first(struct ttm_device *bdev,
return ret;
}
+/**
+ * ttm_bo_pin - Pin the buffer object.
+ * @bo: The buffer object to pin
+ *
+ * Make sure the buffer is not evicted any more during memory pressure.
+ * @bo must be unpinned again by calling ttm_bo_unpin().
+ */
+void ttm_bo_pin(struct ttm_buffer_object *bo)
+{
+ dma_resv_assert_held(bo->base.resv);
+ WARN_ON_ONCE(!kref_read(&bo->kref));
+ if (!(bo->pin_count++) && bo->bulk_move && bo->resource)
+ ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
+}
+EXPORT_SYMBOL(ttm_bo_pin);
+
+/**
+ * ttm_bo_unpin - Unpin the buffer object.
+ * @bo: The buffer object to unpin
+ *
+ * Allows the buffer object to be evicted again during memory pressure.
+ */
+void ttm_bo_unpin(struct ttm_buffer_object *bo)
+{
+ dma_resv_assert_held(bo->base.resv);
+ WARN_ON_ONCE(!kref_read(&bo->kref));
+ if (WARN_ON_ONCE(!bo->pin_count))
+ return;
+
+ if (!(--bo->pin_count) && bo->bulk_move && bo->resource)
+ ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
+}
+EXPORT_SYMBOL(ttm_bo_unpin);
+
/*
- * Add the last move fence to the BO and reserve a new shared slot. We only use
- * a shared slot to avoid unecessary sync and rely on the subsequent bo move to
- * either stall or use an exclusive fence respectively set bo->moving.
+ * Add the last move fence to the BO as a kernel dependency and reserve a new
+ * fence slot.
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man,
@@ -760,17 +737,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
return ret;
}
- dma_resv_add_shared_fence(bo->base.resv, fence);
-
- ret = dma_resv_reserve_shared(bo->base.resv, 1);
- if (unlikely(ret)) {
- dma_fence_put(fence);
- return ret;
- }
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
- dma_fence_put(bo->moving);
- bo->moving = fence;
- return 0;
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
+ dma_fence_put(fence);
+ return ret;
}
/*
@@ -821,7 +792,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
bool type_found = false;
int i, ret;
- ret = dma_resv_reserve_shared(bo->base.resv, 1);
+ ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (unlikely(ret))
return ret;
@@ -875,9 +846,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
error:
- if (bo->resource->mem_type == TTM_PL_SYSTEM && !bo->pin_count)
- ttm_bo_move_to_lru_tail_unlocked(bo);
-
return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
@@ -971,14 +939,13 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
kref_init(&bo->kref);
- INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
bo->bdev = bdev;
bo->type = type;
bo->page_alignment = page_alignment;
- bo->moving = NULL;
bo->pin_count = 0;
bo->sg = sg;
+ bo->bulk_move = NULL;
if (resv) {
bo->base.resv = resv;
dma_resv_assert_held(bo->base.resv);
@@ -1021,8 +988,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
return ret;
}
- ttm_bo_move_to_lru_tail_unlocked(bo);
-
return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
@@ -1072,14 +1037,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
long timeout = 15 * HZ;
if (no_wait) {
- if (dma_resv_test_signaled(bo->base.resv, true))
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
return 0;
else
return -EBUSY;
}
- timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
- timeout);
+ timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ interruptible, timeout);
if (timeout < 0)
return timeout;
@@ -1123,7 +1088,6 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
return ret == -EBUSY ? -ENOSPC : ret;
}
- ttm_bo_move_to_pinned(bo);
/* TODO: Cleanup the locking */
spin_unlock(&bo->bdev->lru_lock);
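
The ttm_bo.c changes replace the old per-BO bulk-move helpers (ttm_bo_bulk_move_lru_tail() and the mem/bulk arguments of ttm_bo_move_to_lru_tail()) with a per-resource scheme driven by ttm_bo_set_bulk_move() and ttm_lru_bulk_move_tail(). Below is a minimal sketch of how a driver could wire this up; the struct my_vm container and the my_vm_* function names are purely illustrative, and the header paths are assumed to be those in use around this series.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_resource.h>

/* Illustrative per-VM container grouping all BOs of a VM into one bulk move. */
struct my_vm {
	struct ttm_lru_bulk_move lru_bulk_move;
};

static void my_vm_init(struct my_vm *vm)
{
	ttm_lru_bulk_move_init(&vm->lru_bulk_move);
}

/* Caller holds bo->base.resv; the BO's resources are now tracked by the VM. */
static void my_vm_attach_bo(struct my_vm *vm, struct ttm_buffer_object *bo)
{
	ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
}

/* Caller holds bo->base.resv. The bulk move must be cleared before the final
 * reference is dropped: ttm_bo_release() now warns if bo->bulk_move is set. */
static void my_vm_detach_bo(struct ttm_buffer_object *bo)
{
	ttm_bo_set_bulk_move(bo, NULL);
}

/* After command submission, bump all resources of the VM in one go. */
static void my_vm_move_to_lru_tail(struct my_vm *vm, struct ttm_device *bdev)
{
	spin_lock(&bdev->lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&bdev->lru_lock);
}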
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2b8caa1efaa33..1cbfb00c1d658 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -221,9 +221,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->base = *bo;
- ttm_bo_get(bo);
- fbo->bo = bo;
-
/**
* Fix up members that we shouldn't copy directly:
* TODO: Explicit member copy would probably be better here.
@@ -231,8 +228,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
atomic_inc(&ttm_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
- INIT_LIST_HEAD(&fbo->base.lru);
- fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
kref_init(&fbo->base.kref);
@@ -251,6 +246,15 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
+ if (ret) {
+ kfree(fbo);
+ return ret;
+ }
+
+ ttm_bo_get(bo);
+ fbo->bo = bo;
+
ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
*new_obj = &fbo->base;
@@ -495,14 +499,12 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
* operation has completed.
*/
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
-
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;
- dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
+ dma_resv_add_fence(&ghost_obj->base._resv, fence,
+ DMA_RESV_USAGE_KERNEL);
/**
* If we're not moving to fixed memory, the TTM object
@@ -540,9 +542,6 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
spin_unlock(&from->move_lock);
ttm_resource_free(bo, &bo->resource);
-
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
}
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
@@ -556,7 +555,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
int ret = 0;
- dma_resv_add_excl_fence(bo->base.resv, fence);
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
if (!evict)
ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
else if (!from->use_tt && pipeline)
@@ -573,6 +572,21 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+ int ret;
+
+ ret = ttm_bo_wait_free_node(bo, man->use_tt);
+ if (WARN_ON(ret))
+ return;
+
+ ttm_bo_assign_mem(bo, new_mem);
+}
+EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
+
/**
* ttm_bo_pipeline_gutting - purge the contents of a bo
* @bo: The buffer object
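
With bo->moving gone, a driver finishes a copy through ttm_bo_move_accel_cleanup(), which now attaches the fence with DMA_RESV_USAGE_KERNEL, or through the new ttm_bo_move_sync_cleanup() when the copy completed synchronously. The following is only a hedged sketch of a driver ->move() callback tail: my_copy_buffer() and my_cpu_copy() are hypothetical helpers, and the declarations are assumed to live in drm/ttm/ttm_bo_driver.h at this point in time.

#include <linux/dma-fence.h>
#include <drm/ttm/ttm_bo_driver.h>

static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_resource *new_mem,
		      struct ttm_place *hop)
{
	struct dma_fence *fence;
	int ret;

	fence = my_copy_buffer(bo, new_mem);	/* hypothetical blit helper */
	if (IS_ERR(fence)) {
		/* Fall back to a CPU copy and switch resources without a
		 * fence using the new synchronous cleanup helper. */
		my_cpu_copy(bo, new_mem);	/* hypothetical */
		ttm_bo_move_sync_cleanup(bo, new_mem);
		return 0;
	}

	/* Adds the fence with DMA_RESV_USAGE_KERNEL and, for non-eviction
	 * moves, hands the old resource over to a ghost object. */
	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return ret;
}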
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 08ba083a80d23..5b324f2452656 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -46,17 +46,13 @@
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
- vm_fault_t ret = 0;
- int err = 0;
-
- if (likely(!bo->moving))
- goto out_unlock;
+ long err = 0;
/*
* Quick non-stalling check for idle.
*/
- if (dma_fence_is_signaled(bo->moving))
- goto out_clear;
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
+ return 0;
/*
* If possible, avoid waiting for GPU with mmap_lock
@@ -64,34 +60,30 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
* is the first attempt.
*/
if (fault_flag_allow_retry_first(vmf->flags)) {
- ret = VM_FAULT_RETRY;
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
- goto out_unlock;
+ return VM_FAULT_RETRY;
ttm_bo_get(bo);
mmap_read_unlock(vmf->vma->vm_mm);
- (void) dma_fence_wait(bo->moving, true);
+ (void)dma_resv_wait_timeout(bo->base.resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
- goto out_unlock;
+ return VM_FAULT_RETRY;
}
/*
* Ordinary wait.
*/
- err = dma_fence_wait(bo->moving, true);
- if (unlikely(err != 0)) {
- ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+ err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (unlikely(err < 0)) {
+ return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
- goto out_unlock;
}
-out_clear:
- dma_fence_put(bo->moving);
- bo->moving = NULL;
-
-out_unlock:
- return ret;
+ return 0;
}
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
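
The fault path no longer waits on a dedicated bo->moving fence; it waits for all DMA_RESV_USAGE_KERNEL fences in the BO's reservation object instead. The same pattern can be reused wherever a driver only needs to wait for kernel-internal moves or clears; a minimal sketch, with the helper name being illustrative:

#include <linux/dma-resv.h>
#include <linux/sched.h>
#include <drm/ttm/ttm_bo_api.h>

static int my_wait_for_kernel_fences(struct ttm_buffer_object *bo, bool intr)
{
	long ret;

	/* Quick non-blocking check first. */
	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
		return 0;

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL,
				    intr, MAX_SCHEDULE_TIMEOUT);
	return ret < 0 ? ret : 0;
}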
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index be24bb6cefd0b..a0562ab386f51 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -142,9 +142,10 @@ EXPORT_SYMBOL(ttm_global_swapout);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
+ struct ttm_resource_cursor cursor;
struct ttm_resource_manager *man;
- struct ttm_buffer_object *bo;
- unsigned i, j;
+ struct ttm_resource *res;
+ unsigned i;
int ret;
spin_lock(&bdev->lru_lock);
@@ -153,17 +154,16 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
if (!man || !man->use_tt)
continue;
- for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
- list_for_each_entry(bo, &man->lru[j], lru) {
- uint32_t num_pages = PFN_UP(bo->base.size);
-
- ret = ttm_bo_swapout(bo, ctx, gfp_flags);
- /* ttm_bo_swapout has dropped the lru_lock */
- if (!ret)
- return num_pages;
- if (ret != -EBUSY)
- return ret;
- }
+ ttm_resource_manager_for_each_res(man, &cursor, res) {
+ struct ttm_buffer_object *bo = res->bo;
+ uint32_t num_pages = PFN_UP(bo->base.size);
+
+ ret = ttm_bo_swapout(bo, ctx, gfp_flags);
+ /* ttm_bo_swapout has dropped the lru_lock */
+ if (!ret)
+ return num_pages;
+ if (ret != -EBUSY)
+ return ret;
}
}
spin_unlock(&bdev->lru_lock);
@@ -259,49 +259,45 @@ void ttm_device_fini(struct ttm_device *bdev)
}
EXPORT_SYMBOL(ttm_device_fini);
-void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
+static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
+ struct list_head *list)
{
- struct ttm_resource_manager *man;
- struct ttm_buffer_object *bo;
- unsigned int i, j;
+ struct ttm_resource *res;
spin_lock(&bdev->lru_lock);
- while (!list_empty(&bdev->pinned)) {
- bo = list_first_entry(&bdev->pinned, struct ttm_buffer_object, lru);
+ while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
+ struct ttm_buffer_object *bo = res->bo;
+
/* Take ref against racing releases once lru_lock is unlocked */
- if (ttm_bo_get_unless_zero(bo)) {
- list_del_init(&bo->lru);
- spin_unlock(&bdev->lru_lock);
+ if (!ttm_bo_get_unless_zero(bo))
+ continue;
- if (bo->ttm)
- ttm_tt_unpopulate(bo->bdev, bo->ttm);
+ list_del_init(&res->lru);
+ spin_unlock(&bdev->lru_lock);
- ttm_bo_put(bo);
- spin_lock(&bdev->lru_lock);
- }
+ if (bo->ttm)
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+
+ ttm_bo_put(bo);
+ spin_lock(&bdev->lru_lock);
}
+ spin_unlock(&bdev->lru_lock);
+}
+
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
+{
+ struct ttm_resource_manager *man;
+ unsigned int i, j;
+
+ ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned);
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
if (!man || !man->use_tt)
continue;
- for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
- while (!list_empty(&man->lru[j])) {
- bo = list_first_entry(&man->lru[j], struct ttm_buffer_object, lru);
- if (ttm_bo_get_unless_zero(bo)) {
- list_del_init(&bo->lru);
- spin_unlock(&bdev->lru_lock);
-
- if (bo->ttm)
- ttm_tt_unpopulate(bo->bdev, bo->ttm);
-
- ttm_bo_put(bo);
- spin_lock(&bdev->lru_lock);
- }
- }
- }
+ for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j)
+ ttm_device_clear_lru_dma_mappings(bdev, &man->lru[j]);
}
- spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 071c48d672c6b..dbee34a058dfd 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -90,6 +90,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
+ unsigned int num_fences;
ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
if (ret == -EALREADY && dups) {
@@ -100,12 +101,10 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
continue;
}
+ num_fences = max(entry->num_shared, 1u);
if (!ret) {
- if (!entry->num_shared)
- continue;
-
- ret = dma_resv_reserve_shared(bo->base.resv,
- entry->num_shared);
+ ret = dma_resv_reserve_fences(bo->base.resv,
+ num_fences);
if (!ret)
continue;
}
@@ -120,9 +119,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
}
- if (!ret && entry->num_shared)
- ret = dma_resv_reserve_shared(bo->base.resv,
- entry->num_shared);
+ if (!ret)
+ ret = dma_resv_reserve_fences(bo->base.resv,
+ num_fences);
if (unlikely(ret != 0)) {
if (ticket) {
@@ -155,10 +154,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- if (entry->num_shared)
- dma_resv_add_shared_fence(bo->base.resv, fence);
- else
- dma_resv_add_excl_fence(bo->base.resv, fence);
+ dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
+ DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
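
ttm_execbuf_util.c now always reserves at least one fence slot per BO and tags the submission fence with an explicit dma_resv usage instead of choosing between a shared and an exclusive fence. Outside of the execbuf helpers the same two-step pattern applies; a short sketch, with the function name being illustrative:

#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

static int my_fence_bo(struct ttm_buffer_object *bo,
		       struct dma_fence *fence, bool is_write)
{
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/* A slot must be reserved before a fence can be added. */
	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		return ret;

	dma_resv_add_fence(bo->base.resv, fence,
			   is_write ? DMA_RESV_USAGE_WRITE :
				      DMA_RESV_USAGE_READ);
	return 0;
}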
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 9a477f0fddeef..65889b3caf502 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -30,12 +30,129 @@
#include <drm/ttm/ttm_bo_driver.h>
/**
+ * ttm_lru_bulk_move_init - initialize a bulk move structure
+ * @bulk: the structure to init
+ *
+ * For now just memset the structure to zero.
+ */
+void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
+{
+ memset(bulk, 0, sizeof(*bulk));
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_init);
+
+/**
+ * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
+ *
+ * @bulk: bulk move structure
+ *
+ * Bulk move BOs to the LRU tail, only valid to use when the driver makes sure
+ * that the resource order never changes. Should be called with
+ * &ttm_device.lru_lock held.
+ */
+void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
+{
+ unsigned i, j;
+
+ for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
+ for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
+ struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
+ struct ttm_resource_manager *man;
+
+ if (!pos->first)
+ continue;
+
+ lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
+ dma_resv_assert_held(pos->first->bo->base.resv);
+ dma_resv_assert_held(pos->last->bo->base.resv);
+
+ man = ttm_manager_type(pos->first->bo->bdev, i);
+ list_bulk_move_tail(&man->lru[j], &pos->first->lru,
+ &pos->last->lru);
+ }
+ }
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
+
+/* Return the bulk move pos object for this resource */
+static struct ttm_lru_bulk_move_pos *
+ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
+{
+ return &bulk->pos[res->mem_type][res->bo->priority];
+}
+
+/* Move the resource to the tail of the bulk move range */
+static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
+ struct ttm_resource *res)
+{
+ if (pos->last != res) {
+ list_move(&res->lru, &pos->last->lru);
+ pos->last = res;
+ }
+}
+
+/* Add the resource to a bulk_move cursor */
+void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource *res)
+{
+ struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
+
+ if (!pos->first) {
+ pos->first = res;
+ pos->last = res;
+ } else {
+ ttm_lru_bulk_move_pos_tail(pos, res);
+ }
+}
+
+/* Remove the resource from a bulk_move range */
+void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource *res)
+{
+ struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
+
+ if (unlikely(pos->first == res && pos->last == res)) {
+ pos->first = NULL;
+ pos->last = NULL;
+ } else if (pos->first == res) {
+ pos->first = list_next_entry(res, lru);
+ } else if (pos->last == res) {
+ pos->last = list_prev_entry(res, lru);
+ } else {
+ list_move(&res->lru, &pos->last->lru);
+ }
+}
+
+/* Move a resource to the LRU or bulk tail */
+void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
+{
+ struct ttm_buffer_object *bo = res->bo;
+ struct ttm_device *bdev = bo->bdev;
+
+ lockdep_assert_held(&bo->bdev->lru_lock);
+
+ if (bo->pin_count) {
+ list_move_tail(&res->lru, &bdev->pinned);
+
+ } else if (bo->bulk_move) {
+ struct ttm_lru_bulk_move_pos *pos =
+ ttm_lru_bulk_move_pos(bo->bulk_move, res);
+
+ ttm_lru_bulk_move_pos_tail(pos, res);
+ } else {
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(bdev, res->mem_type);
+ list_move_tail(&res->lru, &man->lru[bo->priority]);
+ }
+}
+
+/**
 * ttm_resource_init - resource object constructor
* @bo: buffer object this resources is allocated for
* @place: placement of the resource
 * @res: the resource object to initialize
*
- * Initialize a new resource object. Counterpart of &ttm_resource_fini.
+ * Initialize a new resource object. Counterpart of ttm_resource_fini().
*/
void ttm_resource_init(struct ttm_buffer_object *bo,
const struct ttm_place *place,
@@ -52,10 +169,15 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
res->bus.is_iomem = false;
res->bus.caching = ttm_cached;
res->bo = bo;
+ INIT_LIST_HEAD(&res->lru);
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
- man->usage += bo->base.size;
+ man->usage += res->num_pages << PAGE_SHIFT;
+ if (bo->bulk_move)
+ ttm_lru_bulk_move_add(bo->bulk_move, res);
+ else
+ ttm_resource_move_to_lru_tail(res);
spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
@@ -66,15 +188,19 @@ EXPORT_SYMBOL(ttm_resource_init);
* @res: the resource to clean up
*
* Should be used by resource manager backends to clean up the TTM resource
- * objects before freeing the underlying structure. Counterpart of
- * &ttm_resource_init
+ * objects before freeing the underlying structure. Makes sure the resource is
+ * removed from the LRU before destruction.
+ * Counterpart of ttm_resource_init().
*/
void ttm_resource_fini(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
- spin_lock(&man->bdev->lru_lock);
- man->usage -= res->bo->base.size;
- spin_unlock(&man->bdev->lru_lock);
+ struct ttm_device *bdev = man->bdev;
+
+ spin_lock(&bdev->lru_lock);
+ list_del_init(&res->lru);
+ man->usage -= res->num_pages << PAGE_SHIFT;
+ spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);
@@ -95,6 +221,12 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
if (!*res)
return;
+ if (bo->bulk_move) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_lru_bulk_move_del(bo->bulk_move, *res);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
man = ttm_manager_type(bo->bdev, (*res)->mem_type);
man->func->free(man, *res);
*res = NULL;
@@ -273,6 +405,57 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
+/**
+ * ttm_resource_manager_first
+ *
+ * @man: resource manager to iterate over
+ * @cursor: cursor to record the position
+ *
+ * Returns the first resource from the resource manager.
+ */
+struct ttm_resource *
+ttm_resource_manager_first(struct ttm_resource_manager *man,
+ struct ttm_resource_cursor *cursor)
+{
+ struct ttm_resource *res;
+
+ lockdep_assert_held(&man->bdev->lru_lock);
+
+ for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
+ ++cursor->priority)
+ list_for_each_entry(res, &man->lru[cursor->priority], lru)
+ return res;
+
+ return NULL;
+}
+
+/**
+ * ttm_resource_manager_next
+ *
+ * @man: resource manager to iterate over
+ * @cursor: cursor to record the position
+ * @res: the current resource pointer
+ *
+ * Returns the next resource from the resource manager.
+ */
+struct ttm_resource *
+ttm_resource_manager_next(struct ttm_resource_manager *man,
+ struct ttm_resource_cursor *cursor,
+ struct ttm_resource *res)
+{
+ lockdep_assert_held(&man->bdev->lru_lock);
+
+ list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
+ return res;
+
+ for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
+ ++cursor->priority)
+ list_for_each_entry(res, &man->lru[cursor->priority], lru)
+ return res;
+
+ return NULL;
+}
+
static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
struct iosys_map *dmap,
pgoff_t i)
@@ -461,3 +644,37 @@ ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
ttm_mem_io_free(bdev, mem);
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int ttm_resource_manager_show(struct seq_file *m, void *unused)
+{
+ struct ttm_resource_manager *man =
+ (struct ttm_resource_manager *)m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ ttm_resource_manager_debug(man, &p);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);
+
+#endif
+
+/**
+ * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
+ * resource manager.
+ * @man: The TTM resource manager for which the debugfs stats file will be created
+ * @parent: debugfs directory in which the file will reside
+ * @name: The filename to create.
+ *
+ * This function sets up a debugfs file that can be used to look
+ * at debug statistics of the specified ttm_resource_manager.
+ */
+void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
+ struct dentry * parent,
+ const char *name)
+{
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
+#endif
+}
+EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
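
Since resources rather than BOs now sit on the LRU, walking a manager in eviction order goes through the new cursor iterator instead of per-priority BO lists. A minimal sketch, assuming the caller may take the device's lru_lock; the function name is illustrative:

#include <linux/printk.h>
#include <drm/ttm/ttm_resource.h>

static void my_dump_lru(struct ttm_resource_manager *man)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;

	spin_lock(&man->bdev->lru_lock);
	ttm_resource_manager_for_each_res(man, &cursor, res)
		pr_info("bo %p: %lu pages on the LRU\n", res->bo,
			(unsigned long)res->num_pages);
	spin_unlock(&man->bdev->lru_lock);
}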
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d234aab800a08..d505603930a79 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -96,19 +96,17 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
*/
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
- GFP_KERNEL | __GFP_ZERO);
+ ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
if (!ttm->pages)
return -ENOMEM;
+
return 0;
}
static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->pages = kvmalloc_array(ttm->num_pages,
- sizeof(*ttm->pages) +
- sizeof(*ttm->dma_address),
- GFP_KERNEL | __GFP_ZERO);
+ ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
+ sizeof(*ttm->dma_address), GFP_KERNEL);
if (!ttm->pages)
return -ENOMEM;
@@ -118,11 +116,11 @@ static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->dma_address = kvmalloc_array(ttm->num_pages,
- sizeof(*ttm->dma_address),
- GFP_KERNEL | __GFP_ZERO);
+ ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
+ GFP_KERNEL);
if (!ttm->dma_address)
return -ENOMEM;
+
return 0;
}
@@ -134,9 +132,10 @@ void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static void ttm_tt_init_fields(struct ttm_tt *ttm,
struct ttm_buffer_object *bo,
uint32_t page_flags,
- enum ttm_caching caching)
+ enum ttm_caching caching,
+ unsigned long extra_pages)
{
- ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
+ ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
ttm->caching = ttm_cached;
ttm->page_flags = page_flags;
ttm->dma_address = NULL;
@@ -146,9 +145,10 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags, enum ttm_caching caching)
+ uint32_t page_flags, enum ttm_caching caching,
+ unsigned long extra_pages)
{
- ttm_tt_init_fields(ttm, bo, page_flags, caching);
+ ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);
if (ttm_tt_alloc_page_directory(ttm)) {
pr_err("Failed allocating page table\n");
@@ -180,7 +180,7 @@ int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
{
int ret;
- ttm_tt_init_fields(ttm, bo, page_flags, caching);
+ ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);
if (page_flags & TTM_TT_FLAG_EXTERNAL)
ret = ttm_sg_tt_alloc_page_directory(ttm);
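
ttm_tt_init() gains an extra_pages argument so a driver can allocate room for pages beyond the BO size; the AGP backend and ttm_sg_tt_init() simply pass 0. A hedged sketch of a driver ttm_tt_create callback after this interface change; everything except the TTM API calls is illustrative:

#include <linux/slab.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_tt.h>

static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
				       uint32_t page_flags)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	/* No extra pages needed here, so the new last argument is 0. */
	if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}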