From 7febe4bfd5d477eba17f70d4879cb81e9787118e Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 1 Aug 2018 16:22:39 +0200 Subject: drm/scheduler: fix setting the priorty for entities (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we now deal with multiple rq we need to update all of them, not just the current one. v2: Trivial: Removed unused variable (Alex) Signed-off-by: Christian König Acked-by: Nayan Deshmukh Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index df69657610460..02d563cfb4a73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -394,7 +394,6 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, { int i; struct amdgpu_device *adev = ctx->adev; - struct drm_sched_rq *rq; struct drm_sched_entity *entity; struct amdgpu_ring *ring; enum drm_sched_priority ctx_prio; @@ -407,12 +406,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, for (i = 0; i < adev->num_rings; i++) { ring = adev->rings[i]; entity = &ctx->rings[i].entity; - rq = &ring->sched.sched_rq[ctx_prio]; if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) continue; - drm_sched_entity_set_rq(entity, rq); + drm_sched_entity_set_priority(entity, ctx_prio); } } -- cgit v1.2.3 From 845e6fdf3b52ae8d8cde8ddafa6bbd60214f2bd2 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 13 Jul 2018 09:12:44 +0200 Subject: drm/amdgpu: use scheduler load balancing for SDMA CS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Start to use the scheduler load balancing for userspace SDMA command submissions. 
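In short, a context entity is no longer bound to a single run queue: every SDMA ring contributes its run queue of the requested priority, and drm_sched_entity_init() is handed the whole array so the scheduler can place each job on the least-loaded SDMA ring. A condensed sketch of that idea (kernel-style excerpt against the amdgpu structures used in this series; the helper name and the entity index chosen are illustrative only, the real change is the diff below):

/* Sketch: gather one rq per SDMA ring at the given priority. */
static int amdgpu_ctx_sdma_entity_sketch(struct amdgpu_device *adev,
					 struct amdgpu_ctx *ctx,
					 enum drm_sched_priority priority)
{
	struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS];
	unsigned i, num_sdma_rqs = 0;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA)
			sdma_rqs[num_sdma_rqs++] =
				&ring->sched.sched_rq[priority];
	}

	/* One entity, several run queues: the scheduler load balances. */
	return drm_sched_entity_init(&ctx->rings[0].entity,
				     sdma_rqs, num_sdma_rqs,
				     &ctx->guilty);
}
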
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 02d563cfb4a73..3ff8042b8f89a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -48,7 +48,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct drm_file *filp, struct amdgpu_ctx *ctx) { - unsigned i, j; + struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS]; + unsigned i, j, num_sdma_rqs; int r; if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) @@ -80,18 +81,34 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->init_priority = priority; ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; - /* create context entity for each ring */ + num_sdma_rqs = 0; for (i = 0; i < adev->num_rings; i++) { struct amdgpu_ring *ring = adev->rings[i]; struct drm_sched_rq *rq; rq = &ring->sched.sched_rq[priority]; + if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA) + sdma_rqs[num_sdma_rqs++] = rq; + } + + /* create context entity for each ring */ + for (i = 0; i < adev->num_rings; i++) { + struct amdgpu_ring *ring = adev->rings[i]; if (ring == &adev->gfx.kiq.ring) continue; - r = drm_sched_entity_init(&ctx->rings[i].entity, - &rq, 1, &ctx->guilty); + if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA) { + r = drm_sched_entity_init(&ctx->rings[i].entity, + sdma_rqs, num_sdma_rqs, + &ctx->guilty); + } else { + struct drm_sched_rq *rq; + + rq = &ring->sched.sched_rq[priority]; + r = drm_sched_entity_init(&ctx->rings[i].entity, + &rq, 1, &ctx->guilty); + } if (r) goto failed; } -- cgit v1.2.3 From 72a4c072ca9f2640ea303c399bd3224b69a543d9 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 16 Jul 2018 14:59:26 +0200 Subject: drm/amdgpu: use scheduler load balancing for compute CS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Start to use the scheduler load balancing for userspace compute command submissions. 
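The compute rings get the same treatment as SDMA: their run queues of the requested priority are collected and the compute entities are initialized across all of them, while the remaining engines keep a single run queue per entity. One consequence, which later patches in this series rely on, is that the concrete ring an entity will use is only known from the entity's current run queue; the existing to_amdgpu_ring() helper recovers it. A small illustrative excerpt (assumes entity->rq has already been assigned by the scheduler):

	/* entity->rq is the run queue the scheduler currently serves
	 * this entity from; the ring owning that scheduler is where
	 * the next job lands. */
	struct amdgpu_ring *ring = to_amdgpu_ring(entity->rq->sched);

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		DRM_DEBUG("entity currently mapped to ring %u\n", ring->idx);
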
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 3ff8042b8f89a..a078e68e03192 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -49,7 +49,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) { struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS]; - unsigned i, j, num_sdma_rqs; + struct drm_sched_rq *comp_rqs[AMDGPU_MAX_RINGS]; + unsigned i, j, num_sdma_rqs, num_comp_rqs; int r; if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) @@ -82,6 +83,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; num_sdma_rqs = 0; + num_comp_rqs = 0; for (i = 0; i < adev->num_rings; i++) { struct amdgpu_ring *ring = adev->rings[i]; struct drm_sched_rq *rq; @@ -89,6 +91,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, rq = &ring->sched.sched_rq[priority]; if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA) sdma_rqs[num_sdma_rqs++] = rq; + else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) + comp_rqs[num_comp_rqs++] = rq; } /* create context entity for each ring */ @@ -102,6 +106,10 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, r = drm_sched_entity_init(&ctx->rings[i].entity, sdma_rqs, num_sdma_rqs, &ctx->guilty); + } else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + r = drm_sched_entity_init(&ctx->rings[i].entity, + comp_rqs, num_comp_rqs, + &ctx->guilty); } else { struct drm_sched_rq *rq; -- cgit v1.2.3 From 869a53d4d7d7976d039b9389aa90b6f3d29ed234 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 16 Jul 2018 15:19:20 +0200 Subject: drm/amdgpu: remove the queue manager MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not needed any more since that is now done by the scheduler. 
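With load balancing handled by the scheduler, the per-context queue manager (identity map plus LRU policy for SDMA and compute) has no job left: a userspace (ip_type, ip_instance, ring) triple can be turned into a kernel ring with a plain table lookup and range check. A sketch of a call site after this patch, mirroring the amdgpu_cs changes in the diff below:

	struct amdgpu_ring *ring;
	int r;

	/* The context resolves the user ring id itself now; the cached
	 * mapping and LRU policy of amdgpu_queue_mgr are gone. */
	r = amdgpu_ctx_get_ring(parser->ctx, chunk_ib->ip_type,
				chunk_ib->ip_instance, chunk_ib->ring,
				&ring);
	if (r)
		return r;
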
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 27 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 22 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 67 +++++- drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 316 -------------------------- 5 files changed, 75 insertions(+), 360 deletions(-) delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 7d7faaf299efb..860cb8731c7cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -51,8 +51,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ - amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o \ - amdgpu_ids.o + amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 07924d41ee893..20e81df5cd947 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -477,29 +477,6 @@ struct amdgpu_ib { extern const struct drm_sched_backend_ops amdgpu_sched_ops; -/* - * Queue manager - */ -struct amdgpu_queue_mapper { - int hw_ip; - struct mutex lock; - /* protected by lock */ - struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS]; -}; - -struct amdgpu_queue_mgr { - struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM]; -}; - -int amdgpu_queue_mgr_init(struct amdgpu_device *adev, - struct amdgpu_queue_mgr *mgr); -int amdgpu_queue_mgr_fini(struct amdgpu_device *adev, - struct amdgpu_queue_mgr *mgr); -int amdgpu_queue_mgr_map(struct amdgpu_device *adev, - struct amdgpu_queue_mgr *mgr, - u32 hw_ip, u32 instance, u32 ring, - struct amdgpu_ring **out_ring); - /* * context related structures */ @@ -513,7 +490,6 @@ struct amdgpu_ctx_ring { struct amdgpu_ctx { struct kref refcount; struct amdgpu_device *adev; - struct amdgpu_queue_mgr queue_mgr; unsigned reset_counter; unsigned reset_counter_query; uint32_t vram_lost_counter; @@ -537,6 +513,9 @@ struct amdgpu_ctx_mgr { struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); int amdgpu_ctx_put(struct amdgpu_ctx *ctx); +int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, + u32 hw_ip, u32 instance, u32 ring, + struct amdgpu_ring **out_ring); int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, struct dma_fence *fence, uint64_t *seq); struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index dc3b2f980d871..55667ab4fbf5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1008,8 +1008,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, return -EINVAL; } - r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type, - chunk_ib->ip_instance, chunk_ib->ring, &ring); + r = amdgpu_ctx_get_ring(parser->ctx, chunk_ib->ip_type, + chunk_ib->ip_instance, chunk_ib->ring, + &ring); if (r) return r; @@ -1067,10 +1068,9 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, if (ctx 
== NULL) return -EINVAL; - r = amdgpu_queue_mgr_map(p->adev, &ctx->queue_mgr, - deps[i].ip_type, - deps[i].ip_instance, - deps[i].ring, &ring); + r = amdgpu_ctx_get_ring(ctx, deps[i].ip_type, + deps[i].ip_instance, + deps[i].ring, &ring); if (r) { amdgpu_ctx_put(ctx); return r; @@ -1331,7 +1331,6 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { union drm_amdgpu_wait_cs *wait = data; - struct amdgpu_device *adev = dev->dev_private; unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); struct amdgpu_ring *ring = NULL; struct amdgpu_ctx *ctx; @@ -1342,9 +1341,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, if (ctx == NULL) return -EINVAL; - r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, - wait->in.ip_type, wait->in.ip_instance, - wait->in.ring, &ring); + r = amdgpu_ctx_get_ring(ctx, wait->in.ip_type, wait->in.ip_instance, + wait->in.ring, &ring); if (r) { amdgpu_ctx_put(ctx); return r; @@ -1391,8 +1389,8 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev, if (ctx == NULL) return ERR_PTR(-EINVAL); - r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type, - user->ip_instance, user->ring, &ring); + r = amdgpu_ctx_get_ring(ctx, user->ip_type, user->ip_instance, + user->ring, &ring); if (r) { amdgpu_ctx_put(ctx); return ERR_PTR(r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index a078e68e03192..e5acc72b05d2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -121,10 +121,6 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, goto failed; } - r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr); - if (r) - goto failed; - return 0; failed: @@ -150,13 +146,72 @@ static void amdgpu_ctx_fini(struct kref *ref) kfree(ctx->fences); ctx->fences = NULL; - amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr); - mutex_destroy(&ctx->lock); kfree(ctx); } +int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, + u32 hw_ip, u32 instance, u32 ring, + struct amdgpu_ring **out_ring) +{ + struct amdgpu_device *adev = ctx->adev; + unsigned num_rings = 0; + + /* Right now all IPs have only one instance - multiple rings. 
*/ + if (instance != 0) { + DRM_DEBUG("invalid ip instance: %d\n", instance); + return -EINVAL; + } + + switch (hw_ip) { + case AMDGPU_HW_IP_GFX: + *out_ring = &adev->gfx.gfx_ring[ring]; + num_rings = adev->gfx.num_gfx_rings; + break; + case AMDGPU_HW_IP_COMPUTE: + *out_ring = &adev->gfx.compute_ring[ring]; + num_rings = adev->gfx.num_compute_rings; + break; + case AMDGPU_HW_IP_DMA: + *out_ring = &adev->sdma.instance[ring].ring; + num_rings = adev->sdma.num_instances; + break; + case AMDGPU_HW_IP_UVD: + *out_ring = &adev->uvd.inst[0].ring; + num_rings = adev->uvd.num_uvd_inst; + break; + case AMDGPU_HW_IP_VCE: + *out_ring = &adev->vce.ring[ring]; + num_rings = adev->vce.num_rings; + break; + case AMDGPU_HW_IP_UVD_ENC: + *out_ring = &adev->uvd.inst[0].ring_enc[ring]; + num_rings = adev->uvd.num_enc_rings; + break; + case AMDGPU_HW_IP_VCN_DEC: + *out_ring = &adev->vcn.ring_dec; + num_rings = 1; + break; + case AMDGPU_HW_IP_VCN_ENC: + *out_ring = &adev->vcn.ring_enc[ring]; + num_rings = adev->vcn.num_enc_rings; + break; + case AMDGPU_HW_IP_VCN_JPEG: + *out_ring = &adev->vcn.ring_jpeg; + num_rings = 1; + break; + default: + DRM_ERROR("unknown HW IP type: %d\n", hw_ip); + return -EINVAL; + } + + if (ring > num_rings) + return -EINVAL; + + return 0; +} + static int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, struct drm_file *filp, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c deleted file mode 100644 index a172bba32b45c..0000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c +++ /dev/null @@ -1,316 +0,0 @@ -/* - * Copyright 2017 Valve Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Andres Rodriguez - */ - -#include "amdgpu.h" -#include "amdgpu_ring.h" - -static int amdgpu_queue_mapper_init(struct amdgpu_queue_mapper *mapper, - int hw_ip) -{ - if (!mapper) - return -EINVAL; - - if (hw_ip > AMDGPU_MAX_IP_NUM) - return -EINVAL; - - mapper->hw_ip = hw_ip; - mutex_init(&mapper->lock); - - memset(mapper->queue_map, 0, sizeof(mapper->queue_map)); - - return 0; -} - -static struct amdgpu_ring *amdgpu_get_cached_map(struct amdgpu_queue_mapper *mapper, - int ring) -{ - return mapper->queue_map[ring]; -} - -static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper, - int ring, struct amdgpu_ring *pring) -{ - if (WARN_ON(mapper->queue_map[ring])) { - DRM_ERROR("Un-expected ring re-map\n"); - return -EINVAL; - } - - mapper->queue_map[ring] = pring; - - return 0; -} - -static int amdgpu_identity_map(struct amdgpu_device *adev, - struct amdgpu_queue_mapper *mapper, - u32 ring, - struct amdgpu_ring **out_ring) -{ - switch (mapper->hw_ip) { - case AMDGPU_HW_IP_GFX: - *out_ring = &adev->gfx.gfx_ring[ring]; - break; - case AMDGPU_HW_IP_COMPUTE: - *out_ring = &adev->gfx.compute_ring[ring]; - break; - case AMDGPU_HW_IP_DMA: - *out_ring = &adev->sdma.instance[ring].ring; - break; - case AMDGPU_HW_IP_UVD: - *out_ring = &adev->uvd.inst[0].ring; - break; - case AMDGPU_HW_IP_VCE: - *out_ring = &adev->vce.ring[ring]; - break; - case AMDGPU_HW_IP_UVD_ENC: - *out_ring = &adev->uvd.inst[0].ring_enc[ring]; - break; - case AMDGPU_HW_IP_VCN_DEC: - *out_ring = &adev->vcn.ring_dec; - break; - case AMDGPU_HW_IP_VCN_ENC: - *out_ring = &adev->vcn.ring_enc[ring]; - break; - case AMDGPU_HW_IP_VCN_JPEG: - *out_ring = &adev->vcn.ring_jpeg; - break; - default: - *out_ring = NULL; - DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip); - return -EINVAL; - } - - return amdgpu_update_cached_map(mapper, ring, *out_ring); -} - -static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip) -{ - switch (hw_ip) { - case AMDGPU_HW_IP_GFX: - return AMDGPU_RING_TYPE_GFX; - case AMDGPU_HW_IP_COMPUTE: - return AMDGPU_RING_TYPE_COMPUTE; - case AMDGPU_HW_IP_DMA: - return AMDGPU_RING_TYPE_SDMA; - case AMDGPU_HW_IP_UVD: - return AMDGPU_RING_TYPE_UVD; - case AMDGPU_HW_IP_VCE: - return AMDGPU_RING_TYPE_VCE; - default: - DRM_ERROR("Invalid HW IP specified %d\n", hw_ip); - return -1; - } -} - -static int amdgpu_lru_map(struct amdgpu_device *adev, - struct amdgpu_queue_mapper *mapper, - u32 user_ring, bool lru_pipe_order, - struct amdgpu_ring **out_ring) -{ - int r, i, j; - int ring_type = amdgpu_hw_ip_to_ring_type(mapper->hw_ip); - int ring_blacklist[AMDGPU_MAX_RINGS]; - struct amdgpu_ring *ring; - - /* 0 is a valid ring index, so initialize to -1 */ - memset(ring_blacklist, 0xff, sizeof(ring_blacklist)); - - for (i = 0, j = 0; i < AMDGPU_MAX_RINGS; i++) { - ring = mapper->queue_map[i]; - if (ring) - ring_blacklist[j++] = ring->idx; - } - - r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist, - j, lru_pipe_order, out_ring); - if (r) - return r; - - return amdgpu_update_cached_map(mapper, user_ring, *out_ring); -} - -/** - * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct - * - * @adev: amdgpu_device pointer - * @mgr: amdgpu_queue_mgr structure holding queue information - * - * Initialize the the selected @mgr (all asics). - * - * Returns 0 on success, error on failure. 
- */ -int amdgpu_queue_mgr_init(struct amdgpu_device *adev, - struct amdgpu_queue_mgr *mgr) -{ - int i, r; - - if (!adev || !mgr) - return -EINVAL; - - memset(mgr, 0, sizeof(*mgr)); - - for (i = 0; i < AMDGPU_MAX_IP_NUM; ++i) { - r = amdgpu_queue_mapper_init(&mgr->mapper[i], i); - if (r) - return r; - } - - return 0; -} - -/** - * amdgpu_queue_mgr_fini - de-initialize an amdgpu_queue_mgr struct - * - * @adev: amdgpu_device pointer - * @mgr: amdgpu_queue_mgr structure holding queue information - * - * De-initialize the the selected @mgr (all asics). - * - * Returns 0 on success, error on failure. - */ -int amdgpu_queue_mgr_fini(struct amdgpu_device *adev, - struct amdgpu_queue_mgr *mgr) -{ - return 0; -} - -/** - * amdgpu_queue_mgr_map - Map a userspace ring id to an amdgpu_ring - * - * @adev: amdgpu_device pointer - * @mgr: amdgpu_queue_mgr structure holding queue information - * @hw_ip: HW IP enum - * @instance: HW instance - * @ring: user ring id - * @our_ring: pointer to mapped amdgpu_ring - * - * Map a userspace ring id to an appropriate kernel ring. Different - * policies are configurable at a HW IP level. - * - * Returns 0 on success, error on failure. - */ -int amdgpu_queue_mgr_map(struct amdgpu_device *adev, - struct amdgpu_queue_mgr *mgr, - u32 hw_ip, u32 instance, u32 ring, - struct amdgpu_ring **out_ring) -{ - int i, r, ip_num_rings = 0; - struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip]; - - if (!adev || !mgr || !out_ring) - return -EINVAL; - - if (hw_ip >= AMDGPU_MAX_IP_NUM) - return -EINVAL; - - if (ring >= AMDGPU_MAX_RINGS) - return -EINVAL; - - /* Right now all IPs have only one instance - multiple rings. */ - if (instance != 0) { - DRM_DEBUG("invalid ip instance: %d\n", instance); - return -EINVAL; - } - - switch (hw_ip) { - case AMDGPU_HW_IP_GFX: - ip_num_rings = adev->gfx.num_gfx_rings; - break; - case AMDGPU_HW_IP_COMPUTE: - ip_num_rings = adev->gfx.num_compute_rings; - break; - case AMDGPU_HW_IP_DMA: - ip_num_rings = adev->sdma.num_instances; - break; - case AMDGPU_HW_IP_UVD: - for (i = 0; i < adev->uvd.num_uvd_inst; i++) { - if (!(adev->uvd.harvest_config & (1 << i))) - ip_num_rings++; - } - break; - case AMDGPU_HW_IP_VCE: - ip_num_rings = adev->vce.num_rings; - break; - case AMDGPU_HW_IP_UVD_ENC: - for (i = 0; i < adev->uvd.num_uvd_inst; i++) { - if (!(adev->uvd.harvest_config & (1 << i))) - ip_num_rings++; - } - ip_num_rings = - adev->uvd.num_enc_rings * ip_num_rings; - break; - case AMDGPU_HW_IP_VCN_DEC: - ip_num_rings = 1; - break; - case AMDGPU_HW_IP_VCN_ENC: - ip_num_rings = adev->vcn.num_enc_rings; - break; - case AMDGPU_HW_IP_VCN_JPEG: - ip_num_rings = 1; - break; - default: - DRM_DEBUG("unknown ip type: %d\n", hw_ip); - return -EINVAL; - } - - if (ring >= ip_num_rings) { - DRM_DEBUG("Ring index:%d exceeds maximum:%d for ip:%d\n", - ring, ip_num_rings, hw_ip); - return -EINVAL; - } - - mutex_lock(&mapper->lock); - - *out_ring = amdgpu_get_cached_map(mapper, ring); - if (*out_ring) { - /* cache hit */ - r = 0; - goto out_unlock; - } - - switch (mapper->hw_ip) { - case AMDGPU_HW_IP_GFX: - case AMDGPU_HW_IP_UVD: - case AMDGPU_HW_IP_VCE: - case AMDGPU_HW_IP_UVD_ENC: - case AMDGPU_HW_IP_VCN_DEC: - case AMDGPU_HW_IP_VCN_ENC: - case AMDGPU_HW_IP_VCN_JPEG: - r = amdgpu_identity_map(adev, mapper, ring, out_ring); - break; - case AMDGPU_HW_IP_DMA: - r = amdgpu_lru_map(adev, mapper, ring, false, out_ring); - break; - case AMDGPU_HW_IP_COMPUTE: - r = amdgpu_lru_map(adev, mapper, ring, true, out_ring); - break; - default: - *out_ring = NULL; - r = -EINVAL; - 
DRM_DEBUG("unknown HW IP type: %d\n", mapper->hw_ip); - } - -out_unlock: - mutex_unlock(&mapper->lock); - return r; -} -- cgit v1.2.3 From 0d346a14c634120046d194377e2cb5b387a6c1c6 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 19 Jul 2018 14:22:25 +0200 Subject: drm/amdgpu: use entity instead of ring for CS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Further demangle ring from entity handling. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 66 ++++++++++++++++--------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 53 ++++++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 16 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 4 +- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 3 +- 6 files changed, 78 insertions(+), 66 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 50eeb7c1350e3..6265b88135fc4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -523,7 +523,7 @@ struct amdgpu_cs_parser { /* scheduler job object */ struct amdgpu_job *job; - struct amdgpu_ring *ring; + struct drm_sched_entity *entity; /* buffer objects */ struct ww_acquire_ctx ticket; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 55667ab4fbf5c..313ac971eaafe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -893,13 +893,13 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, struct amdgpu_cs_parser *p) { + struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched); struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - struct amdgpu_ring *ring = p->ring; int r; /* Only for UVD/VCE VM emulation */ - if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) { + if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) { unsigned i, j; for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) { @@ -940,7 +940,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, offset = m->start * AMDGPU_GPU_PAGE_SIZE; kptr += va_start - offset; - if (p->ring->funcs->parse_cs) { + if (ring->funcs->parse_cs) { memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); amdgpu_bo_kunmap(aobj); @@ -979,14 +979,15 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, { struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; - int i, j; int r, ce_preempt = 0, de_preempt = 0; + struct amdgpu_ring *ring; + int i, j; for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) { struct amdgpu_cs_chunk *chunk; struct amdgpu_ib *ib; struct drm_amdgpu_cs_chunk_ib *chunk_ib; - struct amdgpu_ring *ring; + struct drm_sched_entity *entity; chunk = &parser->chunks[i]; ib = &parser->job->ibs[j]; @@ -1008,9 +1009,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, return -EINVAL; } - r = amdgpu_ctx_get_ring(parser->ctx, chunk_ib->ip_type, - chunk_ib->ip_instance, chunk_ib->ring, - &ring); + r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type, + chunk_ib->ip_instance, chunk_ib->ring, + &entity); if (r) return r; @@ -1018,14 +1019,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, parser->job->preamble_status |= 
AMDGPU_PREAMBLE_IB_PRESENT; - if (parser->ring && parser->ring != ring) + if (parser->entity && parser->entity != entity) return -EINVAL; - parser->ring = ring; + parser->entity = entity; - r = amdgpu_ib_get(adev, vm, - ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0, - ib); + ring = to_amdgpu_ring(entity->rq->sched); + r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ? + chunk_ib->ib_bytes : 0, ib); if (r) { DRM_ERROR("Failed to get ib !\n"); return r; @@ -1039,12 +1040,13 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, } /* UVD & VCE fw doesn't support user fences */ + ring = to_amdgpu_ring(parser->entity->rq->sched); if (parser->job->uf_addr && ( - parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD || - parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE)) + ring->funcs->type == AMDGPU_RING_TYPE_UVD || + ring->funcs->type == AMDGPU_RING_TYPE_VCE)) return -EINVAL; - return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx); + return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity); } static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, @@ -1060,23 +1062,23 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, sizeof(struct drm_amdgpu_cs_chunk_dep); for (i = 0; i < num_deps; ++i) { - struct amdgpu_ring *ring; struct amdgpu_ctx *ctx; + struct drm_sched_entity *entity; struct dma_fence *fence; ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id); if (ctx == NULL) return -EINVAL; - r = amdgpu_ctx_get_ring(ctx, deps[i].ip_type, - deps[i].ip_instance, - deps[i].ring, &ring); + r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type, + deps[i].ip_instance, + deps[i].ring, &entity); if (r) { amdgpu_ctx_put(ctx); return r; } - fence = amdgpu_ctx_get_fence(ctx, ring, + fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle); if (IS_ERR(fence)) { r = PTR_ERR(fence); @@ -1195,9 +1197,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - struct amdgpu_ring *ring = p->ring; - struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity; + struct drm_sched_entity *entity = p->entity; enum drm_sched_priority priority; + struct amdgpu_ring *ring; struct amdgpu_bo_list_entry *e; struct amdgpu_job *job; uint64_t seq; @@ -1227,7 +1229,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job->owner = p->filp; p->fence = dma_fence_get(&job->base.s_fence->finished); - r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq); + r = amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq); if (r) { dma_fence_put(p->fence); dma_fence_put(&job->base.s_fence->finished); @@ -1332,7 +1334,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, { union drm_amdgpu_wait_cs *wait = data; unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); - struct amdgpu_ring *ring = NULL; + struct drm_sched_entity *entity; struct amdgpu_ctx *ctx; struct dma_fence *fence; long r; @@ -1341,14 +1343,14 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, if (ctx == NULL) return -EINVAL; - r = amdgpu_ctx_get_ring(ctx, wait->in.ip_type, wait->in.ip_instance, - wait->in.ring, &ring); + r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance, + wait->in.ring, &entity); if (r) { amdgpu_ctx_put(ctx); return r; } - fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle); + fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle); if (IS_ERR(fence)) r = PTR_ERR(fence); else if (fence) { @@ -1380,7 +1382,7 @@ static struct dma_fence *amdgpu_cs_get_fence(struct 
amdgpu_device *adev, struct drm_file *filp, struct drm_amdgpu_fence *user) { - struct amdgpu_ring *ring; + struct drm_sched_entity *entity; struct amdgpu_ctx *ctx; struct dma_fence *fence; int r; @@ -1389,14 +1391,14 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev, if (ctx == NULL) return ERR_PTR(-EINVAL); - r = amdgpu_ctx_get_ring(ctx, user->ip_type, user->ip_instance, - user->ring, &ring); + r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance, + user->ring, &entity); if (r) { amdgpu_ctx_put(ctx); return ERR_PTR(r); } - fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no); + fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no); amdgpu_ctx_put(ctx); return fence; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index e5acc72b05d2a..0a6cd1202ee50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -27,6 +27,9 @@ #include "amdgpu.h" #include "amdgpu_sched.h" +#define to_amdgpu_ctx_ring(e) \ + container_of((e), struct amdgpu_ctx_ring, entity) + static int amdgpu_ctx_priority_permit(struct drm_file *filp, enum drm_sched_priority priority) { @@ -151,12 +154,12 @@ static void amdgpu_ctx_fini(struct kref *ref) kfree(ctx); } -int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, - u32 hw_ip, u32 instance, u32 ring, - struct amdgpu_ring **out_ring) +int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, + u32 ring, struct drm_sched_entity **entity) { struct amdgpu_device *adev = ctx->adev; unsigned num_rings = 0; + struct amdgpu_ring *out_ring; /* Right now all IPs have only one instance - multiple rings. */ if (instance != 0) { @@ -166,39 +169,39 @@ int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, switch (hw_ip) { case AMDGPU_HW_IP_GFX: - *out_ring = &adev->gfx.gfx_ring[ring]; + out_ring = &adev->gfx.gfx_ring[ring]; num_rings = adev->gfx.num_gfx_rings; break; case AMDGPU_HW_IP_COMPUTE: - *out_ring = &adev->gfx.compute_ring[ring]; + out_ring = &adev->gfx.compute_ring[ring]; num_rings = adev->gfx.num_compute_rings; break; case AMDGPU_HW_IP_DMA: - *out_ring = &adev->sdma.instance[ring].ring; + out_ring = &adev->sdma.instance[ring].ring; num_rings = adev->sdma.num_instances; break; case AMDGPU_HW_IP_UVD: - *out_ring = &adev->uvd.inst[0].ring; + out_ring = &adev->uvd.inst[0].ring; num_rings = adev->uvd.num_uvd_inst; break; case AMDGPU_HW_IP_VCE: - *out_ring = &adev->vce.ring[ring]; + out_ring = &adev->vce.ring[ring]; num_rings = adev->vce.num_rings; break; case AMDGPU_HW_IP_UVD_ENC: - *out_ring = &adev->uvd.inst[0].ring_enc[ring]; + out_ring = &adev->uvd.inst[0].ring_enc[ring]; num_rings = adev->uvd.num_enc_rings; break; case AMDGPU_HW_IP_VCN_DEC: - *out_ring = &adev->vcn.ring_dec; + out_ring = &adev->vcn.ring_dec; num_rings = 1; break; case AMDGPU_HW_IP_VCN_ENC: - *out_ring = &adev->vcn.ring_enc[ring]; + out_ring = &adev->vcn.ring_enc[ring]; num_rings = adev->vcn.num_enc_rings; break; case AMDGPU_HW_IP_VCN_JPEG: - *out_ring = &adev->vcn.ring_jpeg; + out_ring = &adev->vcn.ring_jpeg; num_rings = 1; break; default: @@ -209,6 +212,7 @@ int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, if (ring > num_rings) return -EINVAL; + *entity = &ctx->rings[out_ring->idx].entity; return 0; } @@ -414,13 +418,14 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx) return 0; } -int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, - struct dma_fence *fence, uint64_t* handler) +int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, + struct drm_sched_entity *entity, 
+ struct dma_fence *fence, uint64_t* handle) { - struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; + struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity); uint64_t seq = cring->sequence; - unsigned idx = 0; struct dma_fence *other = NULL; + unsigned idx = 0; idx = seq & (amdgpu_sched_jobs - 1); other = cring->fences[idx]; @@ -435,22 +440,23 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, spin_unlock(&ctx->ring_lock); dma_fence_put(other); - if (handler) - *handler = seq; + if (handle) + *handle = seq; return 0; } struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, - struct amdgpu_ring *ring, uint64_t seq) + struct drm_sched_entity *entity, + uint64_t seq) { - struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx]; + struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity); struct dma_fence *fence; spin_lock(&ctx->ring_lock); if (seq == ~0ull) - seq = ctx->rings[ring->idx].sequence - 1; + seq = cring->sequence - 1; if (seq >= cring->sequence) { spin_unlock(&ctx->ring_lock); @@ -494,9 +500,10 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, } } -int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id) +int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, + struct drm_sched_entity *entity) { - struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id]; + struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity); unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1); struct dma_fence *other = cring->fences[idx]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index 5664b1f541424..609f925b076cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -61,20 +61,22 @@ struct amdgpu_ctx_mgr { struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); int amdgpu_ctx_put(struct amdgpu_ctx *ctx); -int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx, - u32 hw_ip, u32 instance, u32 ring, - struct amdgpu_ring **out_ring); -int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, - struct dma_fence *fence, uint64_t *seq); +int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, + u32 ring, struct drm_sched_entity **entity); +int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, + struct drm_sched_entity *entity, + struct dma_fence *fence, uint64_t *seq); struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, - struct amdgpu_ring *ring, uint64_t seq); + struct drm_sched_entity *entity, + uint64_t seq); void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, enum drm_sched_priority priority); int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); -int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id); +int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, + struct drm_sched_entity *entity); void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 8c2dab20eb367..2e87414422f9a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs, TP_fast_assign( __entry->bo_list = p->bo_list; - __entry->ring = p->ring->idx; + __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx; __entry->dw = p->job->ibs[i].length_dw; __entry->fences = amdgpu_fence_count_emitted( - p->ring); + 
to_amdgpu_ring(p->entity->rq->sched)); ), TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", __entry->bo_list, __entry->ring, __entry->dw, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 9b7f8469bc5c0..e33425513a894 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1264,11 +1264,12 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, uint32_t ib_idx) { + struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched); struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; unsigned i; /* No patching necessary for the first instance */ - if (!p->ring->me) + if (!ring->me) return 0; for (i = 0; i < ib->length_dw; i += 2) { -- cgit v1.2.3 From 1b1f2fecb699bb4ccc3cb2fafe92950e9bdb39de Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 1 Aug 2018 16:00:52 +0200 Subject: drm/amdgpu: rework ctx entity creation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use a fixed number of entities for each hardware IP. The number of compute entities is reduced to four, SDMA keeps it two entities and all other engines just expose one entity. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 291 ++++++++++++++++---------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 30 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 36 ++-- 3 files changed, 190 insertions(+), 167 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 0a6cd1202ee50..987b7f2564634 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -27,8 +27,29 @@ #include "amdgpu.h" #include "amdgpu_sched.h" -#define to_amdgpu_ctx_ring(e) \ - container_of((e), struct amdgpu_ctx_ring, entity) +#define to_amdgpu_ctx_entity(e) \ + container_of((e), struct amdgpu_ctx_entity, entity) + +const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { + [AMDGPU_HW_IP_GFX] = 1, + [AMDGPU_HW_IP_COMPUTE] = 4, + [AMDGPU_HW_IP_DMA] = 2, + [AMDGPU_HW_IP_UVD] = 1, + [AMDGPU_HW_IP_VCE] = 1, + [AMDGPU_HW_IP_UVD_ENC] = 1, + [AMDGPU_HW_IP_VCN_DEC] = 1, + [AMDGPU_HW_IP_VCN_ENC] = 1, +}; + +static int amdgput_ctx_total_num_entities(void) +{ + unsigned i, num_entities = 0; + + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) + num_entities += amdgpu_ctx_num_entities[i]; + + return num_entities; +} static int amdgpu_ctx_priority_permit(struct drm_file *filp, enum drm_sched_priority priority) @@ -51,9 +72,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct drm_file *filp, struct amdgpu_ctx *ctx) { - struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS]; - struct drm_sched_rq *comp_rqs[AMDGPU_MAX_RINGS]; - unsigned i, j, num_sdma_rqs, num_comp_rqs; + unsigned num_entities = amdgput_ctx_total_num_entities(); + unsigned i, j; int r; if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) @@ -65,19 +85,33 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, memset(ctx, 0, sizeof(*ctx)); ctx->adev = adev; - kref_init(&ctx->refcount); - spin_lock_init(&ctx->ring_lock); - ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS, + + ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities, sizeof(struct dma_fence*), GFP_KERNEL); if (!ctx->fences) return -ENOMEM; - mutex_init(&ctx->lock); + ctx->entities[0] = 
kcalloc(num_entities, + sizeof(struct amdgpu_ctx_entity), + GFP_KERNEL); + if (!ctx->entities[0]) { + r = -ENOMEM; + goto error_free_fences; + } - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - ctx->rings[i].sequence = 1; - ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i]; + for (i = 0; i < num_entities; ++i) { + struct amdgpu_ctx_entity *entity = &ctx->entities[0][i]; + + entity->sequence = 1; + entity->fences = &ctx->fences[amdgpu_sched_jobs * i]; } + for (i = 1; i < AMDGPU_HW_IP_NUM; ++i) + ctx->entities[i] = ctx->entities[i - 1] + + amdgpu_ctx_num_entities[i - 1]; + + kref_init(&ctx->refcount); + spin_lock_init(&ctx->ring_lock); + mutex_init(&ctx->lock); ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); ctx->reset_counter_query = ctx->reset_counter; @@ -85,50 +119,70 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->init_priority = priority; ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; - num_sdma_rqs = 0; - num_comp_rqs = 0; - for (i = 0; i < adev->num_rings; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - struct drm_sched_rq *rq; - - rq = &ring->sched.sched_rq[priority]; - if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA) - sdma_rqs[num_sdma_rqs++] = rq; - else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) - comp_rqs[num_comp_rqs++] = rq; - } - - /* create context entity for each ring */ - for (i = 0; i < adev->num_rings; i++) { - struct amdgpu_ring *ring = adev->rings[i]; + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; + struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS]; + unsigned num_rings; + + switch (i) { + case AMDGPU_HW_IP_GFX: + rings[0] = &adev->gfx.gfx_ring[0]; + num_rings = 1; + break; + case AMDGPU_HW_IP_COMPUTE: + for (j = 0; j < adev->gfx.num_compute_rings; ++j) + rings[j] = &adev->gfx.compute_ring[j]; + num_rings = adev->gfx.num_compute_rings; + break; + case AMDGPU_HW_IP_DMA: + for (j = 0; j < adev->sdma.num_instances; ++j) + rings[j] = &adev->sdma.instance[j].ring; + num_rings = adev->sdma.num_instances; + break; + case AMDGPU_HW_IP_UVD: + rings[0] = &adev->uvd.inst[0].ring; + num_rings = 1; + break; + case AMDGPU_HW_IP_VCE: + rings[0] = &adev->vce.ring[0]; + num_rings = 1; + break; + case AMDGPU_HW_IP_UVD_ENC: + rings[0] = &adev->uvd.inst[0].ring_enc[0]; + num_rings = 1; + break; + case AMDGPU_HW_IP_VCN_DEC: + rings[0] = &adev->vcn.ring_dec; + num_rings = 1; + break; + case AMDGPU_HW_IP_VCN_ENC: + rings[0] = &adev->vcn.ring_enc[0]; + num_rings = 1; + break; + case AMDGPU_HW_IP_VCN_JPEG: + rings[0] = &adev->vcn.ring_jpeg; + num_rings = 1; + break; + } - if (ring == &adev->gfx.kiq.ring) - continue; + for (j = 0; j < num_rings; ++j) + rqs[j] = &rings[j]->sched.sched_rq[priority]; - if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA) { - r = drm_sched_entity_init(&ctx->rings[i].entity, - sdma_rqs, num_sdma_rqs, - &ctx->guilty); - } else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { - r = drm_sched_entity_init(&ctx->rings[i].entity, - comp_rqs, num_comp_rqs, - &ctx->guilty); - } else { - struct drm_sched_rq *rq; - - rq = &ring->sched.sched_rq[priority]; - r = drm_sched_entity_init(&ctx->rings[i].entity, - &rq, 1, &ctx->guilty); - } + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) + r = drm_sched_entity_init(&ctx->entities[i][j].entity, + rqs, num_rings, &ctx->guilty); if (r) - goto failed; + goto error_cleanup_entities; } return 0; -failed: - for (j = 0; j < i; j++) - drm_sched_entity_destroy(&ctx->rings[j].entity); +error_cleanup_entities: + for (i = 0; i < num_entities; ++i) + 
drm_sched_entity_destroy(&ctx->entities[0][i].entity); + kfree(ctx->entities[0]); + +error_free_fences: kfree(ctx->fences); ctx->fences = NULL; return r; @@ -137,17 +191,18 @@ failed: static void amdgpu_ctx_fini(struct kref *ref) { struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount); + unsigned num_entities = amdgput_ctx_total_num_entities(); struct amdgpu_device *adev = ctx->adev; unsigned i, j; if (!adev) return; - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) + for (i = 0; i < num_entities; ++i) for (j = 0; j < amdgpu_sched_jobs; ++j) - dma_fence_put(ctx->rings[i].fences[j]); + dma_fence_put(ctx->entities[0][i].fences[j]); kfree(ctx->fences); - ctx->fences = NULL; + kfree(ctx->entities[0]); mutex_destroy(&ctx->lock); @@ -157,9 +212,10 @@ static void amdgpu_ctx_fini(struct kref *ref) int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, u32 ring, struct drm_sched_entity **entity) { - struct amdgpu_device *adev = ctx->adev; - unsigned num_rings = 0; - struct amdgpu_ring *out_ring; + if (hw_ip >= AMDGPU_HW_IP_NUM) { + DRM_ERROR("unknown HW IP type: %d\n", hw_ip); + return -EINVAL; + } /* Right now all IPs have only one instance - multiple rings. */ if (instance != 0) { @@ -167,52 +223,12 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, return -EINVAL; } - switch (hw_ip) { - case AMDGPU_HW_IP_GFX: - out_ring = &adev->gfx.gfx_ring[ring]; - num_rings = adev->gfx.num_gfx_rings; - break; - case AMDGPU_HW_IP_COMPUTE: - out_ring = &adev->gfx.compute_ring[ring]; - num_rings = adev->gfx.num_compute_rings; - break; - case AMDGPU_HW_IP_DMA: - out_ring = &adev->sdma.instance[ring].ring; - num_rings = adev->sdma.num_instances; - break; - case AMDGPU_HW_IP_UVD: - out_ring = &adev->uvd.inst[0].ring; - num_rings = adev->uvd.num_uvd_inst; - break; - case AMDGPU_HW_IP_VCE: - out_ring = &adev->vce.ring[ring]; - num_rings = adev->vce.num_rings; - break; - case AMDGPU_HW_IP_UVD_ENC: - out_ring = &adev->uvd.inst[0].ring_enc[ring]; - num_rings = adev->uvd.num_enc_rings; - break; - case AMDGPU_HW_IP_VCN_DEC: - out_ring = &adev->vcn.ring_dec; - num_rings = 1; - break; - case AMDGPU_HW_IP_VCN_ENC: - out_ring = &adev->vcn.ring_enc[ring]; - num_rings = adev->vcn.num_enc_rings; - break; - case AMDGPU_HW_IP_VCN_JPEG: - out_ring = &adev->vcn.ring_jpeg; - num_rings = 1; - break; - default: - DRM_ERROR("unknown HW IP type: %d\n", hw_ip); + if (ring >= amdgpu_ctx_num_entities[hw_ip]) { + DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring); return -EINVAL; } - if (ring > num_rings) - return -EINVAL; - - *entity = &ctx->rings[out_ring->idx].entity; + *entity = &ctx->entities[hw_ip][ring].entity; return 0; } @@ -252,17 +268,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev, static void amdgpu_ctx_do_release(struct kref *ref) { struct amdgpu_ctx *ctx; + unsigned num_entities; u32 i; ctx = container_of(ref, struct amdgpu_ctx, refcount); - for (i = 0; i < ctx->adev->num_rings; i++) { + num_entities = 0; + for (i = 0; i < AMDGPU_HW_IP_NUM; i++) + num_entities += amdgpu_ctx_num_entities[i]; - if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) - continue; - - drm_sched_entity_destroy(&ctx->rings[i].entity); - } + for (i = 0; i < num_entities; i++) + drm_sched_entity_destroy(&ctx->entities[0][i].entity); amdgpu_ctx_fini(ref); } @@ -422,21 +438,21 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity, struct dma_fence *fence, uint64_t* handle) { - struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity); - uint64_t seq = 
cring->sequence; + struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); + uint64_t seq = centity->sequence; struct dma_fence *other = NULL; unsigned idx = 0; idx = seq & (amdgpu_sched_jobs - 1); - other = cring->fences[idx]; + other = centity->fences[idx]; if (other) BUG_ON(!dma_fence_is_signaled(other)); dma_fence_get(fence); spin_lock(&ctx->ring_lock); - cring->fences[idx] = fence; - cring->sequence++; + centity->fences[idx] = fence; + centity->sequence++; spin_unlock(&ctx->ring_lock); dma_fence_put(other); @@ -450,26 +466,26 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity, uint64_t seq) { - struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity); + struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); struct dma_fence *fence; spin_lock(&ctx->ring_lock); if (seq == ~0ull) - seq = cring->sequence - 1; + seq = centity->sequence - 1; - if (seq >= cring->sequence) { + if (seq >= centity->sequence) { spin_unlock(&ctx->ring_lock); return ERR_PTR(-EINVAL); } - if (seq + amdgpu_sched_jobs < cring->sequence) { + if (seq + amdgpu_sched_jobs < centity->sequence) { spin_unlock(&ctx->ring_lock); return NULL; } - fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]); + fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]); spin_unlock(&ctx->ring_lock); return fence; @@ -478,23 +494,17 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, enum drm_sched_priority priority) { - int i; - struct amdgpu_device *adev = ctx->adev; - struct drm_sched_entity *entity; - struct amdgpu_ring *ring; + unsigned num_entities = amdgput_ctx_total_num_entities(); enum drm_sched_priority ctx_prio; + unsigned i; ctx->override_priority = priority; ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? 
ctx->init_priority : ctx->override_priority; - for (i = 0; i < adev->num_rings; i++) { - ring = adev->rings[i]; - entity = &ctx->rings[i].entity; - - if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) - continue; + for (i = 0; i < num_entities; i++) { + struct drm_sched_entity *entity = &ctx->entities[0][i].entity; drm_sched_entity_set_priority(entity, ctx_prio); } @@ -503,9 +513,9 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity) { - struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity); - unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1); - struct dma_fence *other = cring->fences[idx]; + struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); + unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1); + struct dma_fence *other = centity->fences[idx]; if (other) { signed long r; @@ -529,6 +539,7 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr) { + unsigned num_entities = amdgput_ctx_total_num_entities(); struct amdgpu_ctx *ctx; struct idr *idp; uint32_t id, i; @@ -544,13 +555,11 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr) return; } - for (i = 0; i < ctx->adev->num_rings; i++) { + for (i = 0; i < num_entities; i++) { + struct drm_sched_entity *entity; - if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) - continue; - - max_wait = drm_sched_entity_flush(&ctx->rings[i].entity, - max_wait); + entity = &ctx->entities[0][i].entity; + max_wait = drm_sched_entity_flush(entity, max_wait); } } mutex_unlock(&mgr->lock); @@ -558,6 +567,7 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr) void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) { + unsigned num_entities = amdgput_ctx_total_num_entities(); struct amdgpu_ctx *ctx; struct idr *idp; uint32_t id, i; @@ -569,16 +579,13 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) if (!ctx->adev) return; - for (i = 0; i < ctx->adev->num_rings; i++) { - - if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) - continue; - - if (kref_read(&ctx->refcount) == 1) - drm_sched_entity_fini(&ctx->rings[i].entity); - else - DRM_ERROR("ctx %p is still alive\n", ctx); + if (kref_read(&ctx->refcount) != 1) { + DRM_ERROR("ctx %p is still alive\n", ctx); + continue; } + + for (i = 0; i < num_entities; i++) + drm_sched_entity_fini(&ctx->entities[0][i].entity); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index 609f925b076cc..d67c1d285a4f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -29,26 +29,26 @@ struct drm_device; struct drm_file; struct amdgpu_fpriv; -struct amdgpu_ctx_ring { +struct amdgpu_ctx_entity { uint64_t sequence; struct dma_fence **fences; struct drm_sched_entity entity; }; struct amdgpu_ctx { - struct kref refcount; - struct amdgpu_device *adev; - unsigned reset_counter; - unsigned reset_counter_query; - uint32_t vram_lost_counter; - spinlock_t ring_lock; - struct dma_fence **fences; - struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; - bool preamble_presented; - enum drm_sched_priority init_priority; - enum drm_sched_priority override_priority; - struct mutex lock; - atomic_t guilty; + struct kref refcount; + struct amdgpu_device *adev; + unsigned reset_counter; + unsigned reset_counter_query; + uint32_t vram_lost_counter; + spinlock_t ring_lock; + struct dma_fence **fences; + struct amdgpu_ctx_entity 
*entities[AMDGPU_HW_IP_NUM]; + bool preamble_presented; + enum drm_sched_priority init_priority; + enum drm_sched_priority override_priority; + struct mutex lock; + atomic_t guilty; }; struct amdgpu_ctx_mgr { @@ -58,6 +58,8 @@ struct amdgpu_ctx_mgr { struct idr ctx_handles; }; +extern const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM]; + struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); int amdgpu_ctx_put(struct amdgpu_ctx *ctx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index bdb6362e95568..ad7978bab5fce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -270,7 +270,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, uint32_t ib_start_alignment = 0; uint32_t ib_size_alignment = 0; enum amd_ip_block_type type; - uint32_t ring_mask = 0; + unsigned int num_rings = 0; unsigned int i, j; if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT) @@ -280,21 +280,24 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_GFX: type = AMD_IP_BLOCK_TYPE_GFX; for (i = 0; i < adev->gfx.num_gfx_rings; i++) - ring_mask |= adev->gfx.gfx_ring[i].ready << i; + if (adev->gfx.gfx_ring[i].ready) + ++num_rings; ib_start_alignment = 32; ib_size_alignment = 32; break; case AMDGPU_HW_IP_COMPUTE: type = AMD_IP_BLOCK_TYPE_GFX; for (i = 0; i < adev->gfx.num_compute_rings; i++) - ring_mask |= adev->gfx.compute_ring[i].ready << i; + if (adev->gfx.compute_ring[i].ready) + ++num_rings; ib_start_alignment = 32; ib_size_alignment = 32; break; case AMDGPU_HW_IP_DMA: type = AMD_IP_BLOCK_TYPE_SDMA; for (i = 0; i < adev->sdma.num_instances; i++) - ring_mask |= adev->sdma.instance[i].ring.ready << i; + if (adev->sdma.instance[i].ring.ready) + ++num_rings; ib_start_alignment = 256; ib_size_alignment = 4; break; @@ -303,7 +306,9 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, for (i = 0; i < adev->uvd.num_uvd_inst; i++) { if (adev->uvd.harvest_config & (1 << i)) continue; - ring_mask |= adev->uvd.inst[i].ring.ready; + + if (adev->uvd.inst[i].ring.ready) + ++num_rings; } ib_start_alignment = 64; ib_size_alignment = 64; @@ -311,7 +316,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_VCE: type = AMD_IP_BLOCK_TYPE_VCE; for (i = 0; i < adev->vce.num_rings; i++) - ring_mask |= adev->vce.ring[i].ready << i; + if (adev->vce.ring[i].ready) + ++num_rings; ib_start_alignment = 4; ib_size_alignment = 1; break; @@ -320,28 +326,33 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, for (i = 0; i < adev->uvd.num_uvd_inst; i++) { if (adev->uvd.harvest_config & (1 << i)) continue; + for (j = 0; j < adev->uvd.num_enc_rings; j++) - ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j; + if (adev->uvd.inst[i].ring_enc[j].ready) + ++num_rings; } ib_start_alignment = 64; ib_size_alignment = 64; break; case AMDGPU_HW_IP_VCN_DEC: type = AMD_IP_BLOCK_TYPE_VCN; - ring_mask = adev->vcn.ring_dec.ready; + if (adev->vcn.ring_dec.ready) + ++num_rings; ib_start_alignment = 16; ib_size_alignment = 16; break; case AMDGPU_HW_IP_VCN_ENC: type = AMD_IP_BLOCK_TYPE_VCN; for (i = 0; i < adev->vcn.num_enc_rings; i++) - ring_mask |= adev->vcn.ring_enc[i].ready << i; + if (adev->vcn.ring_enc[i].ready) + ++num_rings; ib_start_alignment = 64; ib_size_alignment = 1; break; case AMDGPU_HW_IP_VCN_JPEG: type = AMD_IP_BLOCK_TYPE_VCN; - ring_mask = adev->vcn.ring_jpeg.ready; + if (adev->vcn.ring_jpeg.ready) + ++num_rings; ib_start_alignment = 16; 
ib_size_alignment = 16; break; @@ -357,10 +368,13 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (i == adev->num_ip_blocks) return 0; + num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type], + num_rings); + result->hw_ip_version_major = adev->ip_blocks[i].version->major; result->hw_ip_version_minor = adev->ip_blocks[i].version->minor; result->capabilities_flags = 0; - result->available_rings = ring_mask; + result->available_rings = (1 << num_rings) - 1; result->ib_start_alignment = ib_start_alignment; result->ib_size_alignment = ib_size_alignment; return 0; -- cgit v1.2.3 From 85eff20020a656b2d13b33dc4681523508fee037 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 24 Aug 2018 14:23:33 +0200 Subject: drm/amdgpu: amdgpu_ctx_add_fence can't fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No more waiting for a fence done here. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 +--------- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 8 +++----- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 6 +++--- 3 files changed, 7 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b62bbe71662dd..adc6a43e2333f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1217,15 +1217,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job->owner = p->filp; p->fence = dma_fence_get(&job->base.s_fence->finished); - r = amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq); - if (r) { - dma_fence_put(p->fence); - dma_fence_put(&job->base.s_fence->finished); - amdgpu_job_free(job); - amdgpu_mn_unlock(p->mn); - return r; - } - + amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq); amdgpu_cs_post_dependencies(p); if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) && diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 987b7f2564634..f9b54236102d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -434,9 +434,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx) return 0; } -int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, - struct drm_sched_entity *entity, - struct dma_fence *fence, uint64_t* handle) +void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, + struct drm_sched_entity *entity, + struct dma_fence *fence, uint64_t* handle) { struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); uint64_t seq = centity->sequence; @@ -458,8 +458,6 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, dma_fence_put(other); if (handle) *handle = seq; - - return 0; } struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index d67c1d285a4f3..b3b012c0a7da9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -65,9 +65,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx); int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, u32 ring, struct drm_sched_entity **entity); -int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, - struct drm_sched_entity *entity, - struct dma_fence *fence, uint64_t *seq); +void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, + struct drm_sched_entity *entity, + struct dma_fence 
*fence, uint64_t *seq); struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity, uint64_t seq); -- cgit v1.2.3
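
Taken together, the series replaces every (hw_ip, instance, ring) to amdgpu_ring lookup in the CS path with a scheduler entity, and the per-context fence bookkeeping moves with it. A condensed, hypothetical sketch of the resulting submission flow (the function name and the elided job handling are illustrative; the amdgpu_ctx_* calls have the signatures introduced above):

/* Not part of the patches above: end-to-end shape of a submission
 * after this series, heavily condensed. */
static int amdgpu_cs_flow_sketch(struct amdgpu_cs_parser *p,
				 struct drm_amdgpu_cs_chunk_ib *chunk_ib)
{
	struct drm_sched_entity *entity;
	uint64_t seq;
	int r;

	/* Userspace now selects a software entity, not a hardware ring;
	 * the entity may be backed by several rings (compute, SDMA). */
	r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
				  chunk_ib->ip_instance, chunk_ib->ring,
				  &entity);
	if (r)
		return r;

	/* ... build the job and push it to the entity ... */

	/* Pure bookkeeping since the last patch, so it cannot fail. */
	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);

	return 0;
}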