author    Linus Torvalds <torvalds@linux-foundation.org>    2024-11-21 14:56:17 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2024-11-21 14:56:17 -0800
commit    28eb75e178d389d325f1666e422bc13bbbb9804c (patch)
tree      20417b4e798f98fc5687e80c1e0126afcf437c70 /drivers/gpu/drm/scheduler/sched_entity.c
parent    071b34dcf71523a559b6c39f5d21a268a9531b50 (diff)
parent    a163b895077861598be48c1cf7f4a88413c28b22 (diff)
Merge tag 'drm-next-2024-11-21' of https://gitlab.freedesktop.org/drm/kernel
Pull drm updates from Dave Airlie:
 "There's a lot of rework, the panic helper support is being added to
  more drivers, v3d gets support for HW superpages, scheduler
  documentation, drm client and video aperture reworks, some new
  MAINTAINERS added, amdgpu has the usual lots of IP refactors, Intel
  has some Pantherlake enablement and xe is getting some SRIOV bits,
  but just lots of stuff everywhere.

  core:
   - split DSC helpers from DP helpers
   - clang build fixes for drm/mm test
   - drop simple pipeline support for gem vram
   - document submission error signaling
   - move drm_rect to drm core module from kms helper
   - add default client setup to most drivers
   - move to video aperture helpers instead of drm ones

  tests:
   - new framebuffer tests

  ttm:
   - remove swapped and pinned BOs from TTM lru

  panic:
   - fix uninit spinlock
   - add ABGR2101010 support

  bridge:
   - add TI TDP158 support
   - use standard PM OPS

  dma-fence:
   - use read_trylock instead of read_lock to help lockdep

  scheduler:
   - add errno to sched start to report different errors
   - add locking to drm_sched_entity_modify_sched
   - improve documentation

  xe:
   - add drm_line_printer
   - lots of refactoring
   - Enable Xe2 + PES disaggregation
   - add new ARL PCI ID
   - SRIOV development work
   - fix exec unnecessary implicit fence
   - define and parse OA sync props
   - forcewake refactoring

  i915:
   - Enable BMG/LNL ultra joiner
   - Enable 10bpx + CCS scanout on ICL+, fp16/CCS on TGL+
   - use DSB for plane/color mgmt
   - Arrow lake PCI IDs
   - lots of i915/xe display refactoring
   - enable PXP GuC autoteardown
   - Pantherlake (PTL) Xe3 LPD display enablement
   - Allow fastset HDR infoframe changes
   - write DP source OUI for non-eDP sinks
   - share PCI IDs between i915 and xe

  amdgpu:
   - SDMA queue reset support
   - SMU 13.0.6, JPEG 4.0.3 updates
   - Initial runtime repartitioning support
   - rework IP structs for multiple IP instances
   - Fetch EDID from _DDC if available
   - SMU13 zero rpm user control
   - lots of fixes/cleanups

  amdkfd:
   - Increase event FIFO size
   - add topology cap flag for per queue reset

  msm:
   - DPU:
      - SA8775P support
      - (disabled by default) MSM8917, MSM8937, MSM8953 and MSM8996 support
      - Enable large framebuffer support
      - Drop MSM8998 and SDM845
   - DP:
      - SA8775P support
   - GPU:
      - a7xx preemption support
      - Adreno A663 support

  ast:
   - warn about unsupported TX chips

  ivpu:
   - add coredump
   - add pantherlake support

  rockchip:
   - 4K@60Hz display enablement
   - generate pll programming tables

  panthor:
   - add timestamp query API
   - add realtime group priority
   - add fdinfo support

  etnaviv:
   - improve handling of DMA address limits
   - improve GPU hangcheck

  exynos:
   - Decon Exynos7870 support

  mediatek:
   - add OF graph support

  omap:
   - locking fixes

  bochs:
   - convert to gem/shmem from simpledrm

  v3d:
   - support big/super pages
   - add gemfs

  vc4:
   - BCM2712 support refactoring
   - add YUV444 format support

  udmabuf:
   - folio related fixes

  nouveau:
   - add panic support on nv50+"

* tag 'drm-next-2024-11-21' of https://gitlab.freedesktop.org/drm/kernel: (1583 commits)
  drm/xe/guc: Fix dereference before NULL check
  drm/amd: Fix initialization mistake for NBIO 7.7.0
  Revert "drm/amd/display: parse umc_info or vram_info based on ASIC"
  drm/amd/display: Fix failure to read vram info due to static BP_RESULT
  drm/amdgpu: enable GTT fallback handling for dGPUs only
  drm/amd/amdgpu: limit single process inside MES
  drm/fourcc: add AMD_FMT_MOD_TILE_GFX9_4K_D_X
  drm/amdgpu/mes12: correct kiq unmap latency
  drm/amdgpu: Support vcn and jpeg error info parsing
  drm/amd : Update MES API header file for v11 & v12
  drm/amd/amdkfd: add/remove kfd queues on start/stop KFD scheduling
  drm/amdkfd: change kfd process kref count at creation
  drm/amdgpu: Cleanup shift coding style
  drm/amd/amdgpu: Increase MES log buffer to dump mes scratch data
  drm/amdgpu: Implement virt req_ras_err_count
  drm/amdgpu: VF Query RAS Caps from Host if supported
  drm/amdgpu: Add msg handlers for SRIOV RAS Telemetry
  drm/amdgpu: Update SRIOV Exchange Headers for RAS Telemetry Support
  drm/amd/display: 3.2.309
  drm/amd/display: Adjust VSDB parser for replay feature
  ...
Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_entity.c')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c  |  52
1 file changed, 31 insertions, 21 deletions
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index a75eede8bf8d..69bcf0e99d57 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -51,7 +51,7 @@
* drm_sched_entity_set_priority(). For changing the set of schedulers
* @sched_list at runtime see drm_sched_entity_modify_sched().
*
- * An entity is cleaned up by callind drm_sched_entity_fini(). See also
+ * An entity is cleaned up by calling drm_sched_entity_fini(). See also
* drm_sched_entity_destroy().
*
* Returns 0 on success or a negative error code on failure.
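As a reference for the lifecycle this comment block describes, here is a hedged sketch of entity setup and teardown. The example_ctx structure and function names are hypothetical; the drm_sched_entity_init()/drm_sched_entity_destroy() calls and the priority enum are the scheduler's documented API.

/* Hypothetical driver context owning one scheduler entity. */
struct example_ctx {
	struct drm_sched_entity entity;
	struct drm_gpu_scheduler *scheds[1];	/* must outlive the entity */
};

static int example_ctx_init(struct example_ctx *ctx,
			    struct drm_gpu_scheduler *sched)
{
	ctx->scheds[0] = sched;
	/* Normal priority, a single scheduler, no "guilty" tracking. */
	return drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
				     ctx->scheds, 1, NULL);
}

static void example_ctx_fini(struct example_ctx *ctx)
{
	/* Flushes outstanding jobs, then finalizes the entity. */
	drm_sched_entity_destroy(&ctx->entity);
}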
@@ -105,7 +105,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
/* We start in an idle state. */
complete_all(&entity->entity_idle);
- spin_lock_init(&entity->rq_lock);
+ spin_lock_init(&entity->lock);
spsc_queue_init(&entity->job_queue);
atomic_set(&entity->fence_seq, 0);
@@ -133,10 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
{
WARN_ON(!num_sched_list || !sched_list);
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
entity->sched_list = sched_list;
entity->num_sched_list = num_sched_list;
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
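A hedged usage sketch for the call above; the wrapper name is a placeholder, not from this patch. With this change the pointer swap is serialized by entity->lock instead of the old rq_lock.

/* Illustrative only: point an entity at a new set of schedulers. The
 * array must outlive the entity, since only the pointer is stored. */
static void example_set_entity_scheds(struct drm_sched_entity *entity,
				      struct drm_gpu_scheduler **scheds,
				      unsigned int count)
{
	drm_sched_entity_modify_sched(entity, scheds, count);
}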
@@ -244,10 +244,10 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
if (!entity->rq)
return;
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
entity->stopped = true;
drm_sched_rq_remove_entity(entity->rq, entity);
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
/* Make sure this entity is not used by the scheduler at the moment */
wait_for_completion(&entity->entity_idle);
@@ -372,8 +372,8 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f,
}
/*
- * drm_sched_entity_clear_dep - callback to clear the entities dependency and
- * wake up scheduler
+ * drm_sched_entity_wakeup - callback to clear the entity's dependency and
+ * wake up the scheduler
*/
static void drm_sched_entity_wakeup(struct dma_fence *f,
struct dma_fence_cb *cb)
@@ -391,14 +391,14 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
* @entity: scheduler entity
* @priority: scheduler priority
*
- * Update the priority of runqueus used for the entity.
+ * Update the priority of runqueues used for the entity.
*/
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority)
{
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
entity->priority = priority;
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
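A hedged example of the call above from a hypothetical driver path (the ctx structure is a placeholder); the spin_lock(&entity->lock) now taken inside the helper is what lets this race safely with run-queue selection.

/* Hypothetical priority-override path for a context's entity. */
drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH);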
@@ -514,8 +514,17 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
struct drm_sched_job *next;
next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
- if (next)
- drm_sched_rq_update_fifo(entity, next->submit_ts);
+ if (next) {
+ struct drm_sched_rq *rq;
+
+ spin_lock(&entity->lock);
+ rq = entity->rq;
+ spin_lock(&rq->lock);
+ drm_sched_rq_update_fifo_locked(entity, rq,
+ next->submit_ts);
+ spin_unlock(&rq->lock);
+ spin_unlock(&entity->lock);
+ }
}
/* Jobs and entities might have different lifecycles. Since we're
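For readability, the new locking in drm_sched_entity_pop_job() boils down to the nesting below. This is a restatement of the hunk above as a standalone sketch; the wrapper function name is illustrative, while the fields and the drm_sched_rq_update_fifo_locked() helper are the ones used in the diff.

/* Illustrative wrapper, not part of the patch: reposition an entity in
 * the FIFO with entity->lock as the outer lock and rq->lock as the
 * inner one, the ordering this series establishes. */
static void sketch_requeue_fifo(struct drm_sched_entity *entity, ktime_t ts)
{
	struct drm_sched_rq *rq;

	spin_lock(&entity->lock);
	rq = entity->rq;		/* stable while entity->lock is held */
	spin_lock(&rq->lock);
	drm_sched_rq_update_fifo_locked(entity, rq, ts);
	spin_unlock(&rq->lock);
	spin_unlock(&entity->lock);
}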
@@ -555,14 +564,14 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
if (fence && !dma_fence_is_signaled(fence))
return;
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
rq = sched ? sched->sched_rq[entity->priority] : NULL;
if (rq != entity->rq) {
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
}
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
if (entity->num_sched_list == 1)
entity->sched_list = NULL;
@@ -576,8 +585,6 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
* fence sequence number this function should be called with drm_sched_job_arm()
* under common lock for the struct drm_sched_entity that was set up for
* @sched_job in drm_sched_job_init().
- *
- * Returns 0 for success, negative error code otherwise.
*/
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
@@ -603,9 +610,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
struct drm_sched_rq *rq;
/* Add the entity to the run queue */
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
if (entity->stopped) {
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
DRM_ERROR("Trying to push to a killed entity\n");
return;
@@ -614,11 +621,14 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
rq = entity->rq;
sched = rq->sched;
+ spin_lock(&rq->lock);
drm_sched_rq_add_entity(rq, entity);
- spin_unlock(&entity->rq_lock);
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
- drm_sched_rq_update_fifo(entity, submit_ts);
+ drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);
+
+ spin_unlock(&rq->lock);
+ spin_unlock(&entity->lock);
drm_sched_wakeup(sched);
}
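Finally, the kerneldoc retained above still requires drm_sched_job_arm() and drm_sched_entity_push_job() to run under one driver-side lock so fence sequence numbers stay ordered. A hedged sketch of that submission tail: submit_lock and the job setup are hypothetical, the two scheduler calls are the documented API.

/* Hypothetical end of a driver's submit path: "job" was prepared with
 * drm_sched_job_init() against this entity; submit_lock is a driver
 * mutex shared by all submitters of the entity. */
mutex_lock(&submit_lock);
drm_sched_job_arm(job);		/* arms the scheduled/finished fences */
/* ...expose job->s_fence->finished to userspace here if needed... */
drm_sched_entity_push_job(job);	/* queues the job; may wake the scheduler */
mutex_unlock(&submit_lock);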