author     Dmitry Torokhov <dmitry.torokhov@gmail.com>    2024-09-20 01:24:24 -0700
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>    2024-09-20 01:24:24 -0700
commit     36ec807b627b4c0a0a382f0ae48eac7187d14b2b (patch)
tree       df9d682d66492722dbd186bd01af2d7334ea408a /drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
parent     eb017f4ea13b1a5ad7f4332279f2e4c67b44bdea (diff)
parent     55bef83509f0cbe4cc54a583ac0313389dabee66 (diff)
Merge branch 'next' into for-linus
Prepare input updates for 6.12 merge window.
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_object.c   94
1 file changed, 59 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index f6d503432a9ef..e32161f6b67a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -39,6 +39,7 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
+#include "amdgpu_vram_mgr.h"
 
 /**
  * DOC: amdgpu_object
@@ -153,8 +154,10 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 		else
 			places[c].flags |= TTM_PL_FLAG_TOPDOWN;
 
-		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+		if (abo->tbo.type == ttm_bo_type_kernel &&
+		    flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
 			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
+
 		c++;
 	}
 
@@ -173,6 +176,12 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
 			AMDGPU_PL_PREEMPT : TTM_PL_TT;
 		places[c].flags = 0;
+		/*
+		 * When GTT is just an alternative to VRAM make sure that we
+		 * only use it as fallback and still try to fill up VRAM first.
+		 */
+		if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
+			places[c].flags |= TTM_PL_FLAG_FALLBACK;
 		c++;
 	}
 
@@ -595,9 +604,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	if (!amdgpu_bo_support_uswc(bo->flags))
 		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 
-	if (adev->ras_enabled)
-		bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
-
 	bo->tbo.bdev = &adev->mman.bdev;
 	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
 			  AMDGPU_GEM_DOMAIN_GDS))
@@ -629,7 +635,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 		struct dma_fence *fence;
 
-		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
+		r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
 		if (unlikely(r))
 			goto fail_unreserve;
 
@@ -759,7 +765,7 @@ int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
 
 	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
 				  amdgpu_bo_size(shadow), NULL, fence,
-				  true, false, false);
+				  true, false, 0);
 }
 
 /**
@@ -961,6 +967,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 		if (!bo->placements[i].lpfn ||
 		    (lpfn && lpfn < bo->placements[i].lpfn))
 			bo->placements[i].lpfn = lpfn;
+
+		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
+		    bo->placements[i].mem_type == TTM_PL_VRAM)
+			bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
 	}
 
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@@ -971,12 +981,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
 	ttm_bo_pin(&bo->tbo);
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
-	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
 		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
 			     &adev->visible_pin_size);
-	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
 		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
 	}
 
@@ -1281,7 +1290,6 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
 	struct ttm_resource *res = bo->tbo.resource;
 	uint64_t size = amdgpu_bo_size(bo);
 	struct drm_gem_object *obj;
-	unsigned int domain;
 	bool shared;
 
 	/* Abort if the BO doesn't currently have a backing store */
@@ -1291,21 +1299,20 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
 	obj = &bo->tbo.base;
 	shared = drm_gem_object_is_shared_for_memory_stats(obj);
 
-	domain = amdgpu_mem_type_to_domain(res->mem_type);
-	switch (domain) {
-	case AMDGPU_GEM_DOMAIN_VRAM:
+	switch (res->mem_type) {
+	case TTM_PL_VRAM:
 		stats->vram += size;
-		if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
+		if (amdgpu_res_cpu_visible(adev, res))
 			stats->visible_vram += size;
 		if (shared)
 			stats->vram_shared += size;
 		break;
-	case AMDGPU_GEM_DOMAIN_GTT:
+	case TTM_PL_TT:
 		stats->gtt += size;
 		if (shared)
 			stats->gtt_shared += size;
 		break;
-	case AMDGPU_GEM_DOMAIN_CPU:
+	case TTM_PL_SYSTEM:
 	default:
 		stats->cpu += size;
 		if (shared)
@@ -1318,7 +1325,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
 	if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
 		stats->requested_visible_vram += size;
 
-	if (domain != AMDGPU_GEM_DOMAIN_VRAM) {
+	if (res->mem_type != TTM_PL_VRAM) {
 		stats->evicted_vram += size;
 		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
 			stats->evicted_visible_vram += size;
@@ -1366,8 +1373,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
 		return;
 
-	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true);
+	r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
 	if (!WARN_ON(r)) {
+		amdgpu_vram_mgr_set_cleared(bo->resource);
 		amdgpu_bo_fence(abo, fence, false);
 		dma_fence_put(fence);
 	}
@@ -1591,23 +1599,39 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 	u64 size;
 
 	if (dma_resv_trylock(bo->tbo.base.resv)) {
-		unsigned int domain;
-
-		domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
-		switch (domain) {
-		case AMDGPU_GEM_DOMAIN_VRAM:
-			if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
-				placement = "VRAM VISIBLE";
-			else
-				placement = "VRAM";
-			break;
-		case AMDGPU_GEM_DOMAIN_GTT:
-			placement = "GTT";
-			break;
-		case AMDGPU_GEM_DOMAIN_CPU:
-		default:
-			placement = "CPU";
-			break;
+		if (!bo->tbo.resource) {
+			placement = "NONE";
+		} else {
+			switch (bo->tbo.resource->mem_type) {
+			case TTM_PL_VRAM:
+				if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
+					placement = "VRAM VISIBLE";
+				else
+					placement = "VRAM";
+				break;
+			case TTM_PL_TT:
+				placement = "GTT";
+				break;
+			case AMDGPU_PL_GDS:
+				placement = "GDS";
+				break;
+			case AMDGPU_PL_GWS:
+				placement = "GWS";
+				break;
+			case AMDGPU_PL_OA:
+				placement = "OA";
+				break;
+			case AMDGPU_PL_PREEMPT:
+				placement = "PREEMPTIBLE";
+				break;
+			case AMDGPU_PL_DOORBELL:
+				placement = "DOORBELL";
+				break;
+			case TTM_PL_SYSTEM:
+			default:
+				placement = "CPU";
+				break;
+			}
 		}
 		dma_resv_unlock(bo->tbo.base.resv);
 	} else {