Diffstat (limited to 'drivers/gpu/drm/amd')
50 files changed, 440 insertions, 295 deletions
| diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 2a0df4cabb99..6f5b4a0e0a34 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1290,6 +1290,7 @@ struct amdgpu_device {  	bool                            debug_disable_gpu_ring_reset;  	bool                            debug_vm_userptr;  	bool                            debug_disable_ce_logs; +	bool                            debug_enable_ce_cs;  	/* Protection for the following isolation structure */  	struct mutex                    enforce_isolation_mutex; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 7c54fe6b0f5d..a2ca9acf8c4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2329,10 +2329,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)  int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,  					  struct kfd_vm_fault_info *mem)  { -	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { +	if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {  		*mem = *adev->gmc.vm_fault_info; -		mb(); /* make sure read happened */ -		atomic_set(&adev->gmc.vm_fault_info_updated, 0); +		atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);  	}  	return 0;  } @@ -2586,12 +2585,17 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,  			 * from the KFD, trigger a segmentation fault in VM debug mode.  			 */  			if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) { +				struct kfd_process *p; +  				pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",  								pid_nr(process_info->pid), mem->va);  				// Send GPU VM fault to user space -				kfd_signal_vm_fault_event_with_userptr(kfd_lookup_process_by_pid(process_info->pid), -								mem->va); +				p = kfd_lookup_process_by_pid(process_info->pid); +				if (p) { +					kfd_signal_vm_fault_event_with_userptr(p, mem->va); +					kfd_unref_process(p); +				}  			}  			ret = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9cd7741d2254..2f6a96af7fb1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -364,6 +364,12 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,  	if (p->uf_bo && ring->funcs->no_user_fence)  		return -EINVAL; +	if (!p->adev->debug_enable_ce_cs && +	    chunk_ib->flags & AMDGPU_IB_FLAG_CE) { +		dev_err_ratelimited(p->adev->dev, "CE CS is blocked, use debug=0x400 to override\n"); +		return -EINVAL; +	} +  	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&  	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {  		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE) @@ -702,7 +708,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,  	 */  	const s64 us_upper_bound = 200000; -	if (!adev->mm_stats.log2_max_MBps) { +	if ((!adev->mm_stats.log2_max_MBps) || !ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) {  		*max_bytes = 0;  		*max_vis_bytes = 0;  		return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a77000c2e0bb..3d032c4e2dce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1882,6 +1882,13 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device  static bool 
amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)  { +	/* Enabling ASPM causes randoms hangs on Tahiti and Oland on Zen4. +	 * It's unclear if this is a platform-specific or GPU-specific issue. +	 * Disable ASPM on SI for the time being. +	 */ +	if (adev->family == AMDGPU_FAMILY_SI) +		return true; +  #if IS_ENABLED(CONFIG_X86)  	struct cpuinfo_x86 *c = &cpu_data(0); @@ -6389,23 +6396,28 @@ static int amdgpu_device_sched_resume(struct list_head *device_list,  		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)  			drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); -		if (tmp_adev->asic_reset_res) -			r = tmp_adev->asic_reset_res; - -		tmp_adev->asic_reset_res = 0; - -		if (r) { +		if (tmp_adev->asic_reset_res) {  			/* bad news, how to tell it to userspace ?  			 * for ras error, we should report GPU bad status instead of  			 * reset failure  			 */  			if (reset_context->src != AMDGPU_RESET_SRC_RAS ||  			    !amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) -				dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", -					atomic_read(&tmp_adev->gpu_reset_counter)); -			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); +				dev_info( +					tmp_adev->dev, +					"GPU reset(%d) failed with error %d \n", +					atomic_read( +						&tmp_adev->gpu_reset_counter), +					tmp_adev->asic_reset_res); +			amdgpu_vf_error_put(tmp_adev, +					    AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, +					    tmp_adev->asic_reset_res); +			if (!r) +				r = tmp_adev->asic_reset_res; +			tmp_adev->asic_reset_res = 0;  		} else { -			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); +			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", +				 atomic_read(&tmp_adev->gpu_reset_counter));  			if (amdgpu_acpi_smart_shift_update(tmp_adev,  							   AMDGPU_SS_DEV_D0))  				dev_warn(tmp_adev->dev, @@ -7157,28 +7169,35 @@ void amdgpu_pci_resume(struct pci_dev *pdev)  static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)  { -	struct pci_dev *parent = pci_upstream_bridge(adev->pdev); +	struct pci_dev *swus, *swds;  	int r; -	if (!parent || parent->vendor != PCI_VENDOR_ID_ATI) +	swds = pci_upstream_bridge(adev->pdev); +	if (!swds || swds->vendor != PCI_VENDOR_ID_ATI || +	    pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM) +		return; +	swus = pci_upstream_bridge(swds); +	if (!swus || +	    (swus->vendor != PCI_VENDOR_ID_ATI && +	     swus->vendor != PCI_VENDOR_ID_AMD) || +	    pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)  		return;  	/* If already saved, return */  	if (adev->pcie_reset_ctx.swus)  		return;  	/* Upstream bridge is ATI, assume it's SWUS/DS architecture */ -	r = pci_save_state(parent); +	r = pci_save_state(swds);  	if (r)  		return; -	adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(parent); +	adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds); -	parent = pci_upstream_bridge(parent); -	r = pci_save_state(parent); +	r = pci_save_state(swus);  	if (r)  		return; -	adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(parent); +	adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus); -	adev->pcie_reset_ctx.swus = parent; +	adev->pcie_reset_ctx.swus = swus;  }  static void amdgpu_device_load_switch_state(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 73401f0aeb34..dd7b2b796427 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1033,7 +1033,9 @@ static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,  	/* Until a uniform way is figured, get mask based on hwid */  	switch (hw_id) {  	case VCN_HWID: -		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0; +		/* VCN vs UVD+VCE */ +		if (!amdgpu_ip_version(adev, VCE_HWIP, 0)) +			harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;  		break;  	case DMU_HWID:  		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK) @@ -2565,7 +2567,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		amdgpu_discovery_init(adev);  		vega10_reg_base_init(adev);  		adev->sdma.num_instances = 2; +		adev->sdma.sdma_mask = 3;  		adev->gmc.num_umc = 4; +		adev->gfx.xcc_mask = 1;  		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);  		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);  		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0); @@ -2592,7 +2596,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		amdgpu_discovery_init(adev);  		vega10_reg_base_init(adev);  		adev->sdma.num_instances = 2; +		adev->sdma.sdma_mask = 3;  		adev->gmc.num_umc = 4; +		adev->gfx.xcc_mask = 1;  		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);  		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);  		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1); @@ -2619,8 +2625,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		amdgpu_discovery_init(adev);  		vega10_reg_base_init(adev);  		adev->sdma.num_instances = 1; +		adev->sdma.sdma_mask = 1;  		adev->vcn.num_vcn_inst = 1;  		adev->gmc.num_umc = 2; +		adev->gfx.xcc_mask = 1;  		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {  			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);  			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0); @@ -2665,7 +2673,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		amdgpu_discovery_init(adev);  		vega20_reg_base_init(adev);  		adev->sdma.num_instances = 2; +		adev->sdma.sdma_mask = 3;  		adev->gmc.num_umc = 8; +		adev->gfx.xcc_mask = 1;  		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);  		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);  		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); @@ -2693,8 +2703,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		amdgpu_discovery_init(adev);  		arct_reg_base_init(adev);  		adev->sdma.num_instances = 8; +		adev->sdma.sdma_mask = 0xff;  		adev->vcn.num_vcn_inst = 2;  		adev->gmc.num_umc = 8; +		adev->gfx.xcc_mask = 1;  		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);  		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);  		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); @@ -2726,8 +2738,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		amdgpu_discovery_init(adev);  		aldebaran_reg_base_init(adev);  		adev->sdma.num_instances = 5; +		adev->sdma.sdma_mask = 0x1f;  		adev->vcn.num_vcn_inst = 2;  		adev->gmc.num_umc = 4; +		adev->gfx.xcc_mask = 1;  		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);  		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);  		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); @@ -2762,6 +2776,8 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)  		} else {  			cyan_skillfish_reg_base_init(adev);  			adev->sdma.num_instances = 2; +			adev->sdma.sdma_mask = 3; +			adev->gfx.xcc_mask = 1;  			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);  			
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);  			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index bff25ef3e2d0..61268aa82df4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -144,7 +144,8 @@ enum AMDGPU_DEBUG_MASK {  	AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),  	AMDGPU_DEBUG_SMU_POOL = BIT(7),  	AMDGPU_DEBUG_VM_USERPTR = BIT(8), -	AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9) +	AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9), +	AMDGPU_DEBUG_ENABLE_CE_CS = BIT(10)  };  unsigned int amdgpu_vram_limit = UINT_MAX; @@ -2289,6 +2290,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)  		pr_info("debug: disable kernel logs of correctable errors\n");  		adev->debug_disable_ce_logs = true;  	} + +	if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_CE_CS) { +		pr_info("debug: allowing command submission to CE engine\n"); +		adev->debug_enable_ce_cs = true; +	}  }  static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index fd8cca241da6..18a7829122d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -758,11 +758,42 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)   * @fence: fence of the ring to signal   *   */ -void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence) +void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)  { -	dma_fence_set_error(&fence->base, -ETIME); -	amdgpu_fence_write(fence->ring, fence->seq); -	amdgpu_fence_process(fence->ring); +	struct dma_fence *unprocessed; +	struct dma_fence __rcu **ptr; +	struct amdgpu_fence *fence; +	struct amdgpu_ring *ring = af->ring; +	unsigned long flags; +	u32 seq, last_seq; + +	last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask; +	seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask; + +	/* mark all fences from the guilty context with an error */ +	spin_lock_irqsave(&ring->fence_drv.lock, flags); +	do { +		last_seq++; +		last_seq &= ring->fence_drv.num_fences_mask; + +		ptr = &ring->fence_drv.fences[last_seq]; +		rcu_read_lock(); +		unprocessed = rcu_dereference(*ptr); + +		if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) { +			fence = container_of(unprocessed, struct amdgpu_fence, base); + +			if (fence == af) +				dma_fence_set_error(&fence->base, -ETIME); +			else if (fence->context == af->context) +				dma_fence_set_error(&fence->base, -ECANCELED); +		} +		rcu_read_unlock(); +	} while (last_seq != seq); +	spin_unlock_irqrestore(&ring->fence_drv.lock, flags); +	/* signal the guilty fence */ +	amdgpu_fence_write(ring, af->seq); +	amdgpu_fence_process(ring);  }  void amdgpu_fence_save_wptr(struct dma_fence *fence) @@ -790,14 +821,19 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,  	struct dma_fence *unprocessed;  	struct dma_fence __rcu **ptr;  	struct amdgpu_fence *fence; -	u64 wptr, i, seqno; +	u64 wptr; +	u32 seq, last_seq; -	seqno = amdgpu_fence_read(ring); +	last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask; +	seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;  	wptr = ring->fence_drv.signalled_wptr;  	ring->ring_backup_entries_to_copy = 0; -	for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) { -		ptr = 
&ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask]; +	do { +		last_seq++; +		last_seq &= ring->fence_drv.num_fences_mask; + +		ptr = &ring->fence_drv.fences[last_seq];  		rcu_read_lock();  		unprocessed = rcu_dereference(*ptr); @@ -813,7 +849,7 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,  			wptr = fence->wptr;  		}  		rcu_read_unlock(); -	} +	} while (last_seq != seq);  }  /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index a09ccf7d8aa2..ebe2b4c68b0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1102,6 +1102,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_  	might_sleep();  	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { +		if (amdgpu_in_reset(adev)) +			goto failed_kiq_read; +  		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);  		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);  	} @@ -1171,6 +1174,8 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3  	might_sleep();  	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { +		if (amdgpu_in_reset(adev)) +			goto failed_kiq_write;  		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);  		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 6b7d66b6d4cc..63ee6ba6a931 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -371,7 +371,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)  	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {  		for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {  			ring = &adev->jpeg.inst[i].ring_dec[j]; -			if (val & (BIT_ULL(1) << ((i * adev->jpeg.num_jpeg_rings) + j))) +			if (val & (BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j)))  				ring->sched.ready = true;  			else  				ring->sched.ready = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 8676400834fc..b3e6b3fcdf2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -758,7 +758,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)  		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);  		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;  	case AMDGPU_INFO_VRAM_USAGE: -		ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager); +		ui64 = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? +			ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) : 0;  		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;  	case AMDGPU_INFO_VIS_VRAM_USAGE:  		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); @@ -804,8 +805,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)  		mem.vram.usable_heap_size = adev->gmc.real_vram_size -  			atomic64_read(&adev->vram_pin_size) -  			AMDGPU_VM_RESERVED_VRAM; -		mem.vram.heap_usage = -			ttm_resource_manager_usage(vram_man); +		mem.vram.heap_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? 
+				ttm_resource_manager_usage(vram_man) : 0;  		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;  		mem.cpu_accessible_vram.total_heap_size = @@ -1421,14 +1422,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)  	amdgpu_debugfs_vm_init(file_priv); -	r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id); +	r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id, pasid);  	if (r)  		goto error_pasid; -	r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid); -	if (r) -		goto error_vm; -  	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);  	if (!fpriv->prt_va) {  		r = -ENOMEM; @@ -1468,10 +1465,8 @@ error_vm:  	amdgpu_vm_fini(adev, &fpriv->vm);  error_pasid: -	if (pasid) { +	if (pasid)  		amdgpu_pasid_free(pasid); -		amdgpu_vm_set_pasid(adev, &fpriv->vm, 0); -	}  	kfree(fpriv); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 5bf9be073cdd..4883adcfbb4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -409,7 +409,7 @@ int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,  		return -EINVAL;  	/* Clear the doorbell array before detection */ -	memset(adev->mes.hung_queue_db_array_cpu_addr, 0, +	memset(adev->mes.hung_queue_db_array_cpu_addr, AMDGPU_MES_INVALID_DB_OFFSET,  		adev->mes.hung_queue_db_array_size * sizeof(u32));  	input.queue_type = queue_type;  	input.detect_only = detect_only; @@ -420,12 +420,17 @@ int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,  		dev_err(adev->dev, "failed to detect and reset\n");  	} else {  		*hung_db_num = 0; -		for (i = 0; i < adev->mes.hung_queue_db_array_size; i++) { +		for (i = 0; i < adev->mes.hung_queue_hqd_info_offset; i++) {  			if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) {  				hung_db_array[i] = db_array[i];  				*hung_db_num += 1;  			}  		} + +		/* +		 * TODO: return HQD info for MES scheduled user compute queue reset cases +		 * stored in hung_db_array hqd info offset to full array size +		 */  	}  	return r; @@ -686,14 +691,11 @@ out:  bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)  {  	uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; -	bool is_supported = false; - -	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) && -	    amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) && -	    mes_rev >= 0x63) -		is_supported = true; -	return is_supported; +	return ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) && +		 amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) && +		 mes_rev >= 0x63) || +		amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0));  }  /* Fix me -- node_id is used to identify the correct MES instances in the future */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 6b506fc72f58..97c137c90f97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -149,6 +149,7 @@ struct amdgpu_mes {  	void                *resource_1_addr[AMDGPU_MAX_MES_PIPES];  	int				hung_queue_db_array_size; +	int				hung_queue_hqd_info_offset;  	struct amdgpu_bo		*hung_queue_db_array_gpu_obj;  	uint64_t			hung_queue_db_array_gpu_addr;  	void				*hung_queue_db_array_cpu_addr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 1578e4e2bf84..8c0e5d03de50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -2352,7 +2352,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp)  	}  	ret = psp_ta_load(psp, &psp->securedisplay_context.context); -	if (!ret) { +	if (!ret && !psp->securedisplay_context.context.resp_status) {  		psp->securedisplay_context.context.initialized = true;  		mutex_init(&psp->securedisplay_context.mutex);  	} else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 8f6ce948c684..5ec5c3ff22bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -811,7 +811,7 @@ int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,  	if (r)  		return r; -	/* signal the fence of the bad job */ +	/* signal the guilty fence and set an error on all fences from the context */  	if (guilty_fence)  		amdgpu_fence_driver_guilty_force_completion(guilty_fence);  	/* Re-emit the non-guilty commands */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index b6b649179776..4b46e3c26ff3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -155,7 +155,7 @@ extern const struct drm_sched_backend_ops amdgpu_sched_ops;  void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);  void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);  void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); -void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence); +void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af);  void amdgpu_fence_save_wptr(struct dma_fence *fence);  int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c index 48e0932f5b62..1add21160d21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c @@ -726,12 +726,12 @@ amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,  	struct amdgpu_bo *bo;  	int ret; -	spin_lock(&vm->invalidated_lock); +	spin_lock(&vm->status_lock);  	while (!list_empty(&vm->invalidated)) {  		bo_va = list_first_entry(&vm->invalidated,  					 struct amdgpu_bo_va,  					 base.vm_status); -		spin_unlock(&vm->invalidated_lock); +		spin_unlock(&vm->status_lock);  		bo = bo_va->base.bo;  		ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2); @@ -748,9 +748,9 @@ amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,  		if (ret)  			return ret; -		spin_lock(&vm->invalidated_lock); +		spin_lock(&vm->status_lock);  	} -	spin_unlock(&vm->invalidated_lock); +	spin_unlock(&vm->status_lock);  	return 0;  } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 3328ab63376b..f96beb96c75c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -598,8 +598,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)  	vf2pf_info->driver_cert = 0;  	vf2pf_info->os_info.all = 0; -	vf2pf_info->fb_usage = -		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20; +	vf2pf_info->fb_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? 
+		 ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20 : 0;  	vf2pf_info->fb_vis_usage =  		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;  	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8c28e8923f02..c1a801203949 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -139,48 +139,6 @@ static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)  }  /** - * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping - * - * @adev: amdgpu_device pointer - * @vm: amdgpu_vm pointer - * @pasid: the pasid the VM is using on this GPU - * - * Set the pasid this VM is using on this GPU, can also be used to remove the - * pasid by passing in zero. - * - */ -int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, -			u32 pasid) -{ -	int r; - -	amdgpu_vm_assert_locked(vm); - -	if (vm->pasid == pasid) -		return 0; - -	if (vm->pasid) { -		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid)); -		if (r < 0) -			return r; - -		vm->pasid = 0; -	} - -	if (pasid) { -		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, -					GFP_KERNEL)); -		if (r < 0) -			return r; - -		vm->pasid = pasid; -	} - - -	return 0; -} - -/**   * amdgpu_vm_bo_evicted - vm_bo is evicted   *   * @vm_bo: vm_bo which is evicted @@ -195,10 +153,12 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)  	vm_bo->moved = true;  	amdgpu_vm_assert_locked(vm); +	spin_lock(&vm_bo->vm->status_lock);  	if (bo->tbo.type == ttm_bo_type_kernel)  		list_move(&vm_bo->vm_status, &vm->evicted);  	else  		list_move_tail(&vm_bo->vm_status, &vm->evicted); +	spin_unlock(&vm_bo->vm->status_lock);  }  /**   * amdgpu_vm_bo_moved - vm_bo is moved @@ -211,7 +171,9 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)  static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)  {  	amdgpu_vm_assert_locked(vm_bo->vm); +	spin_lock(&vm_bo->vm->status_lock);  	list_move(&vm_bo->vm_status, &vm_bo->vm->moved); +	spin_unlock(&vm_bo->vm->status_lock);  }  /** @@ -225,7 +187,9 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)  static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)  {  	amdgpu_vm_assert_locked(vm_bo->vm); +	spin_lock(&vm_bo->vm->status_lock);  	list_move(&vm_bo->vm_status, &vm_bo->vm->idle); +	spin_unlock(&vm_bo->vm->status_lock);  	vm_bo->moved = false;  } @@ -239,9 +203,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)   */  static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)  { -	spin_lock(&vm_bo->vm->invalidated_lock); +	spin_lock(&vm_bo->vm->status_lock);  	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); -	spin_unlock(&vm_bo->vm->invalidated_lock); +	spin_unlock(&vm_bo->vm->status_lock);  }  /** @@ -254,9 +218,10 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)   */  static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)  { -	amdgpu_vm_assert_locked(vm_bo->vm);  	vm_bo->moved = true; +	spin_lock(&vm_bo->vm->status_lock);  	list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user); +	spin_unlock(&vm_bo->vm->status_lock);  }  /** @@ -270,10 +235,13 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)  static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)  {  	amdgpu_vm_assert_locked(vm_bo->vm); -	if (vm_bo->bo->parent) +	if (vm_bo->bo->parent) { +		spin_lock(&vm_bo->vm->status_lock);  		
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); -	else +		spin_unlock(&vm_bo->vm->status_lock); +	} else {  		amdgpu_vm_bo_idle(vm_bo); +	}  }  /** @@ -287,7 +255,9 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)  static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)  {  	amdgpu_vm_assert_locked(vm_bo->vm); +	spin_lock(&vm_bo->vm->status_lock);  	list_move(&vm_bo->vm_status, &vm_bo->vm->done); +	spin_unlock(&vm_bo->vm->status_lock);  }  /** @@ -301,13 +271,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)  {  	struct amdgpu_vm_bo_base *vm_bo, *tmp; -	spin_lock(&vm->invalidated_lock); +	amdgpu_vm_assert_locked(vm); + +	spin_lock(&vm->status_lock);  	list_splice_init(&vm->done, &vm->invalidated);  	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)  		vm_bo->moved = true; -	spin_unlock(&vm->invalidated_lock); -	amdgpu_vm_assert_locked(vm_bo->vm);  	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {  		struct amdgpu_bo *bo = vm_bo->bo; @@ -317,13 +287,14 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)  		else if (bo->parent)  			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);  	} +	spin_unlock(&vm->status_lock);  }  /**   * amdgpu_vm_update_shared - helper to update shared memory stat   * @base: base structure for tracking BO usage in a VM   * - * Takes the vm stats_lock and updates the shared memory stat. If the basic + * Takes the vm status_lock and updates the shared memory stat. If the basic   * stat changed (e.g. buffer was moved) amdgpu_vm_update_stats need to be called   * as well.   */ @@ -336,7 +307,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)  	bool shared;  	dma_resv_assert_held(bo->tbo.base.resv); -	spin_lock(&vm->stats_lock); +	spin_lock(&vm->status_lock);  	shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);  	if (base->shared != shared) {  		base->shared = shared; @@ -348,7 +319,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)  			vm->stats[bo_memtype].drm.private += size;  		}  	} -	spin_unlock(&vm->stats_lock); +	spin_unlock(&vm->status_lock);  }  /** @@ -373,11 +344,11 @@ void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)   *        be bo->tbo.resource   * @sign: if we should add (+1) or subtract (-1) from the stat   * - * Caller need to have the vm stats_lock held. Useful for when multiple update + * Caller need to have the vm status_lock held. Useful for when multiple update   * need to happen at the same time.   
*/  static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base, -					  struct ttm_resource *res, int sign) +			    struct ttm_resource *res, int sign)  {  	struct amdgpu_vm *vm = base->vm;  	struct amdgpu_bo *bo = base->bo; @@ -401,8 +372,7 @@ static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,  		 */  		if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)  			vm->stats[res_memtype].drm.purgeable += size; -		if (!(bo->preferred_domains & -		      amdgpu_mem_type_to_domain(res_memtype))) +		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))  			vm->stats[bo_memtype].evicted += size;  	}  } @@ -421,9 +391,9 @@ void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,  {  	struct amdgpu_vm *vm = base->vm; -	spin_lock(&vm->stats_lock); +	spin_lock(&vm->status_lock);  	amdgpu_vm_update_stats_locked(base, res, sign); -	spin_unlock(&vm->stats_lock); +	spin_unlock(&vm->status_lock);  }  /** @@ -449,10 +419,10 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,  	base->next = bo->vm_bo;  	bo->vm_bo = base; -	spin_lock(&vm->stats_lock); +	spin_lock(&vm->status_lock);  	base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);  	amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1); -	spin_unlock(&vm->stats_lock); +	spin_unlock(&vm->status_lock);  	if (!amdgpu_vm_is_bo_always_valid(vm, bo))  		return; @@ -511,10 +481,10 @@ int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,  	int ret;  	/* We can only trust prev->next while holding the lock */ -	spin_lock(&vm->invalidated_lock); +	spin_lock(&vm->status_lock);  	while (!list_is_head(prev->next, &vm->done)) {  		bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status); -		spin_unlock(&vm->invalidated_lock); +		spin_unlock(&vm->status_lock);  		bo = bo_va->base.bo;  		if (bo) { @@ -522,10 +492,10 @@ int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,  			if (unlikely(ret))  				return ret;  		} -		spin_lock(&vm->invalidated_lock); +		spin_lock(&vm->status_lock);  		prev = prev->next;  	} -	spin_unlock(&vm->invalidated_lock); +	spin_unlock(&vm->status_lock);  	return 0;  } @@ -621,7 +591,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,  		       void *param)  {  	uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm); -	struct amdgpu_vm_bo_base *bo_base, *tmp; +	struct amdgpu_vm_bo_base *bo_base;  	struct amdgpu_bo *bo;  	int r; @@ -634,7 +604,13 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,  			return r;  	} -	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { +	spin_lock(&vm->status_lock); +	while (!list_empty(&vm->evicted)) { +		bo_base = list_first_entry(&vm->evicted, +					   struct amdgpu_vm_bo_base, +					   vm_status); +		spin_unlock(&vm->status_lock); +  		bo = bo_base->bo;  		r = validate(param, bo); @@ -647,21 +623,26 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,  			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));  			amdgpu_vm_bo_relocated(bo_base);  		} +		spin_lock(&vm->status_lock);  	} +	while (ticket && !list_empty(&vm->evicted_user)) { +		bo_base = list_first_entry(&vm->evicted_user, +					   struct amdgpu_vm_bo_base, +					   vm_status); +		spin_unlock(&vm->status_lock); + +		bo = bo_base->bo; +		dma_resv_assert_held(bo->tbo.base.resv); -	if (ticket) { -		list_for_each_entry_safe(bo_base, tmp, &vm->evicted_user, -					 vm_status) { -			bo = bo_base->bo; -			
dma_resv_assert_held(bo->tbo.base.resv); +		r = validate(param, bo); +		if (r) +			return r; -			r = validate(param, bo); -			if (r) -				return r; +		amdgpu_vm_bo_invalidated(bo_base); -			amdgpu_vm_bo_invalidated(bo_base); -		} +		spin_lock(&vm->status_lock);  	} +	spin_unlock(&vm->status_lock);  	amdgpu_vm_eviction_lock(vm);  	vm->evicting = false; @@ -690,7 +671,9 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)  	ret = !vm->evicting;  	amdgpu_vm_eviction_unlock(vm); +	spin_lock(&vm->status_lock);  	ret &= list_empty(&vm->evicted); +	spin_unlock(&vm->status_lock);  	spin_lock(&vm->immediate.lock);  	ret &= !vm->immediate.stopped; @@ -981,13 +964,18 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,  			  struct amdgpu_vm *vm, bool immediate)  {  	struct amdgpu_vm_update_params params; -	struct amdgpu_vm_bo_base *entry, *tmp; +	struct amdgpu_vm_bo_base *entry;  	bool flush_tlb_needed = false; +	LIST_HEAD(relocated);  	int r, idx;  	amdgpu_vm_assert_locked(vm); -	if (list_empty(&vm->relocated)) +	spin_lock(&vm->status_lock); +	list_splice_init(&vm->relocated, &relocated); +	spin_unlock(&vm->status_lock); + +	if (list_empty(&relocated))  		return 0;  	if (!drm_dev_enter(adev_to_drm(adev), &idx)) @@ -1003,7 +991,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,  	if (r)  		goto error; -	list_for_each_entry(entry, &vm->relocated, vm_status) { +	list_for_each_entry(entry, &relocated, vm_status) {  		/* vm_flush_needed after updating moved PDEs */  		flush_tlb_needed |= entry->moved; @@ -1019,7 +1007,9 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,  	if (flush_tlb_needed)  		atomic64_inc(&vm->tlb_seq); -	list_for_each_entry_safe(entry, tmp, &vm->relocated, vm_status) { +	while (!list_empty(&relocated)) { +		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base, +					 vm_status);  		amdgpu_vm_bo_idle(entry);  	} @@ -1246,9 +1236,9 @@ error_free:  void amdgpu_vm_get_memory(struct amdgpu_vm *vm,  			  struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])  { -	spin_lock(&vm->stats_lock); +	spin_lock(&vm->status_lock);  	memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM); -	spin_unlock(&vm->stats_lock); +	spin_unlock(&vm->status_lock);  }  /** @@ -1615,24 +1605,29 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,  			   struct amdgpu_vm *vm,  			   struct ww_acquire_ctx *ticket)  { -	struct amdgpu_bo_va *bo_va, *tmp; +	struct amdgpu_bo_va *bo_va;  	struct dma_resv *resv;  	bool clear, unlock;  	int r; -	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { +	spin_lock(&vm->status_lock); +	while (!list_empty(&vm->moved)) { +		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, +					 base.vm_status); +		spin_unlock(&vm->status_lock); +  		/* Per VM BOs never need to bo cleared in the page tables */  		r = amdgpu_vm_bo_update(adev, bo_va, false);  		if (r)  			return r; +		spin_lock(&vm->status_lock);  	} -	spin_lock(&vm->invalidated_lock);  	while (!list_empty(&vm->invalidated)) {  		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,  					 base.vm_status);  		resv = bo_va->base.bo->tbo.base.resv; -		spin_unlock(&vm->invalidated_lock); +		spin_unlock(&vm->status_lock);  		/* Try to reserve the BO to avoid clearing its ptes */  		if (!adev->debug_vm && dma_resv_trylock(resv)) { @@ -1664,9 +1659,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,  		     bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))  			amdgpu_vm_bo_evicted_user(&bo_va->base); -		spin_lock(&vm->invalidated_lock); +		
spin_lock(&vm->status_lock);  	} -	spin_unlock(&vm->invalidated_lock); +	spin_unlock(&vm->status_lock);  	return 0;  } @@ -2195,9 +2190,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,  		}  	} -	spin_lock(&vm->invalidated_lock); +	spin_lock(&vm->status_lock);  	list_del(&bo_va->base.vm_status); -	spin_unlock(&vm->invalidated_lock); +	spin_unlock(&vm->status_lock);  	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {  		list_del(&mapping->list); @@ -2305,10 +2300,10 @@ void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,  	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {  		struct amdgpu_vm *vm = bo_base->vm; -		spin_lock(&vm->stats_lock); +		spin_lock(&vm->status_lock);  		amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);  		amdgpu_vm_update_stats_locked(bo_base, new_mem, +1); -		spin_unlock(&vm->stats_lock); +		spin_unlock(&vm->status_lock);  	}  	amdgpu_vm_bo_invalidate(bo, evicted); @@ -2554,6 +2549,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)   * @adev: amdgpu_device pointer   * @vm: requested vm   * @xcp_id: GPU partition selection id + * @pasid: the pasid the VM is using on this GPU   *   * Init @vm fields.   * @@ -2561,7 +2557,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)   * 0 for success, error for failure.   */  int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, -		   int32_t xcp_id) +		   int32_t xcp_id, uint32_t pasid)  {  	struct amdgpu_bo *root_bo;  	struct amdgpu_bo_vm *root; @@ -2575,12 +2571,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,  	INIT_LIST_HEAD(&vm->relocated);  	INIT_LIST_HEAD(&vm->moved);  	INIT_LIST_HEAD(&vm->idle); -	spin_lock_init(&vm->invalidated_lock);  	INIT_LIST_HEAD(&vm->invalidated); +	spin_lock_init(&vm->status_lock);  	INIT_LIST_HEAD(&vm->freed);  	INIT_LIST_HEAD(&vm->done);  	INIT_KFIFO(vm->faults); -	spin_lock_init(&vm->stats_lock);  	r = amdgpu_vm_init_entities(adev, vm);  	if (r) @@ -2638,12 +2633,26 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,  	if (r)  		dev_dbg(adev->dev, "Failed to create task info for VM\n"); +	/* Store new PASID in XArray (if non-zero) */ +	if (pasid != 0) { +		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL)); +		if (r < 0) +			goto error_free_root; + +		vm->pasid = pasid; +	} +  	amdgpu_bo_unreserve(vm->root.bo);  	amdgpu_bo_unref(&root_bo);  	return 0;  error_free_root: +	/* If PASID was partially set, erase it from XArray before failing */ +	if (vm->pasid != 0) { +		xa_erase_irq(&adev->vm_manager.pasids, vm->pasid); +		vm->pasid = 0; +	}  	amdgpu_vm_pt_free_root(adev, vm);  	amdgpu_bo_unreserve(vm->root.bo);  	amdgpu_bo_unref(&root_bo); @@ -2749,7 +2758,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)  	root = amdgpu_bo_ref(vm->root.bo);  	amdgpu_bo_reserve(root, true); -	amdgpu_vm_set_pasid(adev, vm, 0); +	/* Remove PASID mapping before destroying VM */ +	if (vm->pasid != 0) { +		xa_erase_irq(&adev->vm_manager.pasids, vm->pasid); +		vm->pasid = 0; +	}  	dma_fence_wait(vm->last_unlocked, false);  	dma_fence_put(vm->last_unlocked);  	dma_fence_wait(vm->last_tlb_flush, false); @@ -3038,6 +3051,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)  	amdgpu_vm_assert_locked(vm); +	spin_lock(&vm->status_lock);  	seq_puts(m, "\tIdle BOs:\n");  	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {  		if (!bo_va->base.bo) @@ -3075,13 +3089,11 @@ void amdgpu_debugfs_vm_bo_info(struct 
amdgpu_vm *vm, struct seq_file *m)  	id = 0;  	seq_puts(m, "\tInvalidated BOs:\n"); -	spin_lock(&vm->invalidated_lock);  	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {  		if (!bo_va->base.bo)  			continue;  		total_invalidated += amdgpu_bo_print_info(id++,	bo_va->base.bo, m);  	} -	spin_unlock(&vm->invalidated_lock);  	total_invalidated_objs = id;  	id = 0; @@ -3091,6 +3103,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)  			continue;  		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);  	} +	spin_unlock(&vm->status_lock);  	total_done_objs = id;  	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index adc5c9161fa8..cf0ec94e8a07 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -203,11 +203,11 @@ struct amdgpu_vm_bo_base {  	/* protected by bo being reserved */  	struct amdgpu_vm_bo_base	*next; -	/* protected by vm reservation and invalidated_lock */ +	/* protected by vm status_lock */  	struct list_head		vm_status;  	/* if the bo is counted as shared in mem stats -	 * protected by vm BO being reserved */ +	 * protected by vm status_lock */  	bool				shared;  	/* protected by the BO being reserved */ @@ -343,8 +343,10 @@ struct amdgpu_vm {  	bool			evicting;  	unsigned int		saved_flags; -	/* Memory statistics for this vm, protected by stats_lock */ -	spinlock_t		stats_lock; +	/* Lock to protect vm_bo add/del/move on all lists of vm */ +	spinlock_t		status_lock; + +	/* Memory statistics for this vm, protected by status_lock */  	struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];  	/* @@ -352,8 +354,6 @@ struct amdgpu_vm {  	 * PDs, PTs or per VM BOs. The state transits are:  	 *  	 * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle -	 * -	 * Lists are protected by the root PD dma_resv lock.  	 */  	/* Per-VM and PT BOs who needs a validation */ @@ -374,10 +374,7 @@ struct amdgpu_vm {  	 * state transits are:  	 *  	 * evicted_user or invalidated -> done -	 * -	 * Lists are protected by the invalidated_lock.  	 
*/ -	spinlock_t		invalidated_lock;  	/* BOs for user mode queues that need a validation */  	struct list_head	evicted_user; @@ -503,11 +500,8 @@ extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;  void amdgpu_vm_manager_init(struct amdgpu_device *adev);  void amdgpu_vm_manager_fini(struct amdgpu_device *adev); -int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, -			u32 pasid); -  long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout); -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id); +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id, uint32_t pasid);  int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);  void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);  int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 7a4c12ff9b18..f794fb1cc06e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -543,7 +543,9 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)  	entry->bo->vm_bo = NULL;  	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL); +	spin_lock(&entry->vm->status_lock);  	list_del(&entry->vm_status); +	spin_unlock(&entry->vm->status_lock);  	amdgpu_bo_unref(&entry->bo);  } @@ -587,6 +589,7 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,  	struct amdgpu_vm_pt_cursor seek;  	struct amdgpu_vm_bo_base *entry; +	spin_lock(¶ms->vm->status_lock);  	for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {  		if (entry && entry->bo)  			list_move(&entry->vm_status, ¶ms->tlb_flush_waitlist); @@ -594,6 +597,7 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,  	/* enter start node now */  	list_move(&cursor->entry->vm_status, ¶ms->tlb_flush_waitlist); +	spin_unlock(¶ms->vm->status_lock);  }  /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index a5adb2ed9b3c..9d934c07fa6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -234,6 +234,9 @@ static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,  	    !adev->gmc.vram_vendor)  		return 0; +	if (!ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) +		return 0; +  	return attr->mode;  } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 66c47c466532..d61eb9f187c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -5862,8 +5862,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,  	unsigned vmid = AMDGPU_JOB_GET_VMID(job);  	u32 header, control = 0; -	BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); -  	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);  	control |= ib->length_dw | (vmid << 24); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 710ec9c34e43..93fde0f9af87 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -4419,8 +4419,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,  	unsigned vmid = AMDGPU_JOB_GET_VMID(job);  	u32 header, control = 0; -	BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); -  	header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);  	control |= ib->length_dw | (vmid << 24); diff --git 
a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index 404cc8c2ff2c..f4a19357ccbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -337,7 +337,7 @@ static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,  	int vmid, i;  	if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready && -	    (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x83) { +	    (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x84) {  		struct mes_inv_tlbs_pasid_input input = {0};  		input.pasid = pasid;  		input.flush_type = flush_type; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 93d7ccb7d013..0e5e54d0a9a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1068,7 +1068,7 @@ static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block)  					GFP_KERNEL);  	if (!adev->gmc.vm_fault_info)  		return -ENOMEM; -	atomic_set(&adev->gmc.vm_fault_info_updated, 0); +	atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);  	return 0;  } @@ -1290,7 +1290,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,  	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,  			     VMID);  	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid) -		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) { +		&& !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {  		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;  		u32 protections = REG_GET_FIELD(status,  					VM_CONTEXT1_PROTECTION_FAULT_STATUS, @@ -1306,8 +1306,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,  		info->prot_read = protections & 0x8 ? true : false;  		info->prot_write = protections & 0x10 ? true : false;  		info->prot_exec = protections & 0x20 ? true : false; -		mb(); -		atomic_set(&adev->gmc.vm_fault_info_updated, 1); +		atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);  	}  	return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index c5e2a2c41e06..e1509480dfc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1183,7 +1183,7 @@ static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block)  					GFP_KERNEL);  	if (!adev->gmc.vm_fault_info)  		return -ENOMEM; -	atomic_set(&adev->gmc.vm_fault_info_updated, 0); +	atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);  	return 0;  } @@ -1478,7 +1478,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,  	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,  			     VMID);  	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid) -		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) { +		&& !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {  		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;  		u32 protections = REG_GET_FIELD(status,  					VM_CONTEXT1_PROTECTION_FAULT_STATUS, @@ -1494,8 +1494,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,  		info->prot_read = protections & 0x8 ? true : false;  		info->prot_write = protections & 0x10 ? true : false;  		info->prot_exec = protections & 0x20 ? 
true : false; -		mb(); -		atomic_set(&adev->gmc.vm_fault_info_updated, 1); +		atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);  	}  	return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c index 2db9b2c63693..1cd9eaeef38f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c @@ -208,10 +208,10 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev,  	struct amdgpu_userq_mgr *uqm, *tmp;  	unsigned int hung_db_num = 0;  	int queue_id, r, i; -	u32 db_array[4]; +	u32 db_array[8]; -	if (db_array_size > 4) { -		dev_err(adev->dev, "DB array size (%d vs 4) too small\n", +	if (db_array_size > 8) { +		dev_err(adev->dev, "DB array size (%d vs 8) too small\n",  			db_array_size);  		return -EINVAL;  	} diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index e82188431f79..da575bb1377f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -66,7 +66,8 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);  #define GFX_MES_DRAM_SIZE	0x80000  #define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE) -#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 4 +#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset, [4:7] = hqd info */ +#define MES11_HUNG_HQD_INFO_OFFSET	4  static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)  { @@ -1720,8 +1721,9 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)  	struct amdgpu_device *adev = ip_block->adev;  	int pipe, r; -	adev->mes.hung_queue_db_array_size = -		MES11_HUNG_DB_OFFSET_ARRAY_SIZE; +	adev->mes.hung_queue_db_array_size = MES11_HUNG_DB_OFFSET_ARRAY_SIZE; +	adev->mes.hung_queue_hqd_info_offset = MES11_HUNG_HQD_INFO_OFFSET; +  	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {  		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)  			continue; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index aff06f06aeee..7f3512d9de07 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -47,7 +47,8 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);  #define MES_EOP_SIZE   2048 -#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 4 +#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset [4:7] hqd info */ +#define MES12_HUNG_HQD_INFO_OFFSET	4  static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring)  { @@ -228,7 +229,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,  			pipe, x_pkt->header.opcode);  	r = amdgpu_fence_wait_polling(ring, seq, timeout); -	if (r < 1 || !*status_ptr) { + +	/* +	 * status_ptr[31:0] == 0 (fail) or status_ptr[63:0] == 1 (success). +	 * If status_ptr[31:0] == 0 then status_ptr[63:32] will have debug error information. 
+	 */ +	if (r < 1 || !(lower_32_bits(*status_ptr))) {  		if (misc_op_str)  			dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n", @@ -1899,8 +1905,9 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)  	struct amdgpu_device *adev = ip_block->adev;  	int pipe, r; -	adev->mes.hung_queue_db_array_size = -		MES12_HUNG_DB_OFFSET_ARRAY_SIZE; +	adev->mes.hung_queue_db_array_size = MES12_HUNG_DB_OFFSET_ARRAY_SIZE; +	adev->mes.hung_queue_hqd_info_offset = MES12_HUNG_HQD_INFO_OFFSET; +  	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {  		r = amdgpu_mes_init_microcode(adev, pipe);  		if (r) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 6c5c7c1bf5ed..6e7bc983fc0b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1209,6 +1209,15 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,  	pr_debug_ratelimited("Evicting process pid %d queues\n",  			    pdd->process->lead_thread->pid); +	if (dqm->dev->kfd->shared_resources.enable_mes) { +		pdd->last_evict_timestamp = get_jiffies_64(); +		retval = suspend_all_queues_mes(dqm); +		if (retval) { +			dev_err(dev, "Suspending all queues failed"); +			goto out; +		} +	} +  	/* Mark all queues as evicted. Deactivate all active queues on  	 * the qpd.  	 */ @@ -1221,23 +1230,27 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,  		decrement_queue_count(dqm, qpd, q);  		if (dqm->dev->kfd->shared_resources.enable_mes) { -			int err; - -			err = remove_queue_mes(dqm, q, qpd); -			if (err) { +			retval = remove_queue_mes(dqm, q, qpd); +			if (retval) {  				dev_err(dev, "Failed to evict queue %d\n",  					q->properties.queue_id); -				retval = err; +				goto out;  			}  		}  	} -	pdd->last_evict_timestamp = get_jiffies_64(); -	if (!dqm->dev->kfd->shared_resources.enable_mes) + +	if (!dqm->dev->kfd->shared_resources.enable_mes) { +		pdd->last_evict_timestamp = get_jiffies_64();  		retval = execute_queues_cpsch(dqm,  					      qpd->is_debug ?  					      KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :  					      KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,  					      USE_DEFAULT_GRACE_PERIOD); +	} else { +		retval = resume_all_queues_mes(dqm); +		if (retval) +			dev_err(dev, "Resuming all queues failed"); +	}  out:  	dqm_unlock(dqm); @@ -3098,61 +3111,17 @@ out:  	return ret;  } -static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm, -				   struct qcm_process_device *qpd) -{ -	struct device *dev = dqm->dev->adev->dev; -	int ret = 0; - -	/* Check if process is already evicted */ -	dqm_lock(dqm); -	if (qpd->evicted) { -		/* Increment the evicted count to make sure the -		 * process stays evicted before its terminated. 
-		 */ -		qpd->evicted++; -		dqm_unlock(dqm); -		goto out; -	} -	dqm_unlock(dqm); - -	ret = suspend_all_queues_mes(dqm); -	if (ret) { -		dev_err(dev, "Suspending all queues failed"); -		goto out; -	} - -	ret = dqm->ops.evict_process_queues(dqm, qpd); -	if (ret) { -		dev_err(dev, "Evicting process queues failed"); -		goto out; -	} - -	ret = resume_all_queues_mes(dqm); -	if (ret) -		dev_err(dev, "Resuming all queues failed"); - -out: -	return ret; -} -  int kfd_evict_process_device(struct kfd_process_device *pdd)  {  	struct device_queue_manager *dqm;  	struct kfd_process *p; -	int ret = 0;  	p = pdd->process;  	dqm = pdd->dev->dqm;  	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid); -	if (dqm->dev->kfd->shared_resources.enable_mes) -		ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd); -	else -		ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd); - -	return ret; +	return dqm->ops.evict_process_queues(dqm, &pdd->qpd);  }  int reserve_debug_trap_vmid(struct device_queue_manager *dqm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 273f42e3afdd..9d72411c3379 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -3045,6 +3045,8 @@ retry_write_locked:  	if (svms->checkpoint_ts[gpuidx] != 0) {  		if (amdgpu_ih_ts_after_or_equal(ts,  svms->checkpoint_ts[gpuidx])) {  			pr_debug("draining retry fault, drop fault 0x%llx\n", addr); +			if (write_locked) +				mmap_write_downgrade(mm);  			r = -EAGAIN;  			goto out_unlock_svms;  		} else { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8e1622bf7a42..6597475e245d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2000,6 +2000,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	init_data.flags.disable_ips_in_vpb = 0; +	/* DCN35 and above supports dynamic DTBCLK switch */ +	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0)) +		init_data.flags.allow_0_dtb_clk = true; +  	/* Enable DWB for tested platforms only */  	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))  		init_data.num_virtual_links = 1; @@ -2081,8 +2085,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	dc_hardware_init(adev->dm.dc); -	adev->dm.restore_backlight = true; -  	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);  	if (!adev->dm.hpd_rx_offload_wq) {  		drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n"); @@ -3438,7 +3440,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)  		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);  		dc_resume(dm->dc); -		adev->dm.restore_backlight = true;  		amdgpu_dm_irq_resume_early(adev); @@ -9965,6 +9966,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,  	bool mode_set_reset_required = false;  	u32 i;  	struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; +	bool set_backlight_level = false;  	/* Disable writeback */  	for_each_old_connector_in_state(state, connector, old_con_state, i) { @@ -10084,6 +10086,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,  			acrtc->hw_mode = new_crtc_state->mode;  			crtc->hwmode = new_crtc_state->mode;  			mode_set_reset_required = true; +			set_backlight_level = true;  		} else if (modereset_required(new_crtc_state)) {  			drm_dbg_atomic(dev,  				       "Atomic commit: RESET. 
crtc id %d:[%p]\n", @@ -10140,16 +10143,13 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,  	 * to fix a flicker issue.  	 * It will cause the dm->actual_brightness is not the current panel brightness  	 * level. (the dm->brightness is the correct panel level) -	 * So we set the backlight level with dm->brightness value after initial -	 * set mode. Use restore_backlight flag to avoid setting backlight level -	 * for every subsequent mode set. +	 * So we set the backlight level with dm->brightness value after set mode  	 */ -	if (dm->restore_backlight) { +	if (set_backlight_level) {  		for (i = 0; i < dm->num_of_edps; i++) {  			if (dm->backlight_dev[i])  				amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);  		} -		dm->restore_backlight = false;  	}  } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 009f206226f0..db75e991ac7b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -631,13 +631,6 @@ struct amdgpu_display_manager {  	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];  	/** -	 * @restore_backlight: -	 * -	 * Flag to indicate whether to restore backlight after modeset. -	 */ -	bool restore_backlight; - -	/**  	 * @aux_hpd_discon_quirk:  	 *  	 * quirk for hpd discon while aux is on-going. diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c index 2b1673d69ea8..1ab5ae9b5ea5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c @@ -154,10 +154,13 @@ static bool dce60_setup_scaling_configuration(  	REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0);  	if (data->taps.h_taps + data->taps.v_taps <= 2) { -		/* Set bypass */ - -		/* DCE6 has no SCL_MODE register, skip scale mode programming */ +		/* Disable scaler functionality */ +		REG_WRITE(SCL_SCALER_ENABLE, 0); +		/* Clear registers that can cause glitches even when the scaler is off */ +		REG_WRITE(SCL_TAP_CONTROL, 0); +		REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0); +		REG_WRITE(SCL_F_SHARP_CONTROL, 0);  		return false;  	} @@ -165,7 +168,7 @@ static bool dce60_setup_scaling_configuration(  			SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,  			SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1); -	/* DCE6 has no SCL_MODE register, skip scale mode programming */ +	REG_WRITE(SCL_SCALER_ENABLE, 1);  	/* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */ @@ -502,6 +505,8 @@ static void dce60_transform_set_scaler(  	REG_SET(DC_LB_MEM_SIZE, 0,  		DC_LB_MEM_SIZE, xfm_dce->lb_memory_size); +	REG_WRITE(SCL_UPDATE, 0x00010000); +  	/* Clear SCL_F_SHARP_CONTROL value to 0 */  	REG_WRITE(SCL_F_SHARP_CONTROL, 0); @@ -527,8 +532,7 @@ static void dce60_transform_set_scaler(  		if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {  			/* 4. Program vertical filters */  			if (xfm_dce->filter_v == NULL) -				REG_SET(SCL_VERT_FILTER_CONTROL, 0, -						SCL_V_2TAP_HARDCODE_COEF_EN, 0); +				REG_WRITE(SCL_VERT_FILTER_CONTROL, 0);  			program_multi_taps_filter(  					xfm_dce,  					data->taps.v_taps, @@ -542,8 +546,7 @@ static void dce60_transform_set_scaler(  			/* 5. 
Program horizontal filters */  			if (xfm_dce->filter_h == NULL) -				REG_SET(SCL_HORZ_FILTER_CONTROL, 0, -						SCL_H_2TAP_HARDCODE_COEF_EN, 0); +				REG_WRITE(SCL_HORZ_FILTER_CONTROL, 0);  			program_multi_taps_filter(  					xfm_dce,  					data->taps.h_taps, @@ -566,6 +569,8 @@ static void dce60_transform_set_scaler(  	/* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */  	/* DCE6 DATA_FORMAT register does not support ALPHA_EN */ + +	REG_WRITE(SCL_UPDATE, 0);  }  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h index cbce194ec7b8..eb716e8337e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h @@ -155,6 +155,9 @@  	SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \  	SRI(VIEWPORT_START, SCL, id), \  	SRI(VIEWPORT_SIZE, SCL, id), \ +	SRI(SCL_SCALER_ENABLE, SCL, id), \ +	SRI(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL, id), \ +	SRI(SCL_HORZ_FILTER_INIT_CHROMA, SCL, id), \  	SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \  	SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \  	SRI(SCL_VERT_FILTER_INIT, SCL, id), \ @@ -590,6 +593,7 @@ struct dce_transform_registers {  	uint32_t SCL_VERT_FILTER_SCALE_RATIO;  	uint32_t SCL_HORZ_FILTER_INIT;  #if defined(CONFIG_DRM_AMD_DC_SI) +	uint32_t SCL_SCALER_ENABLE;  	uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA;  	uint32_t SCL_HORZ_FILTER_INIT_CHROMA;  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index 17a21bcbde17..1a28061bb9ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -808,6 +808,8 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param  int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)  { +	dc_assert_fp_enabled(); +  	return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0);  } @@ -815,6 +817,8 @@ int dcn_get_approx_det_segs_required_for_pstate(  		struct _vcs_dpi_soc_bounding_box_st *soc,  		int pix_clk_100hz, int bpp, int seg_size_kb)  { +	dc_assert_fp_enabled(); +  	/* Roughly calculate required crb to hide latency. 
In practice there is slightly  	 * more buffer available for latency hiding  	 */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index c9dd920744c9..817a370e80a7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -445,6 +445,8 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,  	bool upscaled = false;  	const unsigned int max_allowed_vblank_nom = 1023; +	dc_assert_fp_enabled(); +  	dcn31_populate_dml_pipes_from_context(dc, context, pipes,  					      validate_mode); @@ -498,9 +500,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,  		pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; -		DC_FP_START();  		dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); -		DC_FP_END();  		pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;  		pipes[pipe_cnt].pipe.src.dcc_rate = 3; @@ -581,6 +581,8 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)  	unsigned int i, plane_count = 0;  	DC_LOGGER_INIT(dc->ctx->logger); +	dc_assert_fp_enabled(); +  	for (i = 0; i < dc->res_pool->pipe_count; i++) {  		if (context->res_ctx.pipe_ctx[i].plane_state)  			plane_count++; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index 8cda18ce1a76..77023b619f1e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -478,6 +478,8 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,  	bool upscaled = false;  	const unsigned int max_allowed_vblank_nom = 1023; +	dc_assert_fp_enabled(); +  	dcn31_populate_dml_pipes_from_context(dc, context, pipes,  					      validate_mode); @@ -531,9 +533,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,  		pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; -		DC_FP_START();  		dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); -		DC_FP_END();  		pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;  		pipes[pipe_cnt].pipe.src.dcc_rate = 3; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index 53c67ebe779f..b75be6ad64f6 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -404,13 +404,13 @@ static const struct dc_plane_cap plane_cap = {  	},  	.max_upscale_factor = { -			.argb8888 = 16000, +			.argb8888 = 1,  			.nv12 = 1,  			.fp16 = 1  	},  	.max_downscale_factor = { -			.argb8888 = 250, +			.argb8888 = 1,  			.nv12 = 1,  			.fp16 = 1  	} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 07552445e424..fff57f23f4f7 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -1760,6 +1760,20 @@ enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_stat  } +static int populate_dml_pipes_from_context_fpu(struct dc *dc, +					       struct dc_state *context, +					       display_e2e_pipe_params_st *pipes, +					       enum dc_validate_mode validate_mode) +{ +	int ret; + +	DC_FP_START(); +	ret = dcn35_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode); +	DC_FP_END(); + +	return ret; +} +  static 
struct resource_funcs dcn35_res_pool_funcs = {  	.destroy = dcn35_destroy_resource_pool,  	.link_enc_create = dcn35_link_encoder_create, @@ -1770,7 +1784,7 @@ static struct resource_funcs dcn35_res_pool_funcs = {  	.validate_bandwidth = dcn35_validate_bandwidth,  	.calculate_wm_and_dlg = NULL,  	.update_soc_for_wm_a = dcn31_update_soc_for_wm_a, -	.populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu, +	.populate_dml_pipes = populate_dml_pipes_from_context_fpu,  	.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,  	.release_pipe = dcn20_release_pipe,  	.add_stream_to_ctx = dcn30_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index cb0478a9a34d..0abd163b425e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -1732,6 +1732,21 @@ static enum dc_status dcn351_validate_bandwidth(struct dc *dc,  	return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;  } +static int populate_dml_pipes_from_context_fpu(struct dc *dc, +					       struct dc_state *context, +					       display_e2e_pipe_params_st *pipes, +					       enum dc_validate_mode validate_mode) +{ +	int ret; + +	DC_FP_START(); +	ret = dcn351_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode); +	DC_FP_END(); + +	return ret; + +} +  static struct resource_funcs dcn351_res_pool_funcs = {  	.destroy = dcn351_destroy_resource_pool,  	.link_enc_create = dcn35_link_encoder_create, @@ -1742,7 +1757,7 @@ static struct resource_funcs dcn351_res_pool_funcs = {  	.validate_bandwidth = dcn351_validate_bandwidth,  	.calculate_wm_and_dlg = NULL,  	.update_soc_for_wm_a = dcn31_update_soc_for_wm_a, -	.populate_dml_pipes = dcn351_populate_dml_pipes_from_context_fpu, +	.populate_dml_pipes = populate_dml_pipes_from_context_fpu,  	.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,  	.release_pipe = dcn20_release_pipe,  	.add_stream_to_ctx = dcn30_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c index 126090c9bb8a..ca125ee6c2fb 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c @@ -1734,6 +1734,20 @@ static enum dc_status dcn35_validate_bandwidth(struct dc *dc,  } +static int populate_dml_pipes_from_context_fpu(struct dc *dc, +					       struct dc_state *context, +					       display_e2e_pipe_params_st *pipes, +					       enum dc_validate_mode validate_mode) +{ +	int ret; + +	DC_FP_START(); +	ret = dcn35_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode); +	DC_FP_END(); + +	return ret; +} +  static struct resource_funcs dcn36_res_pool_funcs = {  	.destroy = dcn36_destroy_resource_pool,  	.link_enc_create = dcn35_link_encoder_create, @@ -1744,7 +1758,7 @@ static struct resource_funcs dcn36_res_pool_funcs = {  	.validate_bandwidth = dcn35_validate_bandwidth,  	.calculate_wm_and_dlg = NULL,  	.update_soc_for_wm_a = dcn31_update_soc_for_wm_a, -	.populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu, +	.populate_dml_pipes = populate_dml_pipes_from_context_fpu,  	.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,  	.release_pipe = dcn20_release_pipe,  	.add_stream_to_ctx = dcn30_add_stream_to_ctx, diff --git 
a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c index 55b929ca7982..b1fb0f8a253a 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c @@ -641,16 +641,16 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,  		/* this gives the direction of the cositing (negative will move  		 * left, right otherwise)  		 */ -		int sign = 1; +		int h_sign = flip_horz_scan_dir ? -1 : 1; +		int v_sign = flip_vert_scan_dir ? -1 : 1;  		switch (spl_in->basic_in.cositing) { -  		case CHROMA_COSITING_TOPLEFT: -			init_adj_h = spl_fixpt_from_fraction(sign, 4); -			init_adj_v = spl_fixpt_from_fraction(sign, 4); +			init_adj_h = spl_fixpt_from_fraction(h_sign, 4); +			init_adj_v = spl_fixpt_from_fraction(v_sign, 4);  			break;  		case CHROMA_COSITING_LEFT: -			init_adj_h = spl_fixpt_from_fraction(sign, 4); +			init_adj_h = spl_fixpt_from_fraction(h_sign, 4);  			init_adj_v = spl_fixpt_zero;  			break;  		case CHROMA_COSITING_NONE: diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h index 9de01ae574c0..067eddd9c62d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h @@ -4115,6 +4115,7 @@  #define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1B55  #define mmSCL0_SCL_COEF_RAM_SELECT 0x1B40  #define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1B41 +#define mmSCL0_SCL_SCALER_ENABLE 0x1B42  #define mmSCL0_SCL_CONTROL 0x1B44  #define mmSCL0_SCL_DEBUG 0x1B6A  #define mmSCL0_SCL_DEBUG2 0x1B69 @@ -4144,6 +4145,7 @@  #define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1E55  #define mmSCL1_SCL_COEF_RAM_SELECT 0x1E40  #define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1E41 +#define mmSCL1_SCL_SCALER_ENABLE 0x1E42  #define mmSCL1_SCL_CONTROL 0x1E44  #define mmSCL1_SCL_DEBUG 0x1E6A  #define mmSCL1_SCL_DEBUG2 0x1E69 @@ -4173,6 +4175,7 @@  #define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x4155  #define mmSCL2_SCL_COEF_RAM_SELECT 0x4140  #define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x4141 +#define mmSCL2_SCL_SCALER_ENABLE 0x4142  #define mmSCL2_SCL_CONTROL 0x4144  #define mmSCL2_SCL_DEBUG 0x416A  #define mmSCL2_SCL_DEBUG2 0x4169 @@ -4202,6 +4205,7 @@  #define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4455  #define mmSCL3_SCL_COEF_RAM_SELECT 0x4440  #define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4441 +#define mmSCL3_SCL_SCALER_ENABLE 0x4442  #define mmSCL3_SCL_CONTROL 0x4444  #define mmSCL3_SCL_DEBUG 0x446A  #define mmSCL3_SCL_DEBUG2 0x4469 @@ -4231,6 +4235,7 @@  #define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4755  #define mmSCL4_SCL_COEF_RAM_SELECT 0x4740  #define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4741 +#define mmSCL4_SCL_SCALER_ENABLE 0x4742  #define mmSCL4_SCL_CONTROL 0x4744  #define mmSCL4_SCL_DEBUG 0x476A  #define mmSCL4_SCL_DEBUG2 0x4769 @@ -4260,6 +4265,7 @@  #define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4A55  #define mmSCL5_SCL_COEF_RAM_SELECT 0x4A40  #define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4A41 +#define mmSCL5_SCL_SCALER_ENABLE 0x4A42  #define mmSCL5_SCL_CONTROL 0x4A44  #define mmSCL5_SCL_DEBUG 0x4A6A  #define mmSCL5_SCL_DEBUG2 0x4A69 @@ -4287,6 +4293,7 @@  #define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1B55  #define mmSCL_COEF_RAM_SELECT 0x1B40  #define mmSCL_COEF_RAM_TAP_DATA 0x1B41 +#define mmSCL_SCALER_ENABLE 0x1B42  #define mmSCL_CONTROL 0x1B44  #define mmSCL_DEBUG 0x1B6A  #define mmSCL_DEBUG2 0x1B69 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h 
index 2d6a598a6c25..9317a7afa621 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h @@ -8650,6 +8650,8 @@  #define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x00000000  #define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x00000007L  #define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x00000000 +#define SCL_SCALER_ENABLE__SCL_SCALE_EN_MASK 0x00000001L +#define SCL_SCALER_ENABLE__SCL_SCALE_EN__SHIFT 0x00000000  #define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x00000001L  #define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x00000000  #define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x00000003L diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index cf9932e68055..3a9522c17fee 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -3500,6 +3500,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,  	 * for these GPUs to calculate bandwidth requirements.  	 */  	if (high_pixelclock_count) { +		/* Work around flickering lines at the bottom edge +		 * of the screen when using a single 4K 60Hz monitor. +		 */ +		disable_mclk_switching = true; +  		/* On Oland, we observe some flickering when two 4K 60Hz  		 * displays are connected, possibly because voltage is too low.  		 * Raise the voltage by requiring a higher SCLK. diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 8da882c51856..9b28c0728269 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -5444,8 +5444,7 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,  		thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *  			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;  	else if (hwmgr->pp_table_version == PP_TABLE_V0) -		thermal_data->max = data->thermal_temp_setting.temperature_shutdown * -			PP_TEMPERATURE_UNITS_PER_CENTIGRADES; +		thermal_data->max = data->thermal_temp_setting.temperature_shutdown;  	thermal_data->sw_ctf_threshold = thermal_data->max; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 1a1f2a6b2e52..a89075e25717 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -288,7 +288,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)  	 * Considering above, we just leave user a verbal message instead  	 * of halt driver loading.  	 
*/ -	if (if_version != smu->smc_driver_if_version) { +	if (smu->smc_driver_if_version != SMU_IGNORE_IF_VERSION && +	    if_version != smu->smc_driver_if_version) {  		dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "  			 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",  			 smu->smc_driver_if_version, if_version, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index cbe5b06438c1..285cf7979693 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -450,8 +450,7 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)  	    ((pgm == 4) && (fw_ver >= 0x4557000)))  		smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); -	if (((pgm == 0) && (fw_ver >= 0x00558200)) || -	    ((pgm == 4) && (fw_ver >= 0x04557100))) +	if ((pgm == 0) && (fw_ver >= 0x00558200))  		smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));  } @@ -3933,7 +3932,7 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)  	smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?  		smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map;  	smu->table_map = smu_v13_0_6_table_map; -	smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; +	smu->smc_driver_if_version = SMU_IGNORE_IF_VERSION;  	smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;  	smu_v13_0_set_smu_mailbox_registers(smu);  	smu_v13_0_6_set_temp_funcs(smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index d588f74b98de..0ae91c8b6d72 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -40,6 +40,8 @@  #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL        0x8  #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY        0x9 +#define SMU_IGNORE_IF_VERSION 0xFFFFFFFF +  #define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev)                   \  	do {                                                             \  		typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \ | 
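
The evict_process_queues_cpsch() hunk above brackets the whole eviction pass with suspend_all_queues_mes()/resume_all_queues_mes() when MES is enabled, records last_evict_timestamp up front, and aborts on the first remove_queue_mes() failure instead of remembering the error and continuing. A compilable sketch of that control flow, with the KFD/MES helpers replaced by illustrative stand-ins (suspend_all_queues(), resume_all_queues() and remove_queue() below are not the real driver functions):

#include <stdio.h>

/* illustrative stand-ins for the MES helpers used in the hunk */
static int suspend_all_queues(void)	{ return 0; }
static int resume_all_queues(void)	{ return 0; }
static int remove_queue(int id)		{ (void)id; return 0; }

static int evict_process_queues(const int *queue_ids, int count)
{
	int i, ret;

	ret = suspend_all_queues();
	if (ret) {
		fprintf(stderr, "Suspending all queues failed\n");
		goto out;
	}

	for (i = 0; i < count; i++) {
		ret = remove_queue(queue_ids[i]);
		if (ret) {
			fprintf(stderr, "Failed to evict queue %d\n", queue_ids[i]);
			goto out;	/* first failure aborts, as in the patch */
		}
	}

	ret = resume_all_queues();
	if (ret)
		fprintf(stderr, "Resuming all queues failed\n");
out:
	return ret;
}

Suspending everything first means the per-queue removals run against quiesced queues, which is presumably why the separate kfd_dqm_evict_pasid_mes() wrapper that used to do the suspend/resume dance around evict_process_queues() could be deleted and kfd_evict_process_device() now calls dqm->ops.evict_process_queues() directly.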
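
The kfd_svm.c change above makes the early "drop fault" exit release the same lock mode as every other path: if the handler had upgraded to the mmap write lock, it downgrades back to read mode before jumping to the common unlock. A rough sketch of that pattern; the locking helpers here are empty stand-ins for the kernel's mmap_lock API (see linux/mmap_lock.h), not real implementations:

#include <errno.h>
#include <stdbool.h>

/* stand-ins for the mm locking helpers */
static void mmap_read_lock(void)		{}
static void mmap_read_unlock(void)		{}
static int  mmap_write_lock_killable(void)	{ return 0; }
static void mmap_write_downgrade(void)		{}

static int handle_fault(bool need_write, bool drop_fault)
{
	bool write_locked = false;
	int r = 0;

	mmap_read_lock();
	if (need_write) {
		mmap_read_unlock();
		if (mmap_write_lock_killable())
			return -EINTR;
		write_locked = true;
	}

	if (drop_fault) {
		/* the fix: leave this path holding only the read lock */
		if (write_locked)
			mmap_write_downgrade();
		r = -EAGAIN;
		goto out_unlock;
	}

	/* ... fault handling elided; downgrades before long operations ... */
	if (write_locked)
		mmap_write_downgrade();

out_unlock:
	mmap_read_unlock();
	return r;
}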
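
In the dce60_setup_scaling_configuration() hunk, the bypass path (h_taps + v_taps <= 2) now explicitly writes SCL_SCALER_ENABLE = 0 and clears the tap, automatic-mode and sharpness registers, while the scaling path programs the tap counts and then writes SCL_SCALER_ENABLE = 1. A simplified sketch of that decision; the register indices, the REG_WRITE macro and the tap field packing below are placeholders rather than the real DCE6 programming model:

#include <stdbool.h>
#include <stdint.h>

enum { SCL_SCALER_ENABLE, SCL_TAP_CONTROL, SCL_AUTOMATIC_MODE_CONTROL,
       SCL_F_SHARP_CONTROL, NUM_SCL_REGS };		/* placeholder indices */
static uint32_t scl_regs[NUM_SCL_REGS];
#define REG_WRITE(reg, val)	(scl_regs[reg] = (uint32_t)(val))

/* returns true when the scaler is actually used */
static bool setup_scaling(int h_taps, int v_taps)
{
	if (h_taps + v_taps <= 2) {
		/* bypass: disable the scaler and clear registers that can
		 * glitch even while it is off */
		REG_WRITE(SCL_SCALER_ENABLE, 0);
		REG_WRITE(SCL_TAP_CONTROL, 0);
		REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
		REG_WRITE(SCL_F_SHARP_CONTROL, 0);
		return false;
	}

	/* scaling: program taps - 1, then enable (packing is illustrative) */
	REG_WRITE(SCL_TAP_CONTROL, ((v_taps - 1) << 8) | (h_taps - 1));
	REG_WRITE(SCL_SCALER_ENABLE, 1);
	return true;
}

dce60_transform_set_scaler() additionally brackets the coefficient and filter programming with SCL_UPDATE writes (0x00010000 before, 0 after), as the hunk above adds.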
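
The dcn35/dcn351/dcn36 resource changes above all follow the same pattern: the DC_FP_START()/DC_FP_END() bracket moves out of the *_fpu helpers into a thin local wrapper registered as .populate_dml_pipes, and the helpers gain dc_assert_fp_enabled() checks so a caller that forgets the bracket is caught. A minimal sketch of that split; the DC_FP_START()/DC_FP_END()/dc_assert_fp_enabled() stand-ins below are stubbed with a boolean and are not the real DC implementations:

#include <assert.h>
#include <stdbool.h>

static bool fp_enabled;				/* stand-in for the FPU-protection state */
#define DC_FP_START()		(fp_enabled = true)	/* real macro saves the FPU context */
#define DC_FP_END()		(fp_enabled = false)	/* real macro restores it */
#define dc_assert_fp_enabled()	assert(fp_enabled)

/* FP-using helper: only valid inside a DC_FP_START()/DC_FP_END() bracket */
static int populate_dml_pipes_fpu(int pipe_cnt)
{
	dc_assert_fp_enabled();
	return pipe_cnt;			/* the actual FP math is elided */
}

/* non-FP wrapper owning the bracket, as the resource files now do */
static int populate_dml_pipes(int pipe_cnt)
{
	int ret;

	DC_FP_START();
	ret = populate_dml_pipes_fpu(pipe_cnt);
	DC_FP_END();

	return ret;
}

Keeping the bracket in a single caller also lets the helpers drop their own nested DC_FP_START()/DC_FP_END() pairs, as the dcn35/dcn351 FPU hunks above do around dcn31_zero_pipe_dcc_fraction().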
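
The smu_v13_0 / smu_cmn.h changes above introduce SMU_IGNORE_IF_VERSION (0xFFFFFFFF) as a sentinel driver-side interface version: smu_v13_0_check_fw_version() skips the mismatch message when it is set, and SMU 13.0.6 opts in by assigning the sentinel instead of SMU13_0_6_DRIVER_IF_VERSION. A small sketch of the check; the struct and the message are simplified stand-ins:

#include <stdint.h>
#include <stdio.h>

#define SMU_IGNORE_IF_VERSION	0xFFFFFFFFu

struct smu_ctx {
	uint32_t smc_driver_if_version;		/* what the driver was built against */
};

static void check_fw_version(const struct smu_ctx *smu, uint32_t fw_if_version)
{
	if (smu->smc_driver_if_version != SMU_IGNORE_IF_VERSION &&
	    fw_if_version != smu->smc_driver_if_version)
		fprintf(stderr,
			"smu driver if version = 0x%08x, smu fw if version = 0x%08x\n",
			smu->smc_driver_if_version, fw_if_version);
}

As in the original code, a mismatch only produces a message rather than failing the load; the sentinel simply silences it for families that no longer pin a single interface version.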
