Diffstat (limited to 'drivers/gpu/drm/amd')
47 files changed, 651 insertions, 355 deletions
| diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 2f9c14aca73c..a3b86b86dc47 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1296,6 +1296,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,  void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);  int amdgpu_device_pci_reset(struct amdgpu_device *adev);  bool amdgpu_device_need_post(struct amdgpu_device *adev); +bool amdgpu_device_pcie_dynamic_switching_supported(void);  bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);  bool amdgpu_device_aspm_support_quirk(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index f61527b800e6..d34c3ef8f3ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1709,7 +1709,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(  			alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?  			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;  		} -		xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id; +		xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? +					0 : fpriv->xcp_id;  	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {  		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;  		alloc_flags = 0; @@ -2881,6 +2882,9 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)  			if (!attachment->is_mapped)  				continue; +			if (attachment->bo_va->base.bo->tbo.pin_count) +				continue; +  			kfd_mem_dmaunmap_attachment(mem, attachment);  			ret = update_gpuvm_pte(mem, attachment, &sync_obj);  			if (ret) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a92c6189b4b6..a2cdde0ca0a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1458,6 +1458,25 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)  	return true;  } +/* + * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic + * speed switching. Until we have confirmation from Intel that a specific host + * supports it, it's safer that we keep it disabled for all. 
+ * + * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/ + * https://gitlab.freedesktop.org/drm/amd/-/issues/2663 + */ +bool amdgpu_device_pcie_dynamic_switching_supported(void) +{ +#if IS_ENABLED(CONFIG_X86) +	struct cpuinfo_x86 *c = &cpu_data(0); + +	if (c->x86_vendor == X86_VENDOR_INTEL) +		return false; +#endif +	return true; +} +  /**   * amdgpu_device_should_use_aspm - check if the device should program ASPM   * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index cca5a495611f..12414a713256 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1229,13 +1229,13 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)  		pasid = 0;  	} -	r = amdgpu_vm_init(adev, &fpriv->vm); +	r = amdgpu_xcp_open_device(adev, fpriv, file_priv);  	if (r)  		goto error_pasid; -	r = amdgpu_xcp_open_device(adev, fpriv, file_priv); +	r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);  	if (r) -		goto error_vm; +		goto error_pasid;  	r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);  	if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index e9091ebfe230..f808841310fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -1382,7 +1382,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)  		goto error_pasid;  	} -	r = amdgpu_vm_init(adev, vm); +	r = amdgpu_vm_init(adev, vm, -1);  	if (r) {  		DRM_ERROR("failed to initialize vm\n");  		goto error_pasid; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 6d676bdd1505..78d1ee71f3f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -498,11 +498,11 @@ static int psp_sw_init(void *handle)  	return 0;  failed2: -	amdgpu_bo_free_kernel(&psp->fw_pri_bo, -			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf); -failed1:  	amdgpu_bo_free_kernel(&psp->fence_buf_bo,  			      &psp->fence_buf_mc_addr, &psp->fence_buf); +failed1: +	amdgpu_bo_free_kernel(&psp->fw_pri_bo, +			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);  	return ret;  } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 53ff91fc6cf6..d0748bcfad16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -55,8 +55,9 @@ static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)  		DRM_WARN("%s: vblank timer overrun\n", __func__);  	ret = drm_crtc_handle_vblank(crtc); +	/* Don't queue timer again when vblank is disabled. 
*/  	if (!ret) -		DRM_ERROR("amdgpu_vkms failure on handling vblank"); +		return HRTIMER_NORESTART;  	return HRTIMER_RESTART;  } @@ -81,7 +82,7 @@ static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)  {  	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); -	hrtimer_cancel(&amdgpu_crtc->vblank_timer); +	hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);  }  static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 291977b93b1d..ec1ec08d4058 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2121,13 +2121,14 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)   *   * @adev: amdgpu_device pointer   * @vm: requested vm + * @xcp_id: GPU partition selection id   *   * Init @vm fields.   *   * Returns:   * 0 for success, error for failure.   */ -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)  {  	struct amdgpu_bo *root_bo;  	struct amdgpu_bo_vm *root; @@ -2177,7 +2178,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)  	vm->evicting = false;  	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, -				false, &root); +				false, &root, xcp_id);  	if (r)  		goto error_free_delayed;  	root_bo = &root->bo; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 9c85d494f2a2..ffac7413c657 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -392,7 +392,7 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,  			u32 pasid);  long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout); -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);  int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);  void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);  void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); @@ -475,7 +475,8 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm,  int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,  		       struct amdgpu_bo_vm *vmbo, bool immediate);  int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, -			int level, bool immediate, struct amdgpu_bo_vm **vmbo); +			int level, bool immediate, struct amdgpu_bo_vm **vmbo, +			int32_t xcp_id);  void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);  bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,  				struct amdgpu_vm *vm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index dea1a64be44d..5431332bbdb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -498,11 +498,12 @@ exit:   * @level: the page table level   * @immediate: use a immediate update   * @vmbo: pointer to the buffer object pointer + * @xcp_id: GPU partition id   */  int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, -			int level, bool immediate, struct amdgpu_bo_vm **vmbo) +			int level, bool immediate, struct amdgpu_bo_vm **vmbo, +			int32_t xcp_id)  { -	struct amdgpu_fpriv *fpriv = container_of(vm, struct amdgpu_fpriv, vm);  	struct amdgpu_bo_param bp;  	struct 
amdgpu_bo *bo;  	struct dma_resv *resv; @@ -535,7 +536,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,  	bp.type = ttm_bo_type_kernel;  	bp.no_wait_gpu = immediate; -	bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1; +	bp.xcp_id_plus1 = xcp_id + 1;  	if (vm->root.bo)  		bp.resv = vm->root.bo->tbo.base.resv; @@ -561,7 +562,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,  	bp.type = ttm_bo_type_kernel;  	bp.resv = bo->tbo.base.resv;  	bp.bo_ptr_size = sizeof(struct amdgpu_bo); -	bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1; +	bp.xcp_id_plus1 = xcp_id + 1;  	r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow); @@ -606,7 +607,8 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,  		return 0;  	amdgpu_vm_eviction_unlock(vm); -	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt); +	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt, +				vm->root.bo->xcp_id);  	amdgpu_vm_eviction_lock(vm);  	if (r)  		return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index d175e862f222..9c9cca129498 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -363,7 +363,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,  	if (!adev->xcp_mgr)  		return 0; -	fpriv->xcp_id = ~0; +	fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;  	for (i = 0; i < MAX_XCP; ++i) {  		if (!adev->xcp_mgr->xcp[i].ddev)  			break; @@ -381,7 +381,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,  		}  	} -	fpriv->vm.mem_id = fpriv->xcp_id == ~0 ? -1 : +	fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :  				adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;  	return 0;  } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 0f8026d64ea5..9a1036aeec2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -37,6 +37,8 @@  #define AMDGPU_XCP_FL_NONE 0  #define AMDGPU_XCP_FL_LOCKED (1 << 0) +#define AMDGPU_XCP_NO_PARTITION (~0) +  struct amdgpu_fpriv;  enum AMDGPU_XCP_IP_BLOCK { diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index 16471b81a1f5..72b629a78c62 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -68,7 +68,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,  	enum AMDGPU_XCP_IP_BLOCK ip_blk;  	uint32_t inst_mask; -	ring->xcp_id = ~0; +	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;  	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)  		return; @@ -177,7 +177,7 @@ static int aqua_vanjaram_select_scheds(  	u32 sel_xcp_id;  	int i; -	if (fpriv->xcp_id == ~0) { +	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {  		u32 least_ref_cnt = ~0;  		fpriv->xcp_id = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 9e3b835bdbb2..4f883b94f98e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");  #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L  #define GOLDEN_GB_ADDR_CONFIG 0x2a114042 +#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301  struct amdgpu_gfx_ras gfx_v9_4_3_ras; @@ -1736,7 +1737,7 @@ static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,  	WREG32_SOC15_RLC(GC, GET_INST(GC, 
xcc_id), regCP_HQD_IQ_TIMER, 0);  	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0); -	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, 0); +	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);  	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);  	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);  	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 4dabf910334b..d9f14dc55998 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -402,18 +402,15 @@ static void gfxhub_v1_2_xcc_program_invalidation(struct amdgpu_device *adev,  static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev,  				       uint32_t xcc_mask)  { -	uint32_t tmp_mask;  	int i; -	tmp_mask = xcc_mask;  	/*  	 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are  	 * VF copy registers so vbios post doesn't program them, for  	 * SRIOV driver need to program them  	 */  	if (amdgpu_sriov_vf(adev)) { -		for_each_inst(i, tmp_mask) { -			i = ffs(tmp_mask) - 1; +		for_each_inst(i, xcc_mask) {  			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE,  				     adev->gmc.vram_start >> 24);  			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index f9cb0d2c89d1..e1a392bcea70 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");  MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");  MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");  MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");  /* For large FW files the time to complete can be very long */  #define USBC_PD_POLLING_LIMIT_S 240 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index fff3ccc04fa9..9766076e9ec4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -302,8 +302,7 @@ static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable)  	if (!q)  		return 0; -	if (KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) || -	    KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0)) +	if (!kfd_dbg_has_cwsr_workaround(q->device))  		return 0;  	if (enable && q->properties.is_user_cu_masked) @@ -349,7 +348,7 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd)  {  	uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;  	uint32_t flags = pdd->process->dbg_flags; -	bool sq_trap_en = !!spi_dbg_cntl; +	bool sq_trap_en = !!spi_dbg_cntl || !kfd_dbg_has_cwsr_workaround(pdd->dev);  	if (!kfd_dbg_is_per_vmid_supported(pdd->dev))  		return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index a289e59ceb79..662a13a0d582 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -100,6 +100,12 @@ static inline bool kfd_dbg_is_rlc_restore_supported(struct kfd_node *dev)  		 KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 1));  } +static inline bool kfd_dbg_has_cwsr_workaround(struct kfd_node *dev) +{ +	return KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) && +	       KFD_GC_VERSION(dev) <= IP_VERSION(11, 0, 
3); +} +  static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev)  {  	if ((KFD_GC_VERSION(dev) == IP_VERSION(9, 0, 1) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index f515cb8f30ca..2df153828ff4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -226,8 +226,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,  	queue_input.paging = false;  	queue_input.tba_addr = qpd->tba_addr;  	queue_input.tma_addr = qpd->tma_addr; -	queue_input.trap_en = KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) || -			      KFD_GC_VERSION(q->device) > IP_VERSION(11, 0, 3); +	queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);  	queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled;  	queue_type = convert_to_mes_queue_type(q->properties.type); @@ -1806,8 +1805,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,  	 */  	q->properties.is_evicted = !!qpd->evicted;  	q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled && -			KFD_GC_VERSION(q->device) >= IP_VERSION(11, 0, 0) && -			KFD_GC_VERSION(q->device) <= IP_VERSION(11, 0, 3); +				  kfd_dbg_has_cwsr_workaround(q->device);  	if (qd)  		mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index ff0a217b9d56..0fa739fd6a9c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -424,12 +424,12 @@ static void dm_pflip_high_irq(void *interrupt_params)  	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); -	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ -		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n", -						 amdgpu_crtc->pflip_status, -						 AMDGPU_FLIP_SUBMITTED, -						 amdgpu_crtc->crtc_id, -						 amdgpu_crtc); +	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { +		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n", +			     amdgpu_crtc->pflip_status, +			     AMDGPU_FLIP_SUBMITTED, +			     amdgpu_crtc->crtc_id, +			     amdgpu_crtc);  		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);  		return;  	} @@ -883,7 +883,7 @@ static int dm_set_powergating_state(void *handle,  }  /* Prototypes of private functions */ -static int dm_early_init(void* handle); +static int dm_early_init(void *handle);  /* Allocate memory for FBC compressed data  */  static void amdgpu_dm_fbc_init(struct drm_connector *connector) @@ -1282,7 +1282,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_  	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;  	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18; -	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ; +	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;  	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;  	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24; @@ -1347,6 +1347,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)  	if (amdgpu_in_reset(adev))  		goto skip; +	if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || +		
offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { +		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT); +		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); +		offload_work->offload_wq->is_handling_mst_msg_rdy_event = false; +		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); +		goto skip; +	} +  	mutex_lock(&adev->dm.dc_lock);  	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {  		dc_link_dp_handle_automated_test(dc_link); @@ -1365,8 +1374,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)  		DP_TEST_RESPONSE,  		&test_response.raw,  		sizeof(test_response)); -	} -	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && +	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&  			dc_link_check_link_loss_status(dc_link, &offload_work->data) &&  			dc_link_dp_allow_hpd_rx_irq(dc_link)) {  		/* offload_work->data is from handle_hpd_rx_irq-> @@ -1554,7 +1562,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	mutex_init(&adev->dm.dc_lock);  	mutex_init(&adev->dm.audio_lock); -	if(amdgpu_dm_irq_init(adev)) { +	if (amdgpu_dm_irq_init(adev)) {  		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");  		goto error;  	} @@ -1696,9 +1704,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)  		adev->dm.dc->debug.disable_stutter = true; -	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { +	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)  		adev->dm.dc->debug.disable_dsc = true; -	}  	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)  		adev->dm.dc->debug.disable_clock_gate = true; @@ -1942,8 +1949,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)  	mutex_destroy(&adev->dm.audio_lock);  	mutex_destroy(&adev->dm.dc_lock);  	mutex_destroy(&adev->dm.dpia_aux_lock); - -	return;  }  static int load_dmcu_fw(struct amdgpu_device *adev) @@ -1952,7 +1957,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)  	int r;  	const struct dmcu_firmware_header_v1_0 *hdr; -	switch(adev->asic_type) { +	switch (adev->asic_type) {  #if defined(CONFIG_DRM_AMD_DC_SI)  	case CHIP_TAHITI:  	case CHIP_PITCAIRN: @@ -2709,7 +2714,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,  		struct dc_scaling_info scaling_infos[MAX_SURFACES];  		struct dc_flip_addrs flip_addrs[MAX_SURFACES];  		struct dc_stream_update stream_update; -	} * bundle; +	} *bundle;  	int k, m;  	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); @@ -2739,8 +2744,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,  cleanup:  	kfree(bundle); - -	return;  }  static int dm_resume(void *handle) @@ -2954,8 +2957,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {  	.set_powergating_state = dm_set_powergating_state,  }; -const struct amdgpu_ip_block_version dm_ip_block = -{ +const struct amdgpu_ip_block_version dm_ip_block = {  	.type = AMD_IP_BLOCK_TYPE_DCE,  	.major = 1,  	.minor = 0, @@ -3000,9 +3002,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)  	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;  	caps->aux_support = false; -	if (caps->ext_caps->bits.oled == 1 /*|| -	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 || -	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/) +	if (caps->ext_caps->bits.oled == 1 +	    /* +	     * || +	     * caps->ext_caps->bits.sdr_aux_backlight_control == 1 || +	     * caps->ext_caps->bits.hdr_aux_backlight_control == 
1 +	     */)  		caps->aux_support = true;  	if (amdgpu_backlight == 0) @@ -3236,86 +3241,6 @@ static void handle_hpd_irq(void *param)  } -static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector) -{ -	u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; -	u8 dret; -	bool new_irq_handled = false; -	int dpcd_addr; -	int dpcd_bytes_to_read; - -	const int max_process_count = 30; -	int process_count = 0; - -	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); - -	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { -		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; -		/* DPCD 0x200 - 0x201 for downstream IRQ */ -		dpcd_addr = DP_SINK_COUNT; -	} else { -		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; -		/* DPCD 0x2002 - 0x2005 for downstream IRQ */ -		dpcd_addr = DP_SINK_COUNT_ESI; -	} - -	dret = drm_dp_dpcd_read( -		&aconnector->dm_dp_aux.aux, -		dpcd_addr, -		esi, -		dpcd_bytes_to_read); - -	while (dret == dpcd_bytes_to_read && -		process_count < max_process_count) { -		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {}; -		u8 retry; -		dret = 0; - -		process_count++; - -		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); -		/* handle HPD short pulse irq */ -		if (aconnector->mst_mgr.mst_state) -			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr, -							esi, -							ack, -							&new_irq_handled); - -		if (new_irq_handled) { -			/* ACK at DPCD to notify down stream */ -			for (retry = 0; retry < 3; retry++) { -				ssize_t wret; - -				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux, -							  dpcd_addr + 1, -							  ack[1]); -				if (wret == 1) -					break; -			} - -			if (retry == 3) { -				DRM_ERROR("Failed to ack MST event.\n"); -				return; -			} - -			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr); -			/* check if there is new irq to be handled */ -			dret = drm_dp_dpcd_read( -				&aconnector->dm_dp_aux.aux, -				dpcd_addr, -				esi, -				dpcd_bytes_to_read); - -			new_irq_handled = false; -		} else { -			break; -		} -	} - -	if (process_count == max_process_count) -		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); -} -  static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,  							union hpd_irq_data hpd_irq_data)  { @@ -3377,7 +3302,23 @@ static void handle_hpd_rx_irq(void *param)  	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {  		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||  			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { -			dm_handle_mst_sideband_msg(aconnector); +			bool skip = false; + +			/* +			 * DOWN_REP_MSG_RDY is also handled by polling method +			 * mgr->cbs->poll_hpd_irq() +			 */ +			spin_lock(&offload_wq->offload_lock); +			skip = offload_wq->is_handling_mst_msg_rdy_event; + +			if (!skip) +				offload_wq->is_handling_mst_msg_rdy_event = true; + +			spin_unlock(&offload_wq->offload_lock); + +			if (!skip) +				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); +  			goto out;  		} @@ -3468,7 +3409,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)  		aconnector = to_amdgpu_dm_connector(connector);  		dc_link = aconnector->dc_link; -		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { +		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {  			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;  			int_params.irq_source = dc_link->irq_source_hpd; @@ -3477,7 +3418,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)  					(void *) 
aconnector);  		} -		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { +		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {  			/* Also register for DP short pulse (hpd_rx). */  			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; @@ -3486,11 +3427,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)  			amdgpu_dm_irq_register_interrupt(adev, &int_params,  					handle_hpd_rx_irq,  					(void *) aconnector); - -			if (adev->dm.hpd_rx_offload_wq) -				adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector = -					aconnector;  		} + +		if (adev->dm.hpd_rx_offload_wq) +			adev->dm.hpd_rx_offload_wq[connector->index].aconnector = +				aconnector;  	}  } @@ -3503,7 +3444,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)  	struct dc_interrupt_params int_params = {0};  	int r;  	int i; -	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; +	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;  	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;  	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; @@ -3517,11 +3458,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)  	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts  	 *    coming from DC hardware.  	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC -	 *    for acknowledging and handling. */ +	 *    for acknowledging and handling. +	 */  	/* Use VBLANK interrupt */  	for (i = 0; i < adev->mode_info.num_crtc; i++) { -		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq); +		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);  		if (r) {  			DRM_ERROR("Failed to add crtc irq id!\n");  			return r; @@ -3529,7 +3471,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)  		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;  		int_params.irq_source = -			dc_interrupt_to_irq_source(dc, i+1 , 0); +			dc_interrupt_to_irq_source(dc, i + 1, 0);  		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; @@ -3585,7 +3527,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)  	struct dc_interrupt_params int_params = {0};  	int r;  	int i; -	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; +	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;  	if (adev->family >= AMDGPU_FAMILY_AI)  		client_id = SOC15_IH_CLIENTID_DCE; @@ -3602,7 +3544,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)  	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts  	 *    coming from DC hardware.  	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC -	 *    for acknowledging and handling. */ +	 *    for acknowledging and handling. 
+	 */  	/* Use VBLANK interrupt */  	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { @@ -4049,7 +3992,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,  }  static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, -				unsigned *min, unsigned *max) +				unsigned int *min, unsigned int *max)  {  	if (!caps)  		return 0; @@ -4069,7 +4012,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,  static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,  					uint32_t brightness)  { -	unsigned min, max; +	unsigned int min, max;  	if (!get_brightness_range(caps, &min, &max))  		return brightness; @@ -4082,7 +4025,7 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c  static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,  				      uint32_t brightness)  { -	unsigned min, max; +	unsigned int min, max;  	if (!get_brightness_range(caps, &min, &max))  		return brightness; @@ -4562,7 +4505,6 @@ fail:  static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)  {  	drm_atomic_private_obj_fini(&dm->atomic_obj); -	return;  }  /****************************************************************************** @@ -5394,6 +5336,7 @@ static bool adjust_colour_depth_from_display_info(  {  	enum dc_color_depth depth = timing_out->display_color_depth;  	int normalized_clk; +  	do {  		normalized_clk = timing_out->pix_clk_100hz / 10;  		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */ @@ -5609,6 +5552,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)  {  	struct dc_sink_init_data sink_init_data = { 0 };  	struct dc_sink *sink = NULL; +  	sink_init_data.link = aconnector->dc_link;  	sink_init_data.sink_signal = aconnector->dc_link->connector_signal; @@ -5732,7 +5676,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,  		return &aconnector->freesync_vid_base;  	/* Find the preferred mode */ -	list_for_each_entry (m, list_head, head) { +	list_for_each_entry(m, list_head, head) {  		if (m->type & DRM_MODE_TYPE_PREFERRED) {  			m_pref = m;  			break; @@ -5756,7 +5700,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,  	 * For some monitors, preferred mode is not the mode with highest  	 * supported refresh rate.  	 */ -	list_for_each_entry (m, list_head, head) { +	list_for_each_entry(m, list_head, head) {  		current_refresh  = drm_mode_vrefresh(m);  		if (m->hdisplay == m_pref->hdisplay && @@ -6028,7 +5972,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  		 * This may not be an error, the use case is when we have no  		 * usermode calls to reset and set mode upon hotplug. In this  		 * case, we call set mode ourselves to restore the previous mode -		 * and the modelist may not be filled in in time. +		 * and the modelist may not be filled in time.  		 
*/  		DRM_DEBUG_DRIVER("No preferred mode found\n");  	} else { @@ -6051,9 +5995,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,  		drm_mode_set_crtcinfo(&mode, 0);  	/* -	* If scaling is enabled and refresh rate didn't change -	* we copy the vic and polarities of the old timings -	*/ +	 * If scaling is enabled and refresh rate didn't change +	 * we copy the vic and polarities of the old timings +	 */  	if (!scale || mode_refresh != preferred_refresh)  		fill_stream_properties_from_drm_display_mode(  			stream, &mode, &aconnector->base, con_state, NULL, @@ -6817,6 +6761,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,  	if (!state->duplicated) {  		int max_bpc = conn_state->max_requested_bpc; +  		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&  			  aconnector->force_yuv420_output;  		color_depth = convert_color_depth_from_display_info(connector, @@ -7135,7 +7080,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,  {  	struct drm_display_mode *m; -	list_for_each_entry (m, &aconnector->base.probed_modes, head) { +	list_for_each_entry(m, &aconnector->base.probed_modes, head) {  		if (drm_mode_equal(m, mode))  			return true;  	} @@ -7295,6 +7240,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,  	aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;  	memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));  	mutex_init(&aconnector->hpd_lock); +	mutex_init(&aconnector->handle_mst_msg_ready);  	/*  	 * configure support HPD hot plug connector_>polled default value is 0 @@ -7454,7 +7400,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,  	link->priv = aconnector; -	DRM_DEBUG_DRIVER("%s()\n", __func__);  	i2c = create_i2c(link->ddc, link->link_index, &res);  	if (!i2c) { @@ -8125,7 +8070,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  		 * Only allow immediate flips for fast updates that don't  		 * change memory domain, FB pitch, DCC state, rotation or  		 * mirroring. +		 * +		 * dm_crtc_helper_atomic_check() only accepts async flips with +		 * fast updates.  		 */ +		if (crtc->state->async_flip && +		    acrtc_state->update_type != UPDATE_TYPE_FAST) +			drm_warn_once(state->dev, +				      "[PLANE:%d:%s] async flip with non-fast update\n", +				      plane->base.id, plane->name);  		bundle->flip_addrs[planes_count].flip_immediate =  			crtc->state->async_flip &&  			acrtc_state->update_type == UPDATE_TYPE_FAST && @@ -8168,8 +8121,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  			 * DRI3/Present extension with defined target_msc.  			 
*/  			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); -		} -		else { +		} else {  			/* For variable refresh rate mode only:  			 * Get vblank of last completed flip to avoid > 1 vrr  			 * flips per video frame by use of throttling, but allow @@ -8502,8 +8454,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  		dc_resource_state_copy_construct_current(dm->dc, dc_state);  	} -	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state, -				       new_crtc_state, i) { +	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, +				      new_crtc_state, i) {  		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);  		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); @@ -8526,9 +8478,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)  		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);  		drm_dbg_state(state->dev, -			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " -			"planes_changed:%d, mode_changed:%d,active_changed:%d," -			"connectors_changed:%d\n", +			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",  			acrtc->crtc_id,  			new_crtc_state->enable,  			new_crtc_state->active, @@ -9104,8 +9054,8 @@ static int do_aquire_global_lock(struct drm_device *dev,  					&commit->flip_done, 10*HZ);  		if (ret == 0) -			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done " -				  "timed out\n", crtc->base.id, crtc->name); +			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n", +				  crtc->base.id, crtc->name);  		drm_crtc_commit_put(commit);  	} @@ -9190,7 +9140,8 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,  	return false;  } -static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { +static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) +{  	u64 num, den, res;  	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; @@ -9312,9 +9263,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,  		goto skip_modeset;  	drm_dbg_state(state->dev, -		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " -		"planes_changed:%d, mode_changed:%d,active_changed:%d," -		"connectors_changed:%d\n", +		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",  		acrtc->crtc_id,  		new_crtc_state->enable,  		new_crtc_state->active, @@ -9343,8 +9292,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,  						     old_crtc_state)) {  			new_crtc_state->mode_changed = false;  			DRM_DEBUG_DRIVER( -				"Mode change not required for front porch change, " -				"setting mode_changed to %d", +				"Mode change not required for front porch change, setting mode_changed to %d",  				new_crtc_state->mode_changed);  			set_freesync_fixed_config(dm_new_crtc_state); @@ -9356,9 +9304,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,  			struct drm_display_mode *high_mode;  			high_mode = get_highest_refresh_rate_mode(aconnector, false); -			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) { +			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))  				set_freesync_fixed_config(dm_new_crtc_state); -			}  		}  		ret = dm_atomic_get_state(state, &dm_state); @@ -9526,6 +9473,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,  	 */  	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { 
 		struct amdgpu_framebuffer *old_afb, *new_afb; +  		if (other->type == DRM_PLANE_TYPE_CURSOR)  			continue; @@ -9624,11 +9572,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,  	}  	/* Core DRM takes care of checking FB modifiers, so we only need to -	 * check tiling flags when the FB doesn't have a modifier. */ +	 * check tiling flags when the FB doesn't have a modifier. +	 */  	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {  		if (adev->family < AMDGPU_FAMILY_AI) {  			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && -			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && +				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&  				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;  		} else {  			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; @@ -9850,12 +9799,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,  	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a  	 * cursor per pipe but it's going to inherit the scaling and  	 * positioning from the underlying pipe. Check the cursor plane's -	 * blending properties match the underlying planes'. */ +	 * blending properties match the underlying planes'. +	 */  	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor); -	if (!new_cursor_state || !new_cursor_state->fb) { +	if (!new_cursor_state || !new_cursor_state->fb)  		return 0; -	}  	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);  	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w; @@ -9900,6 +9849,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm  	struct drm_connector_state *conn_state, *old_conn_state;  	struct amdgpu_dm_connector *aconnector = NULL;  	int i; +  	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {  		if (!conn_state->crtc)  			conn_state = old_conn_state; @@ -10334,7 +10284,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,  	}  	/* Store the overall update type for use later in atomic check. 
*/ -	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { +	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {  		struct dm_crtc_state *dm_new_crtc_state =  			to_dm_crtc_state(new_crtc_state); @@ -10356,7 +10306,7 @@ fail:  	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)  		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");  	else -		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret); +		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);  	trace_amdgpu_dm_atomic_check_finish(state, ret); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 4561f55afa99..9fb5bb3a75a7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -195,6 +195,11 @@ struct hpd_rx_irq_offload_work_queue {  	 */  	bool is_handling_link_loss;  	/** +	 * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message +	 * ready event when we're already handling mst message ready event +	 */ +	bool is_handling_mst_msg_rdy_event; +	/**  	 * @aconnector: The aconnector that this work queue is attached to  	 */  	struct amdgpu_dm_connector *aconnector; @@ -638,6 +643,8 @@ struct amdgpu_dm_connector {  	struct drm_dp_mst_port *mst_output_port;  	struct amdgpu_dm_connector *mst_root;  	struct drm_dp_aux *dsc_aux; +	struct mutex handle_mst_msg_ready; +  	/* TODO see if we can merge with ddc_bus or make a dm_connector */  	struct amdgpu_i2c_adapter *i2c; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 440fc0869a34..30d4c6fd95f5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -398,6 +398,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,  		return -EINVAL;  	} +	/* +	 * Only allow async flips for fast updates that don't change the FB +	 * pitch, the DCC state, rotation, etc. 
+	 */ +	if (crtc_state->async_flip && +	    dm_crtc_state->update_type != UPDATE_TYPE_FAST) { +		drm_dbg_atomic(crtc->dev, +			       "[CRTC:%d:%s] async flips are only supported for fast updates\n", +			       crtc->base.id, crtc->name); +		return -EINVAL; +	} +  	/* In some use cases, like reset, no stream is attached */  	if (!dm_crtc_state->stream)  		return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 46d0a8f57e55..9bc86deac9e8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -619,8 +619,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,  	return connector;  } +void dm_handle_mst_sideband_msg_ready_event( +	struct drm_dp_mst_topology_mgr *mgr, +	enum mst_msg_ready_type msg_rdy_type) +{ +	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; +	uint8_t dret; +	bool new_irq_handled = false; +	int dpcd_addr; +	uint8_t dpcd_bytes_to_read; +	const uint8_t max_process_count = 30; +	uint8_t process_count = 0; +	u8 retry; +	struct amdgpu_dm_connector *aconnector = +			container_of(mgr, struct amdgpu_dm_connector, mst_mgr); + + +	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); + +	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { +		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; +		/* DPCD 0x200 - 0x201 for downstream IRQ */ +		dpcd_addr = DP_SINK_COUNT; +	} else { +		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; +		/* DPCD 0x2002 - 0x2005 for downstream IRQ */ +		dpcd_addr = DP_SINK_COUNT_ESI; +	} + +	mutex_lock(&aconnector->handle_mst_msg_ready); + +	while (process_count < max_process_count) { +		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {}; + +		process_count++; + +		dret = drm_dp_dpcd_read( +			&aconnector->dm_dp_aux.aux, +			dpcd_addr, +			esi, +			dpcd_bytes_to_read); + +		if (dret != dpcd_bytes_to_read) { +			DRM_DEBUG_KMS("DPCD read and acked number is not as expected!"); +			break; +		} + +		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); + +		switch (msg_rdy_type) { +		case DOWN_REP_MSG_RDY_EVENT: +			/* Only handle DOWN_REP_MSG_RDY case*/ +			esi[1] &= DP_DOWN_REP_MSG_RDY; +			break; +		case UP_REQ_MSG_RDY_EVENT: +			/* Only handle UP_REQ_MSG_RDY case*/ +			esi[1] &= DP_UP_REQ_MSG_RDY; +			break; +		default: +			/* Handle both cases*/ +			esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY); +			break; +		} + +		if (!esi[1]) +			break; + +		/* handle MST irq */ +		if (aconnector->mst_mgr.mst_state) +			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr, +						 esi, +						 ack, +						 &new_irq_handled); + +		if (new_irq_handled) { +			/* ACK at DPCD to notify down stream */ +			for (retry = 0; retry < 3; retry++) { +				ssize_t wret; + +				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux, +							  dpcd_addr + 1, +							  ack[1]); +				if (wret == 1) +					break; +			} + +			if (retry == 3) { +				DRM_ERROR("Failed to ack MST event.\n"); +				break; +			} + +			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr); + +			new_irq_handled = false; +		} else { +			break; +		} +	} + +	mutex_unlock(&aconnector->handle_mst_msg_ready); + +	if (process_count == max_process_count) +		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); +} + +static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr) +{ +	dm_handle_mst_sideband_msg_ready_event(mgr, 
DOWN_REP_MSG_RDY_EVENT); +} +  static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {  	.add_connector = dm_dp_add_mst_connector, +	.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,  };  void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 1e4ede1e57ab..37c820ab0fdb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -49,6 +49,13 @@  #define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B	1031  #define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B	1000 +enum mst_msg_ready_type { +	NONE_MSG_RDY_EVENT = 0, +	DOWN_REP_MSG_RDY_EVENT = 1, +	UP_REQ_MSG_RDY_EVENT = 2, +	DOWN_OR_UP_MSG_RDY_EVENT = 3 +}; +  struct amdgpu_display_manager;  struct amdgpu_dm_connector; @@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,  void  dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); +void dm_handle_mst_sideband_msg_ready_event( +	struct drm_dp_mst_topology_mgr *mgr, +	enum mst_msg_ready_type msg_rdy_type); +  struct dsc_mst_fairness_vars {  	int pbn;  	bool dsc_enabled; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index 7ccd96959256..3db4ef564b99 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -87,6 +87,11 @@ static int dcn31_get_active_display_cnt_wa(  				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||  				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)  			tmds_present = true; + +		/* Checking stream / link detection ensuring that PHY is active*/ +		if (dc_is_dp_signal(stream->signal) && !stream->dpms_off) +			display_count++; +  	}  	for (i = 0; i < dc->link_count; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 6c9ca43d1040..20d4d08a6a2f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1792,10 +1792,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)  			hws->funcs.edp_backlight_control(edp_link_with_sink, false);  		}  		/*resume from S3, no vbios posting, no need to power down again*/ +		clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); +  		power_down_all_hw_blocks(dc);  		disable_vga_and_power_gate_all_controllers(dc);  		if (edp_link_with_sink && !keep_edp_vdd_on)  			dc->hwss.edp_power_control(edp_link_with_sink, false); +		clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);  	}  	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1);  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a50309039d08..9834b75f1837 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -3278,7 +3278,8 @@ void dcn10_wait_for_mpcc_disconnect(  		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {  			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst); -			if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) +			if (pipe_ctx->stream_res.tg && +				pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))  				
res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);  			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;  			hubp->funcs->set_blank(hubp, true); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index dfb8f62765f2..5bf4d0aa6230 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -215,7 +215,7 @@ void optc3_set_odm_bypass(struct timing_generator *optc,  	optc1->opp_count = 1;  } -static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, +void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,  		struct dc_crtc_timing *timing)  {  	struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -293,7 +293,7 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e  		   OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);  } -static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc) +void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)  {  	struct optc *optc1 = DCN10TG_FROM_TG(optc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h index fb06dc9a4893..d3a056c12b0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h @@ -351,6 +351,9 @@ void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable);  void optc3_set_odm_bypass(struct timing_generator *optc,  		const struct dc_crtc_timing *dc_crtc_timing); +void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, +		struct dc_crtc_timing *timing); +void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc);  void optc3_tg_init(struct timing_generator *optc);  void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);  #endif /* __DC_OPTC_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile index 7aa628c21973..9002cb10a6ae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile @@ -11,7 +11,8 @@  # Makefile for dcn30.  DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \ -		dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o +		dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o \ +		dcn301_optc.o  AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c new file mode 100644 index 000000000000..b3cfcb887905 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c @@ -0,0 +1,185 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "dcn301_optc.h" +#include "dc.h" +#include "dcn_calc_math.h" +#include "dc_dmub_srv.h" + +#include "dml/dcn30/dcn30_fpu.h" +#include "dc_trace.h" + +#define REG(reg)\ +	optc1->tg_regs->reg + +#define CTX \ +	optc1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ +	optc1->tg_shift->field_name, optc1->tg_mask->field_name + + +/** + * optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*. + * + * @optc: timing_generator instance. + * @params: parameters used for Dynamic Refresh Rate. + */ +void optc301_set_drr( +	struct timing_generator *optc, +	const struct drr_params *params) +{ +	struct optc *optc1 = DCN10TG_FROM_TG(optc); + +	if (params != NULL && +		params->vertical_total_max > 0 && +		params->vertical_total_min > 0) { + +		if (params->vertical_total_mid != 0) { + +			REG_SET(OTG_V_TOTAL_MID, 0, +				OTG_V_TOTAL_MID, params->vertical_total_mid - 1); + +			REG_UPDATE_2(OTG_V_TOTAL_CONTROL, +					OTG_VTOTAL_MID_REPLACING_MAX_EN, 1, +					OTG_VTOTAL_MID_FRAME_NUM, +					(uint8_t)params->vertical_total_mid_frame_num); + +		} + +		optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1); + +		REG_UPDATE_5(OTG_V_TOTAL_CONTROL, +				OTG_V_TOTAL_MIN_SEL, 1, +				OTG_V_TOTAL_MAX_SEL, 1, +				OTG_FORCE_LOCK_ON_EVENT, 0, +				OTG_SET_V_TOTAL_MIN_MASK_EN, 0, +				OTG_SET_V_TOTAL_MIN_MASK, 0); +		// Setup manual flow control for EOF via TRIG_A +		optc->funcs->setup_manual_trigger(optc); + +	} else { +		REG_UPDATE_4(OTG_V_TOTAL_CONTROL, +				OTG_SET_V_TOTAL_MIN_MASK, 0, +				OTG_V_TOTAL_MIN_SEL, 0, +				OTG_V_TOTAL_MAX_SEL, 0, +				OTG_FORCE_LOCK_ON_EVENT, 0); + +		optc->funcs->set_vtotal_min_max(optc, 0, 0); +	} +} + + +void optc301_setup_manual_trigger(struct timing_generator *optc) +{ +	struct optc *optc1 = DCN10TG_FROM_TG(optc); + +	REG_SET_8(OTG_TRIGA_CNTL, 0, +			OTG_TRIGA_SOURCE_SELECT, 21, +			OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, +			OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1, +			OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0, +			OTG_TRIGA_POLARITY_SELECT, 0, +			OTG_TRIGA_FREQUENCY_SELECT, 0, +			OTG_TRIGA_DELAY, 0, +			OTG_TRIGA_CLEAR, 1); +} + +static struct timing_generator_funcs dcn30_tg_funcs = { +		.validate_timing = optc1_validate_timing, +		.program_timing = optc1_program_timing, +		.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, +		.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, +		.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, +		.program_global_sync = optc1_program_global_sync, +		.enable_crtc = optc2_enable_crtc, +		.disable_crtc = optc1_disable_crtc, +		/* used by enable_timing_synchronization. 
Not need for FPGA */ +		.is_counter_moving = optc1_is_counter_moving, +		.get_position = optc1_get_position, +		.get_frame_count = optc1_get_vblank_counter, +		.get_scanoutpos = optc1_get_crtc_scanoutpos, +		.get_otg_active_size = optc1_get_otg_active_size, +		.set_early_control = optc1_set_early_control, +		/* used by enable_timing_synchronization. Not need for FPGA */ +		.wait_for_state = optc1_wait_for_state, +		.set_blank_color = optc3_program_blank_color, +		.did_triggered_reset_occur = optc1_did_triggered_reset_occur, +		.triplebuffer_lock = optc3_triplebuffer_lock, +		.triplebuffer_unlock = optc2_triplebuffer_unlock, +		.enable_reset_trigger = optc1_enable_reset_trigger, +		.enable_crtc_reset = optc1_enable_crtc_reset, +		.disable_reset_trigger = optc1_disable_reset_trigger, +		.lock = optc3_lock, +		.unlock = optc1_unlock, +		.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable, +		.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable, +		.enable_optc_clock = optc1_enable_optc_clock, +		.set_drr = optc301_set_drr, +		.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, +		.set_vtotal_min_max = optc3_set_vtotal_min_max, +		.set_static_screen_control = optc1_set_static_screen_control, +		.program_stereo = optc1_program_stereo, +		.is_stereo_left_eye = optc1_is_stereo_left_eye, +		.tg_init = optc3_tg_init, +		.is_tg_enabled = optc1_is_tg_enabled, +		.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, +		.clear_optc_underflow = optc1_clear_optc_underflow, +		.setup_global_swap_lock = NULL, +		.get_crc = optc1_get_crc, +		.configure_crc = optc2_configure_crc, +		.set_dsc_config = optc3_set_dsc_config, +		.get_dsc_status = optc2_get_dsc_status, +		.set_dwb_source = NULL, +		.set_odm_bypass = optc3_set_odm_bypass, +		.set_odm_combine = optc3_set_odm_combine, +		.get_optc_source = optc2_get_optc_source, +		.set_out_mux = optc3_set_out_mux, +		.set_drr_trigger_window = optc3_set_drr_trigger_window, +		.set_vtotal_change_limit = optc3_set_vtotal_change_limit, +		.set_gsl = optc2_set_gsl, +		.set_gsl_source_select = optc2_set_gsl_source_select, +		.set_vtg_params = optc1_set_vtg_params, +		.program_manual_trigger = optc2_program_manual_trigger, +		.setup_manual_trigger = optc301_setup_manual_trigger, +		.get_hw_timing = optc1_get_hw_timing, +		.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear, +}; + +void dcn301_timing_generator_init(struct optc *optc1) +{ +	optc1->base.funcs = &dcn30_tg_funcs; + +	optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; +	optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; + +	optc1->min_h_blank = 32; +	optc1->min_v_blank = 3; +	optc1->min_v_blank_interlace = 5; +	optc1->min_h_sync_width = 4; +	optc1->min_v_sync_width = 1; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h new file mode 100644 index 000000000000..b49585682a15 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h @@ -0,0 +1,36 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPTC_DCN301_H__ +#define __DC_OPTC_DCN301_H__ + +#include "dcn20/dcn20_optc.h" +#include "dcn30/dcn30_optc.h" + +void dcn301_timing_generator_init(struct optc *optc1); +void optc301_setup_manual_trigger(struct timing_generator *optc); +void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params); + +#endif /* __DC_OPTC_DCN301_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 3485fbb1093e..1bee9a4636e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -42,7 +42,7 @@  #include "dcn30/dcn30_hubp.h"  #include "irq/dcn30/irq_service_dcn30.h"  #include "dcn30/dcn30_dpp.h" -#include "dcn30/dcn30_optc.h" +#include "dcn301/dcn301_optc.h"  #include "dcn20/dcn20_hwseq.h"  #include "dcn30/dcn30_hwseq.h"  #include "dce110/dce110_hw_sequencer.h" @@ -855,7 +855,7 @@ static struct timing_generator *dcn301_timing_generator_create(  	tgn10->tg_shift = &optc_shift;  	tgn10->tg_mask = &optc_mask; -	dcn30_timing_generator_init(tgn10); +	dcn301_timing_generator_init(tgn10);  	return &tgn10->base;  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 45956ef6f3f9..131b8b82afc0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -65,7 +65,7 @@ static const struct dc_debug_options debug_defaults_drv = {  		.timing_trace = false,  		.clock_trace = true,  		.disable_pplib_clock_request = true, -		.pipe_split_policy = MPC_SPLIT_DYNAMIC, +		.pipe_split_policy = MPC_SPLIT_AVOID,  		.force_single_disp_pipe_split = false,  		.disable_dcc = DCC_ENABLE,  		.vsr_support = true, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 65c1d754e2d6..01cc679ae418 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -84,7 +84,8 @@ static enum phyd32clk_clock_source get_phy_mux_symclk(  		struct dcn_dccg *dccg_dcn,  		enum phyd32clk_clock_source src)  { -	if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { +	if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && +			
dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {  		if (src == PHYD32CLKC)  			src = PHYD32CLKF;  		if (src == PHYD32CLKD) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c index 11e28e056cf7..61ceff6bc0b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c @@ -49,7 +49,10 @@ static void dccg32_trigger_dio_fifo_resync(  	uint32_t dispclk_rdivider_value = 0;  	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value); -	REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); + +	/* Not valid for the WDIVIDER to be set to 0 */ +	if (dispclk_rdivider_value != 0) +		REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);  }  static void dccg32_get_pixel_rate_div( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index d9e049e7ff0a..ed8ddb75b333 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -295,7 +295,11 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c  		pipe = &res_ctx->pipe_ctx[i];  		timing = &pipe->stream->timing; -		pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min; +		if (pipe->stream->adjust.v_total_min != 0) +			pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min; +		else +			pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total; +  		pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;  		pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS);  		pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 6841a4bce186..1cb402264497 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -1798,17 +1798,6 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)  	return result;  } -static bool intel_core_rkl_chk(void) -{ -#if IS_ENABLED(CONFIG_X86_64) -	struct cpuinfo_x86 *c = &cpu_data(0); - -	return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE); -#else -	return false; -#endif -} -  static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)  {  	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -1835,7 +1824,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)  	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;  	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? 
false : true;  	data->pcie_dpm_key_disabled = -		intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); +		!amdgpu_device_pcie_dynamic_switching_supported() || +		!(hwmgr->feature_mask & PP_PCIE_DPM_MASK);  	/* need to set voltage control types before EVV patching */  	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;  	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 6a0ac0bbaace..355c156d871a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -295,5 +295,9 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,  					uint32_t *size,  					uint32_t pptable_id); +int smu_v13_0_update_pcie_parameters(struct smu_context *smu, +				     uint32_t pcie_gen_cap, +				     uint32_t pcie_width_cap); +  #endif  #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 9cd005131f56..3bb18396d2f9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -2113,7 +2113,6 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,  	}  	mutex_lock(&adev->pm.mutex);  	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); -	mutex_unlock(&adev->pm.mutex);  	if (r)  		goto fail; @@ -2130,6 +2129,7 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,  	}  	r = num_msgs;  fail: +	mutex_unlock(&adev->pm.mutex);  	kfree(req);  	return r;  } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index c94d825a871b..95f6d821bacb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -3021,7 +3021,6 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,  	}  	mutex_lock(&adev->pm.mutex);  	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); -	mutex_unlock(&adev->pm.mutex);  	if (r)  		goto fail; @@ -3038,6 +3037,7 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,  	}  	r = num_msgs;  fail: +	mutex_unlock(&adev->pm.mutex);  	kfree(req);  	return r;  } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index f7ed3e655e39..0cda3b276f61 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1927,12 +1927,16 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,  		*size = 4;  		break;  	case AMDGPU_PP_SENSOR_GFX_MCLK: -		ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data); +		ret = sienna_cichlid_get_smu_metrics_data(smu, +							  METRICS_CURR_UCLK, +							  (uint32_t *)data);  		*(uint32_t *)data *= 100;  		*size = 4;  		break;  	case AMDGPU_PP_SENSOR_GFX_SCLK: -		ret = sienna_cichlid_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data); +		ret = sienna_cichlid_get_smu_metrics_data(smu, +							  METRICS_AVERAGE_GFXCLK, +							  (uint32_t *)data);  		*(uint32_t *)data *= 100;  		*size = 4;  		break; @@ -2077,89 +2081,36 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context  	return ret;  } -static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu, -						      uint32_t *gen_speed_override, -						      uint32_t *lane_width_override) -{ -	struct amdgpu_device 
*adev = smu->adev; - -	*gen_speed_override = 0xff; -	*lane_width_override = 0xff; - -	switch (adev->pdev->device) { -	case 0x73A0: -	case 0x73A1: -	case 0x73A2: -	case 0x73A3: -	case 0x73AB: -	case 0x73AE: -		/* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */ -		*lane_width_override = 6; -		break; -	case 0x73E0: -	case 0x73E1: -	case 0x73E3: -		*lane_width_override = 4; -		break; -	case 0x7420: -	case 0x7421: -	case 0x7422: -	case 0x7423: -	case 0x7424: -		*lane_width_override = 3; -		break; -	default: -		break; -	} -} - -#define MAX(a, b)	((a) > (b) ? (a) : (b)) -  static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,  					 uint32_t pcie_gen_cap,  					 uint32_t pcie_width_cap)  {  	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;  	struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; -	uint32_t gen_speed_override, lane_width_override; -	uint8_t *table_member1, *table_member2; -	uint32_t min_gen_speed, max_gen_speed; -	uint32_t min_lane_width, max_lane_width; -	uint32_t smu_pcie_arg; +	u32 smu_pcie_arg;  	int ret, i; -	GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1); -	GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2); +	/* PCIE gen speed and lane width override */ +	if (!amdgpu_device_pcie_dynamic_switching_supported()) { +		if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap) +			pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1]; -	sienna_cichlid_get_override_pcie_settings(smu, -						  &gen_speed_override, -						  &lane_width_override); +		if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap) +			pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1]; -	/* PCIE gen speed override */ -	if (gen_speed_override != 0xff) { -		min_gen_speed = MIN(pcie_gen_cap, gen_speed_override); -		max_gen_speed = MIN(pcie_gen_cap, gen_speed_override); -	} else { -		min_gen_speed = MAX(0, table_member1[0]); -		max_gen_speed = MIN(pcie_gen_cap, table_member1[1]); -		min_gen_speed = min_gen_speed > max_gen_speed ? -				max_gen_speed : min_gen_speed; -	} -	pcie_table->pcie_gen[0] = min_gen_speed; -	pcie_table->pcie_gen[1] = max_gen_speed; - -	/* PCIE lane width override */ -	if (lane_width_override != 0xff) { -		min_lane_width = MIN(pcie_width_cap, lane_width_override); -		max_lane_width = MIN(pcie_width_cap, lane_width_override); +		/* Force all levels to use the same settings */ +		for (i = 0; i < NUM_LINK_LEVELS; i++) { +			pcie_table->pcie_gen[i] = pcie_gen_cap; +			pcie_table->pcie_lane[i] = pcie_width_cap; +		}  	} else { -		min_lane_width = MAX(1, table_member2[0]); -		max_lane_width = MIN(pcie_width_cap, table_member2[1]); -		min_lane_width = min_lane_width > max_lane_width ? 
-				 max_lane_width : min_lane_width; +		for (i = 0; i < NUM_LINK_LEVELS; i++) { +			if (pcie_table->pcie_gen[i] > pcie_gen_cap) +				pcie_table->pcie_gen[i] = pcie_gen_cap; +			if (pcie_table->pcie_lane[i] > pcie_width_cap) +				pcie_table->pcie_lane[i] = pcie_width_cap; +		}  	} -	pcie_table->pcie_lane[0] = min_lane_width; -	pcie_table->pcie_lane[1] = max_lane_width;  	for (i = 0; i < NUM_LINK_LEVELS; i++) {  		smu_pcie_arg = (i << 16 | @@ -3842,7 +3793,6 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,  	}  	mutex_lock(&adev->pm.mutex);  	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); -	mutex_unlock(&adev->pm.mutex);  	if (r)  		goto fail; @@ -3859,6 +3809,7 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,  	}  	r = num_msgs;  fail: +	mutex_unlock(&adev->pm.mutex);  	kfree(req);  	return r;  } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index e80f122d8aec..ce50ef46e73f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1525,7 +1525,6 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,  	}  	mutex_lock(&adev->pm.mutex);  	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); -	mutex_unlock(&adev->pm.mutex);  	if (r)  		goto fail; @@ -1542,6 +1541,7 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,  	}  	r = num_msgs;  fail: +	mutex_unlock(&adev->pm.mutex);  	kfree(req);  	return r;  } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 3856da6c3f3d..9b62b45ebb7f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -2424,3 +2424,51 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)  	return ret;  } + +int smu_v13_0_update_pcie_parameters(struct smu_context *smu, +				     uint32_t pcie_gen_cap, +				     uint32_t pcie_width_cap) +{ +	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; +	struct smu_13_0_pcie_table *pcie_table = +				&dpm_context->dpm_tables.pcie_table; +	int num_of_levels = pcie_table->num_of_link_levels; +	uint32_t smu_pcie_arg; +	int ret, i; + +	if (!amdgpu_device_pcie_dynamic_switching_supported()) { +		if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) +			pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; + +		if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) +			pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; + +		/* Force all levels to use the same settings */ +		for (i = 0; i < num_of_levels; i++) { +			pcie_table->pcie_gen[i] = pcie_gen_cap; +			pcie_table->pcie_lane[i] = pcie_width_cap; +		} +	} else { +		for (i = 0; i < num_of_levels; i++) { +			if (pcie_table->pcie_gen[i] > pcie_gen_cap) +				pcie_table->pcie_gen[i] = pcie_gen_cap; +			if (pcie_table->pcie_lane[i] > pcie_width_cap) +				pcie_table->pcie_lane[i] = pcie_width_cap; +		} +	} + +	for (i = 0; i < num_of_levels; i++) { +		smu_pcie_arg = i << 16; +		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; +		smu_pcie_arg |= pcie_table->pcie_lane[i]; + +		ret = smu_cmn_send_smc_msg_with_param(smu, +						      SMU_MSG_OverridePcieParameters, +						      smu_pcie_arg, +						      NULL); +		if (ret) +			return ret; +	} + +	return 0; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 124287cbbff8..3d188616ba24 
100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1645,37 +1645,6 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,  	return ret;  } -static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu, -					      uint32_t pcie_gen_cap, -					      uint32_t pcie_width_cap) -{ -	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; -	struct smu_13_0_pcie_table *pcie_table = -				&dpm_context->dpm_tables.pcie_table; -	uint32_t smu_pcie_arg; -	int ret, i; - -	for (i = 0; i < pcie_table->num_of_link_levels; i++) { -		if (pcie_table->pcie_gen[i] > pcie_gen_cap) -			pcie_table->pcie_gen[i] = pcie_gen_cap; -		if (pcie_table->pcie_lane[i] > pcie_width_cap) -			pcie_table->pcie_lane[i] = pcie_width_cap; - -		smu_pcie_arg = i << 16; -		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; -		smu_pcie_arg |= pcie_table->pcie_lane[i]; - -		ret = smu_cmn_send_smc_msg_with_param(smu, -						      SMU_MSG_OverridePcieParameters, -						      smu_pcie_arg, -						      NULL); -		if (ret) -			return ret; -	} - -	return 0; -} -  static const struct smu_temperature_range smu13_thermal_policy[] = {  	{-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},  	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, @@ -1765,7 +1734,7 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,  	gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;  	gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; -	gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK]; +	gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;  	gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];  	gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];  	gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; @@ -2320,7 +2289,6 @@ static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,  	}  	mutex_lock(&adev->pm.mutex);  	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); -	mutex_unlock(&adev->pm.mutex);  	if (r)  		goto fail; @@ -2337,6 +2305,7 @@ static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,  	}  	r = num_msgs;  fail: +	mutex_unlock(&adev->pm.mutex);  	kfree(req);  	return r;  } @@ -2654,7 +2623,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {  	.feature_is_enabled = smu_cmn_feature_is_enabled,  	.print_clk_levels = smu_v13_0_0_print_clk_levels,  	.force_clk_levels = smu_v13_0_0_force_clk_levels, -	.update_pcie_parameters = smu_v13_0_0_update_pcie_parameters, +	.update_pcie_parameters = smu_v13_0_update_pcie_parameters,  	.get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,  	.register_irq_handler = smu_v13_0_register_irq_handler,  	.enable_thermal_alert = smu_v13_0_enable_thermal_alert, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 6ef12252beb5..1ac552142763 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -1763,7 +1763,6 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,  	}  	mutex_lock(&adev->pm.mutex);  	r = smu_v13_0_6_request_i2c_xfer(smu, req); -	mutex_unlock(&adev->pm.mutex);  	if (r)  		goto fail; @@ -1780,6 +1779,7 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,  	}  	r = num_msgs;  fail: +	mutex_unlock(&adev->pm.mutex);  	kfree(req); 
 	return r;  } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index cda4e818aab7..b1f0937ccade 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -949,7 +949,7 @@ static int smu_v13_0_7_read_sensor(struct smu_context *smu,  		break;  	case AMDGPU_PP_SENSOR_GFX_MCLK:  		ret = smu_v13_0_7_get_smu_metrics_data(smu, -						       METRICS_AVERAGE_UCLK, +						       METRICS_CURR_UCLK,  						       (uint32_t *)data);  		*(uint32_t *)data *= 100;  		*size = 4; @@ -1635,37 +1635,6 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,  	return ret;  } -static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu, -					      uint32_t pcie_gen_cap, -					      uint32_t pcie_width_cap) -{ -	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; -	struct smu_13_0_pcie_table *pcie_table = -				&dpm_context->dpm_tables.pcie_table; -	uint32_t smu_pcie_arg; -	int ret, i; - -	for (i = 0; i < pcie_table->num_of_link_levels; i++) { -		if (pcie_table->pcie_gen[i] > pcie_gen_cap) -			pcie_table->pcie_gen[i] = pcie_gen_cap; -		if (pcie_table->pcie_lane[i] > pcie_width_cap) -			pcie_table->pcie_lane[i] = pcie_width_cap; - -		smu_pcie_arg = i << 16; -		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; -		smu_pcie_arg |= pcie_table->pcie_lane[i]; - -		ret = smu_cmn_send_smc_msg_with_param(smu, -						      SMU_MSG_OverridePcieParameters, -						      smu_pcie_arg, -						      NULL); -		if (ret) -			return ret; -	} - -	return 0; -} -  static const struct smu_temperature_range smu13_thermal_policy[] =  {  	{-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, @@ -2234,7 +2203,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {  	.feature_is_enabled = smu_cmn_feature_is_enabled,  	.print_clk_levels = smu_v13_0_7_print_clk_levels,  	.force_clk_levels = smu_v13_0_7_force_clk_levels, -	.update_pcie_parameters = smu_v13_0_7_update_pcie_parameters, +	.update_pcie_parameters = smu_v13_0_update_pcie_parameters,  	.get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,  	.register_irq_handler = smu_v13_0_register_irq_handler,  	.enable_thermal_alert = smu_v13_0_enable_thermal_alert, | 
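
Editor's note on the DRR math in the optc301_set_drr() hunk above: the OTG latches vertical totals as "lines minus one", which is why both OTG_V_TOTAL_MID and set_vtotal_min_max() receive vertical_total_* - 1, and the min/max pair bounds the variable-refresh window. The standalone sketch below illustrates how a vtotal window maps to a refresh range; none of it is DC code, and the timing numbers (a common 1080p mode: 148.5 MHz pixel clock, htotal 2200) are chosen only for the example.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration of the vtotal math behind optc301_set_drr(); not DC code.
 * With a 148.5 MHz pixel clock and htotal 2200, vtotal 1125 gives 60 Hz
 * and vtotal 1406 gives ~48 Hz, i.e. a 48-60 Hz VRR window.
 */
int main(void)
{
	uint32_t pix_clk_hz = 148500000;
	uint32_t h_total = 2200;
	uint32_t v_total_min = 1125;	/* smallest vtotal -> fastest refresh */
	uint32_t v_total_max = 1406;	/* largest vtotal -> slowest refresh */

	/* refresh_hz = pixel_clock / (htotal * vtotal) */
	double hz_fast = (double)pix_clk_hz / ((double)h_total * v_total_min);
	double hz_slow = (double)pix_clk_hz / ((double)h_total * v_total_max);

	/* The OTG registers hold "lines - 1", hence the -1 in set_drr(). */
	printf("OTG_V_TOTAL_MIN=%u OTG_V_TOTAL_MAX=%u -> %.1f..%.1f Hz\n",
	       v_total_min - 1, v_total_max - 1, hz_slow, hz_fast);
	return 0;
}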
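
The consolidated smu_v13_0_update_pcie_parameters() helper above (and the reworked sienna_cichlid variant) applies one of two policies before sending SMU_MSG_OverridePcieParameters once per link level: if the host supports dynamic speed switching, each DPM level is clamped to the platform caps; if it does not (e.g. when amdgpu_device_pcie_dynamic_switching_supported() returns false), the whole table is flattened to a single gen/width so the SMU never switches link speed at runtime. Below is a minimal userspace sketch of that policy under an assumed two-level table with made-up contents; only the argument layout (level in bits 23:16, gen in 15:8, lane code in 7:0, with lane code 6 meaning x16 per the removed override comment) is taken from the patch.

#include <stdint.h>
#include <stdio.h>

#define NUM_LINK_LEVELS 2	/* assumed two-level DPM table for the sketch */

/* Sketch of the capping policy in smu_v13_0_update_pcie_parameters(). */
static void cap_pcie_table(uint32_t *gen, uint32_t *lane,
			   uint32_t gen_cap, uint32_t width_cap,
			   int dynamic_switching)
{
	int i;

	if (!dynamic_switching) {
		/* Never advertise more than the top level supports... */
		if (gen[NUM_LINK_LEVELS - 1] < gen_cap)
			gen_cap = gen[NUM_LINK_LEVELS - 1];
		if (lane[NUM_LINK_LEVELS - 1] < width_cap)
			width_cap = lane[NUM_LINK_LEVELS - 1];
		/* ...then pin every level to one setting: no DPM switching. */
		for (i = 0; i < NUM_LINK_LEVELS; i++) {
			gen[i] = gen_cap;
			lane[i] = width_cap;
		}
	} else {
		/* Host can switch speeds: just clamp each level to the caps. */
		for (i = 0; i < NUM_LINK_LEVELS; i++) {
			if (gen[i] > gen_cap)
				gen[i] = gen_cap;
			if (lane[i] > width_cap)
				lane[i] = width_cap;
		}
	}

	for (i = 0; i < NUM_LINK_LEVELS; i++) {
		/* level in bits 23:16, gen in 15:8, lane width code in 7:0 */
		uint32_t smu_pcie_arg = (i << 16) | (gen[i] << 8) | lane[i];
		printf("OverridePcieParameters arg: 0x%06x\n",
		       (unsigned int)smu_pcie_arg);
	}
}

int main(void)
{
	uint32_t gen[NUM_LINK_LEVELS]  = { 0, 3 };	/* made-up: Gen1, Gen4 */
	uint32_t lane[NUM_LINK_LEVELS] = { 1, 6 };	/* made-up: x1, x16 */

	cap_pcie_table(gen, lane, 2, 6, 0 /* host can't switch dynamically */);
	return 0;
}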

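The repeated i2c_xfer hunks (arcturus, navi10, sienna_cichlid, aldebaran, smu_v13_0_0/6) all fix the same bug: adev->pm.mutex was released immediately after the request was queued, so the reply buffer was then parsed without the lock held. Moving the unlock below the shared fail: label keeps the request, the reply copy-out, and the error path inside one critical section. A userspace sketch of the corrected idiom follows; pthreads stand in for the kernel mutex and the fake_* helpers are placeholders for smu_cmn_update_table() and the reply parsing, not real driver functions.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t pm_mutex = PTHREAD_MUTEX_INITIALIZER;

static int fake_send_request(int *req) { (void)req; return 0; }
static int fake_read_reply(int *req)   { (void)req; return 42; }

/* Corrected pattern: unlock at the common label, after reply parsing. */
static int i2c_xfer_like(void)
{
	int *req;
	int r;

	req = malloc(sizeof(*req));
	if (!req)
		return -1;

	pthread_mutex_lock(&pm_mutex);
	r = fake_send_request(req);
	if (r)
		goto fail;		/* unlock happens at the label, not here */

	r = fake_read_reply(req);	/* reply consumed while still locked */
fail:
	pthread_mutex_unlock(&pm_mutex);
	free(req);
	return r;
}

int main(void)
{
	printf("xfer result: %d\n", i2c_xfer_like());
	return 0;
}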