| field | value | date |
|---|---|---|
| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2021-07-04 23:05:31 -0700 |
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2021-07-04 23:05:31 -0700 |
| commit | 818b26588994d9d95743fca0a427f08ec6c1c41d (patch) | |
| tree | 870d9abed0e43b82257350a93e6517816815cd6c /drivers/gpu/drm/amd/pm | |
| parent | 45a4b68354ffccbc9ca71027bd34754ca24f5183 (diff) | |
| parent | f8f84af5da9ee04ef1d271528656dac42a090d00 (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 5.14 merge window.
Diffstat (limited to 'drivers/gpu/drm/amd/pm')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 60 |
| -rw-r--r-- | drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 74 |
| -rw-r--r-- | drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c | 24 |
| -rw-r--r-- | drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c | 25 |
| -rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 5 |
| -rw-r--r-- | drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 5 |
6 files changed, 178 insertions, 15 deletions
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index a2681fe875ed..c0565a932a12 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -587,6 +587,48 @@ static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
 			tmp, MC_CG_ARB_FREQ_F0);
 }
 
+static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+	uint16_t pcie_gen = 0;
+
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
+	    adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
+		pcie_gen = 3;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
+		adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
+		pcie_gen = 2;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
+		adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
+		pcie_gen = 1;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
+		adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
+		pcie_gen = 0;
+
+	return pcie_gen;
+}
+
+static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+	uint16_t pcie_width = 0;
+
+	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+		pcie_width = 16;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+		pcie_width = 12;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+		pcie_width = 8;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+		pcie_width = 4;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+		pcie_width = 2;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+		pcie_width = 1;
+
+	return pcie_width;
+}
+
 static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
 {
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -683,6 +725,11 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
 					PP_Min_PCIEGen),
 			get_pcie_lane_support(data->pcie_lane_cap,
 					PP_Max_PCIELane));
+
+		if (data->pcie_dpm_key_disabled)
+			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+				data->dpm_table.pcie_speed_table.count,
+				smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
 	}
 	return 0;
 }
@@ -1177,7 +1224,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 		    (hwmgr->chip_id == CHIP_POLARIS10) ||
 		    (hwmgr->chip_id == CHIP_POLARIS11) ||
 		    (hwmgr->chip_id == CHIP_POLARIS12) ||
-		    (hwmgr->chip_id == CHIP_TONGA))
+		    (hwmgr->chip_id == CHIP_TONGA) ||
+		    (hwmgr->chip_id == CHIP_TOPAZ))
 			PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
@@ -1248,6 +1296,13 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
 						NULL)),
 				"Failed to enable pcie DPM during DPM Start Function!",
 				return -EINVAL);
+	} else {
+		PP_ASSERT_WITH_CODE(
+				(0 == smum_send_msg_to_smc(hwmgr,
+						PPSMC_MSG_PCIeDPM_Disable,
+						NULL)),
+				"Failed to disble pcie DPM during DPM Start Function!",
+				return -EINVAL);
 	}
 
 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -3276,7 +3331,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
 						!hwmgr->display_config->multi_monitor_in_sync) ||
-						smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
+						(hwmgr->display_config->num_display &&
+						smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
 
 	disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
 					 disable_mclk_switching_for_display;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 22b636e2b89b..599ec9726601 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -54,6 +54,9 @@
 #include "smuio/smuio_9_0_offset.h"
 #include "smuio/smuio_9_0_sh_mask.h"
 
+#define smnPCIE_LC_SPEED_CNTL			0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
+
 #define HBM_MEMORY_CHANNEL_WIDTH    128
 
 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
@@ -443,8 +446,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	if (PP_CAP(PHM_PlatformCaps_VCEDPM))
 		data->smu_features[GNLD_DPM_VCE].supported = true;
 
-	if (!data->registry_data.pcie_dpm_key_disabled)
-		data->smu_features[GNLD_DPM_LINK].supported = true;
+	data->smu_features[GNLD_DPM_LINK].supported = true;
 
 	if (!data->registry_data.dcefclk_dpm_key_disabled)
 		data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
@@ -1544,6 +1546,13 @@ static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
 			pp_table->PcieLaneCount[i] = pcie_width;
 	}
 
+	if (data->registry_data.pcie_dpm_key_disabled) {
+		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+			pp_table->PcieGenSpeed[i] = pcie_gen;
+			pp_table->PcieLaneCount[i] = pcie_width;
+		}
+	}
+
 	return 0;
 }
@@ -2966,6 +2975,14 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
 		}
 	}
 
+	if (data->registry_data.pcie_dpm_key_disabled) {
+		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
+				false, data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap),
+		"Attempt to Disable Link DPM feature Failed!", return -EINVAL);
+		data->smu_features[GNLD_DPM_LINK].enabled = false;
+		data->smu_features[GNLD_DPM_LINK].supported = false;
+	}
+
 	return 0;
 }
@@ -4584,6 +4601,24 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
 	return 0;
 }
 
+static int vega10_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+}
+
+static int vega10_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+		PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+		>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+}
+
 static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, char *buf)
 {
@@ -4592,8 +4627,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
 	struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
 	struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
-	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
 	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
+	uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
+	PPTable_t *pptable = &(data->smc_state_table.pp_table);
 
 	int i, now, size = 0, count = 0;
@@ -4650,15 +4686,31 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
 					"*" : "");
 		break;
 	case PP_PCIE:
-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);
-
-		for (i = 0; i < pcie_table->count; i++)
-			size += sprintf(buf + size, "%d: %s %s\n", i,
-					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
-					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
-					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
-					(i == now) ? "*" : "");
+		current_gen_speed =
+			vega10_get_current_pcie_link_speed_level(hwmgr);
+		current_lane_width =
+			vega10_get_current_pcie_link_width_level(hwmgr);
+		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+			gen_speed = pptable->PcieGenSpeed[i];
+			lane_width = pptable->PcieLaneCount[i];
+
+			size += sprintf(buf + size, "%d: %s %s %s\n", i,
+					(gen_speed == 0) ? "2.5GT/s," :
+					(gen_speed == 1) ? "5.0GT/s," :
+					(gen_speed == 2) ? "8.0GT/s," :
+					(gen_speed == 3) ? "16.0GT/s," : "",
+					(lane_width == 1) ? "x1" :
+					(lane_width == 2) ? "x2" :
+					(lane_width == 3) ? "x4" :
+					(lane_width == 4) ? "x8" :
+					(lane_width == 5) ? "x12" :
+					(lane_width == 6) ? "x16" : "",
+					(current_gen_speed == gen_speed) &&
+					(current_lane_width == lane_width) ?
+					"*" : "");
+		}
 		break;
+
 	case OD_SCLK:
 		if (hwmgr->od_enabled) {
 			size = sprintf(buf, "%s:\n", "OD_SCLK");
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index 43e01d880f7c..4f6da11e8f10 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -133,6 +133,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
 	data->registry_data.auto_wattman_debug = 0;
 	data->registry_data.auto_wattman_sample_period = 100;
 	data->registry_data.auto_wattman_threshold = 50;
+	data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
 }
 
 static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
@@ -539,6 +540,29 @@ static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
 		pp_table->PcieLaneCount[i] = pcie_width_arg;
 	}
 
+	/* override to the highest if it's disabled from ppfeaturmask */
+	if (data->registry_data.pcie_dpm_key_disabled) {
+		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+			smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
+			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+				NULL);
+			PP_ASSERT_WITH_CODE(!ret,
+				"[OverridePcieParameters] Attempt to override pcie params failed!",
+				return ret);
+
+			pp_table->PcieGenSpeed[i] = pcie_gen;
+			pp_table->PcieLaneCount[i] = pcie_width;
+		}
+		ret = vega12_enable_smc_features(hwmgr,
+				false,
+				data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
+		PP_ASSERT_WITH_CODE(!ret,
+				"Attempt to Disable DPM LINK Failed!",
+				return ret);
+		data->smu_features[GNLD_DPM_LINK].enabled = false;
+		data->smu_features[GNLD_DPM_LINK].supported = false;
+	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index f19964c69a00..b6ee3a285c9d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -171,6 +171,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 	data->registry_data.gfxoff_controlled_by_driver = 1;
 	data->gfxoff_allowed = false;
 	data->counter_gfxoff = 0;
+	data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
 }
 
 static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
@@ -884,6 +885,30 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
 		pp_table->PcieLaneCount[i] = pcie_width_arg;
 	}
 
+	/* override to the highest if it's disabled from ppfeaturmask */
+	if (data->registry_data.pcie_dpm_key_disabled) {
+		for (i = 0; i < NUM_LINK_LEVELS; i++) {
+			smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
+			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+				NULL);
+			PP_ASSERT_WITH_CODE(!ret,
+				"[OverridePcieParameters] Attempt to override pcie params failed!",
+				return ret);
+
+			pp_table->PcieGenSpeed[i] = pcie_gen;
+			pp_table->PcieLaneCount[i] = pcie_width;
+		}
+		ret = vega20_enable_smc_features(hwmgr,
+				false,
+				data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
+		PP_ASSERT_WITH_CODE(!ret,
+				"Attempt to Disable DPM LINK Failed!",
+				return ret);
+		data->smu_features[GNLD_DPM_LINK].enabled = false;
+		data->smu_features[GNLD_DPM_LINK].supported = false;
+	}
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index d143ef1b460b..cd905e41080e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1294,7 +1294,7 @@ static int smu_disable_dpms(struct smu_context *smu)
 	bool use_baco = !smu->is_apu &&
 		((amdgpu_in_reset(adev) &&
 		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
-		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
+		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
 
 	/*
 	 * For custom pptable uploading, skip the DPM features
@@ -1431,7 +1431,8 @@ static int smu_suspend(void *handle)
 
 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
-	if (smu->is_apu)
+	/* skip CGPG when in S0ix */
+	if (smu->is_apu && !adev->in_s0ix)
 		smu_set_gfx_cgpg(&adev->smu, false);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 7ddbaecb11c2..101eaa20db9b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -384,10 +384,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 
 static bool vangogh_is_dpm_running(struct smu_context *smu)
 {
+	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
 	uint32_t feature_mask[2];
 	uint64_t feature_enabled;
 
+	/* we need to re-init after suspend so return false */
+	if (adev->in_suspend)
+		return false;
+
 	ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
 	if (ret)
