Diffstat (limited to 'drivers/gpu/drm/amd/pm')
27 files changed, 3128 insertions, 156 deletions
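The most visible userspace-facing addition in this series is the new apu_thermal_cap sysfs node, documented in the amdgpu_pm.c hunk below. As a rough illustration only (not part of the patch), here is a minimal C sketch of how a tool might exercise it; the card0 sysfs path is an assumption that depends on the system, and error handling is kept to the bare minimum:

#include <stdio.h>

int main(void)
{
	/* Path is an assumption; substitute the right cardN for the APU. */
	const char *path = "/sys/class/drm/card0/device/apu_thermal_cap";
	unsigned int limit;
	FILE *f;

	/* Reading back the node shows the current core limit value. */
	f = fopen(path, "r");
	if (!f) {
		perror("open apu_thermal_cap");
		return 1;
	}
	if (fscanf(f, "%u", &limit) == 1)
		printf("current thermal cap: %u\n", limit);
	fclose(f);

	/* Per the DOC comment, only integers in [0, 100] are accepted. */
	f = fopen(path, "w");
	if (!f) {
		perror("open apu_thermal_cap for write");
		return 1;
	}
	fprintf(f, "%u", 95u);
	if (fclose(f) != 0)	/* a rejected value surfaces when flushed */
		perror("set apu_thermal_cap");

	return 0;
}

Out-of-range writes are rejected by amdgpu_set_apu_thermal_cap() with -EINVAL (and kstrtou32() already refuses negative input), which userspace observes as a failed write.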
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 6e79d3352d0bb..078aaaa531628 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -36,6 +36,8 @@ #define amdgpu_dpm_enable_bapm(adev, e) \ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) +#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev)) + int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -456,6 +458,34 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso return ret; } +int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = -EINVAL; + + if (pp_funcs && pp_funcs->get_apu_thermal_limit) { + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit); + mutex_unlock(&adev->pm.mutex); + } + + return ret; +} + +int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = -EINVAL; + + if (pp_funcs && pp_funcs->set_apu_thermal_limit) { + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit); + mutex_unlock(&adev->pm.mutex); + } + + return ret; +} + void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -1432,15 +1462,24 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) { - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - struct smu_context *smu = adev->powerplay.pp_handle; + if (is_support_sw_smu(adev)) { + struct smu_context *smu = adev->powerplay.pp_handle; - if ((is_support_sw_smu(adev) && smu->od_enabled) || - (is_support_sw_smu(adev) && smu->is_apu) || - (!is_support_sw_smu(adev) && hwmgr->od_enabled)) - return true; + return (smu->od_enabled || smu->is_apu); + } else { + struct pp_hwmgr *hwmgr; - return false; + /* + * dpm on some legacy asics don't carry od_enabled member + * as its pp_handle is casted directly from adev. 
+ */ + if (amdgpu_dpm_is_legacy_dpm(adev)) + return false; + + hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle; + + return hwmgr->od_enabled; + } } int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index bf6d63673b5aa..f4f40459f22b9 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -91,6 +91,8 @@ const char * const amdgpu_pp_profile_name[] = { "COMPUTE", "CUSTOM", "WINDOW_3D", + "CAPPED", + "UNCAPPED", }; /** @@ -869,13 +871,11 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, } if (ret == -ENOENT) { size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); - if (size > 0) { - size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size); - size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size); - } + size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size); + size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size); } if (size == 0) @@ -1178,6 +1178,21 @@ static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev, return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count); } +static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf); +} + +static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count); +} + static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev, struct device_attribute *attr, char *buf) @@ -1193,6 +1208,21 @@ static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev, return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count); } +static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf); +} + +static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count); +} + static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, struct device_attribute *attr, char *buf) @@ -1686,6 +1716,82 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev, } /** + * DOC: apu_thermal_cap + * + * The amdgpu driver provides a sysfs API for retrieving/updating thermal + * limit temperature in millidegrees Celsius + * + * Reading back the file shows you core limit value + * + * Writing an integer to the file, sets a new thermal limit. The value + * should be between 0 and 100. If the value is less than 0 or greater + * than 100, then the write request will be ignored. 
+ */ +static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int ret, size; + u32 limit; + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) { + pm_runtime_put_autosuspend(ddev->dev); + return ret; + } + + ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit); + if (!ret) + size = sysfs_emit(buf, "%u\n", limit); + else + size = sysfs_emit(buf, "failed to get thermal limit\n"); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + return size; +} + +static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + int ret; + u32 value; + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + + ret = kstrtou32(buf, 10, &value); + if (ret) + return ret; + + if (value > 100) { + dev_err(dev, "Invalid argument !\n"); + return -EINVAL; + } + + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) { + pm_runtime_put_autosuspend(ddev->dev); + return ret; + } + + ret = amdgpu_dpm_set_apu_thermal_limit(adev, value); + if (ret) { + dev_err(dev, "failed to update thermal limit\n"); + return ret; + } + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + return count; +} + +/** * DOC: gpu_metrics * * The amdgpu driver provides a sysfs API for retrieving current gpu @@ -1924,7 +2030,9 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC), @@ -1937,6 +2045,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC, .attr_update = ss_power_attr_update), @@ -2012,6 +2121,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ gc_ver == IP_VERSION(11, 0, 2) || gc_ver == IP_VERSION(11, 0, 3))) *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) { + if (!((gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 0) || + gc_ver == IP_VERSION(11, 0, 2) || + gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) + *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { if (!(gc_ver == IP_VERSION(10, 3, 1) || gc_ver == IP_VERSION(10, 3, 0) || @@ -2020,6 +2135,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ gc_ver == IP_VERSION(11, 0, 2) || gc_ver == IP_VERSION(11, 0, 3))) *states = ATTR_STATE_UNSUPPORTED; + } else if 
(DEVICE_ATTR_IS(pp_dpm_dclk1)) { + if (!((gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 0) || + gc_ver == IP_VERSION(11, 0, 2) || + gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) + *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) { if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP) *states = ATTR_STATE_UNSUPPORTED; @@ -3272,7 +3393,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ effective_mode &= ~S_IWUSR; - /* not implemented yet for GC 10.3.1 APUs */ + /* In the case of APUs, this is only implemented on Vangogh */ if (((adev->family == AMDGPU_FAMILY_SI) || ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)))) && (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || @@ -3281,7 +3402,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr)) return 0; - /* not implemented yet for APUs having <= GC 9.3.0 */ + /* not implemented yet for APUs having < GC 9.3.0 (Renoir) */ if (((adev->family == AMDGPU_FAMILY_SI) || ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) && (attr == &sensor_dev_attr_power1_average.dev_attr.attr)) diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 16addceca68ff..d178f3f440816 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -369,6 +369,9 @@ struct amdgpu_pm { int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, void *data, uint32_t *size); +int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit); +int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit); + int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate); diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index d6d9e3b1b2c0e..02e69ccff3bac 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -6925,23 +6925,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) return 0; } -static int si_set_temperature_range(struct amdgpu_device *adev) -{ - int ret; - - ret = si_thermal_enable_alert(adev, false); - if (ret) - return ret; - ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); - if (ret) - return ret; - ret = si_thermal_enable_alert(adev, true); - if (ret) - return ret; - - return ret; -} - static void si_dpm_disable(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); @@ -7626,18 +7609,6 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev, static int si_dpm_late_init(void *handle) { - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->pm.dpm_enabled) - return 0; - - ret = si_set_temperature_range(adev); - if (ret) - return ret; -#if 0 //TODO ? 
- si_dpm_powergate_uvd(adev, true); -#endif return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 0652b001ad549..2ddf5198e5c48 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -40,6 +40,7 @@ #include "smu_v13_0_0_ppt.h" #include "smu_v13_0_4_ppt.h" #include "smu_v13_0_5_ppt.h" +#include "smu_v13_0_6_ppt.h" #include "smu_v13_0_7_ppt.h" #include "amd_pcie.h" @@ -161,10 +162,15 @@ int smu_get_dpm_freq_range(struct smu_context *smu, int smu_set_gfx_power_up_by_imu(struct smu_context *smu) { - if (!smu->ppt_funcs || !smu->ppt_funcs->set_gfx_power_up_by_imu) - return -EOPNOTSUPP; + int ret = 0; + struct amdgpu_device *adev = smu->adev; - return smu->ppt_funcs->set_gfx_power_up_by_imu(smu); + if (smu->ppt_funcs->set_gfx_power_up_by_imu) { + ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu); + if (ret) + dev_err(adev->dev, "Failed to enable gfx imu!\n"); + } + return ret; } static u32 smu_get_mclk(void *handle, bool low) @@ -195,6 +201,19 @@ static u32 smu_get_sclk(void *handle, bool low) return clk_freq * 100; } +static int smu_set_gfx_imu_enable(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) + return 0; + + if (amdgpu_in_reset(smu->adev) || adev->in_s0ix) + return 0; + + return smu_set_gfx_power_up_by_imu(smu); +} + static int smu_dpm_set_vcn_enable(struct smu_context *smu, bool enable) { @@ -609,6 +628,11 @@ static int smu_set_funcs(struct amdgpu_device *adev) case IP_VERSION(13, 0, 10): smu_v13_0_0_set_ppt_funcs(smu); break; + case IP_VERSION(13, 0, 6): + smu_v13_0_6_set_ppt_funcs(smu); + /* Enable pp_od_clk_voltage node */ + smu->od_enabled = true; + break; case IP_VERSION(13, 0, 7): smu_v13_0_7_set_ppt_funcs(smu); break; @@ -709,6 +733,24 @@ static int smu_late_init(void *handle) return ret; } + /* + * Explicitly notify PMFW the power mode the system in. Since + * the PMFW may boot the ASIC with a different mode. + * For those supporting ACDC switch via gpio, PMFW will + * handle the switch automatically. Driver involvement + * is unnecessary. + */ + if (!smu->dc_controlled_by_gpio) { + ret = smu_set_power_source(smu, + adev->pm.ac_power ? SMU_POWER_SOURCE_AC : + SMU_POWER_SOURCE_DC); + if (ret) { + dev_err(adev->dev, "Failed to switch to %s mode!\n", + adev->pm.ac_power ? 
"AC" : "DC"); + return ret; + } + } + if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) || (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3))) return 0; @@ -1390,15 +1432,9 @@ static int smu_hw_init(void *handle) } if (smu->is_apu) { - if ((smu->ppt_funcs->set_gfx_power_up_by_imu) && - likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { - ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu); - if (ret) { - dev_err(adev->dev, "Failed to Enable gfx imu!\n"); - return ret; - } - } - + ret = smu_set_gfx_imu_enable(smu); + if (ret) + return ret; smu_dpm_set_vcn_enable(smu, true); smu_dpm_set_jpeg_enable(smu, true); smu_set_gfx_cgpg(smu, true); @@ -1675,6 +1711,10 @@ static int smu_resume(void *handle) return ret; } + ret = smu_set_gfx_imu_enable(smu); + if (ret) + return ret; + smu_set_gfx_cgpg(smu, true); smu->disable_uclk_switch = 0; @@ -1690,8 +1730,6 @@ static int smu_display_configuration_change(void *handle, const struct amd_pp_display_configuration *display_config) { struct smu_context *smu = handle; - int index = 0; - int num_of_active_display = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; @@ -1702,11 +1740,6 @@ static int smu_display_configuration_change(void *handle, smu_set_min_dcef_deep_sleep(smu, display_config->min_dcef_deep_sleep_set_clk / 100); - for (index = 0; index < display_config->num_path_including_non_display; index++) { - if (display_config->displays[index].controller_id != 0) - num_of_active_display++; - } - return 0; } @@ -2000,8 +2033,12 @@ static int smu_force_ppclk_levels(void *handle, clk_type = SMU_DCEFCLK; break; case PP_VCLK: clk_type = SMU_VCLK; break; + case PP_VCLK1: + clk_type = SMU_VCLK1; break; case PP_DCLK: clk_type = SMU_DCLK; break; + case PP_DCLK1: + clk_type = SMU_DCLK1; break; case OD_SCLK: clk_type = SMU_OD_SCLK; break; case OD_MCLK: @@ -2387,8 +2424,12 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) clk_type = SMU_DCEFCLK; break; case PP_VCLK: clk_type = SMU_VCLK; break; + case PP_VCLK1: + clk_type = SMU_VCLK1; break; case PP_DCLK: clk_type = SMU_DCLK; break; + case PP_DCLK1: + clk_type = SMU_DCLK1; break; case OD_SCLK: clk_type = SMU_OD_SCLK; break; case OD_MCLK: @@ -2532,6 +2573,28 @@ unlock: return ret; } +static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit) +{ + int ret = -EINVAL; + struct smu_context *smu = handle; + + if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit) + ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit); + + return ret; +} + +static int smu_set_apu_thermal_limit(void *handle, uint32_t limit) +{ + int ret = -EINVAL; + struct smu_context *smu = handle; + + if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit) + ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit); + + return ret; +} + static int smu_get_power_profile_mode(void *handle, char *buf) { struct smu_context *smu = handle; @@ -3033,6 +3096,8 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { .emit_clock_levels = smu_emit_ppclk_levels, .force_performance_level = smu_force_performance_level, .read_sensor = smu_read_sensor, + .get_apu_thermal_limit = smu_get_apu_thermal_limit, + .set_apu_thermal_limit = smu_set_apu_thermal_limit, .get_performance_level = smu_get_performance_level, .get_current_power_state = smu_get_current_power_state, .get_fan_speed_rpm = smu_get_fan_speed_rpm, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 2a03d85bf4e2d..09469c750a96b 100644 --- 
a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -722,6 +722,18 @@ struct pptable_funcs { void *data, uint32_t *size); /** + * @get_apu_thermal_limit: get apu core limit from smu + * &limit: current limit temperature in millidegrees Celsius + */ + int (*get_apu_thermal_limit)(struct smu_context *smu, uint32_t *limit); + + /** + * @set_apu_thermal_limit: update all controllers with new limit + * &limit: limit temperature to be setted, in millidegrees Celsius + */ + int (*set_apu_thermal_limit)(struct smu_context *smu, uint32_t limit); + + /** * @pre_display_config_changed: Prepare GPU for a display configuration * change. * diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h index 8361ebd8d8768..21e6028a49e6f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h @@ -238,7 +238,9 @@ typedef struct { #define WORKLOAD_PPLIB_VR_BIT 3 #define WORKLOAD_PPLIB_COMPUTE_BIT 4 #define WORKLOAD_PPLIB_CUSTOM_BIT 5 -#define WORKLOAD_PPLIB_COUNT 6 +#define WORKLOAD_PPLIB_CAPPED_BIT 6 +#define WORKLOAD_PPLIB_UNCAPPED_BIT 7 +#define WORKLOAD_PPLIB_COUNT 8 #define TABLE_BIOS_IF 0 // Called by BIOS #define TABLE_WATERMARKS 1 // Called by DAL through VBIOS diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h index f77401709d83c..2162ecd1057d1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h @@ -27,7 +27,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define PMFW_DRIVER_IF_VERSION 7 +#define PMFW_DRIVER_IF_VERSION 8 typedef struct { int32_t value; @@ -198,7 +198,7 @@ typedef struct { uint16_t SkinTemp; uint16_t DeviceState; uint16_t CurTemp; //[centi-Celsius] - uint16_t spare2; + uint16_t FilterAlphaValue; uint16_t AverageGfxclkFrequency; uint16_t AverageFclkFrequency; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h new file mode 100644 index 0000000000000..be596777cd2ca --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h @@ -0,0 +1,141 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef SMU_13_0_6_DRIVER_IF_H +#define SMU_13_0_6_DRIVER_IF_H + +// *** IMPORTANT *** +// PMFW TEAM: Always increment the interface version if +// anything is changed in this file +#define SMU13_0_6_DRIVER_IF_VERSION 0x08042022 + +//I2C Interface +#define NUM_I2C_CONTROLLERS 8 +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +#define MAX_SW_I2C_COMMANDS 24 + +typedef enum { + I2C_CONTROLLER_PORT_0, //CKSVII2C0 + I2C_CONTROLLER_PORT_1, //CKSVII2C1 + I2C_CONTROLLER_PORT_COUNT, +} I2cControllerPort_e; + +typedef enum { + UNSUPPORTED_1, //50 Kbits/s not supported anymore! + I2C_SPEED_STANDARD_100K, //100 Kbits/s + I2C_SPEED_FAST_400K, //400 Kbits/s + I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) + UNSUPPORTED_2, //1 Mbits/s (in high speed mode) not supported anymore! + UNSUPPORTED_3, //2.3 Mbits/s not supported anymore! + I2C_SPEED_COUNT, +} I2cSpeed_e; + +typedef enum { + I2C_CMD_READ, + I2C_CMD_WRITE, + I2C_CMD_COUNT, +} I2cCmdType_e; + +#define CMDCONFIG_STOP_BIT 0 +#define CMDCONFIG_RESTART_BIT 1 +#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write + +#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) +#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) +#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT) + +typedef struct { + uint8_t ReadWriteData; //Return data for read. Data to send for write + uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write +} SwI2cCmd_t; //SW I2C Command Table + +typedef struct { + uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) + uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select + uint8_t SlaveAddress; //Slave address of device + uint8_t NumCmds; //Number of commands + SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; +} SwI2cRequest_t; // SW I2C Request Table + +typedef struct { + SwI2cRequest_t SwI2cRequest; + uint32_t Spare[8]; + uint32_t MmHubPadding[8]; // SMU internal use +} SwI2cRequestExternal_t; + +typedef enum { + PPCLK_VCLK, + PPCLK_DCLK, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_FCLK, + PPCLK_LCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + GPIO_INT_POLARITY_ACTIVE_LOW, + GPIO_INT_POLARITY_ACTIVE_HIGH, +} GpioIntPolarity_e; + +//TODO confirm if this is used in SMU_13_0_6 PPSMC_MSG_SetUclkDpmMode +typedef enum { + UCLK_DPM_MODE_BANDWIDTH, + UCLK_DPM_MODE_LATENCY, +} UCLK_DPM_MODE_e; + +typedef struct { + //0-26 SOC, 27-29 SOCIO + uint16_t avgPsmCount[30]; + uint16_t minPsmCount[30]; + float avgPsmVoltage[30]; + float minPsmVoltage[30]; +} AvfsDebugTableAid_t; + +typedef struct { + //0-27 GFX, 28-29 SOC + uint16_t avgPsmCount[30]; + uint16_t minPsmCount[30]; + float avgPsmVoltage[30]; + float minPsmVoltage[30]; +} AvfsDebugTableXcd_t; + +// These defines are used with the following messages: +// SMC_MSG_TransferTableDram2Smu +// SMC_MSG_TransferTableSmu2Dram +// #define TABLE_PPTABLE 0 +// #define TABLE_AVFS_PSM_DEBUG 1 +// #define TABLE_AVFS_FUSE_OVERRIDE 2 +// #define TABLE_PMSTATUSLOG 3 +// #define TABLE_SMU_METRICS 4 +// #define TABLE_DRIVER_SMU_CONFIG 5 +// #define TABLE_I2C_COMMANDS 6 +// #define TABLE_COUNT 7 + +// // Table transfer status +// #define TABLE_TRANSFER_OK 0x0 +// #define TABLE_TRANSFER_FAILED 
0xFF +// #define TABLE_TRANSFER_PENDING 0xAB + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h new file mode 100644 index 0000000000000..bdccbb4a62763 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -0,0 +1,212 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef SMU_13_0_6_PMFW_H +#define SMU_13_0_6_PMFW_H + +#define NUM_VCLK_DPM_LEVELS 4 +#define NUM_DCLK_DPM_LEVELS 4 +#define NUM_SOCCLK_DPM_LEVELS 4 +#define NUM_LCLK_DPM_LEVELS 4 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_FCLK_DPM_LEVELS 4 +#define NUM_XGMI_DPM_LEVELS 2 +#define NUM_CXL_BITRATES 4 +#define NUM_PCIE_BITRATES 4 +#define NUM_XGMI_BITRATES 4 +#define NUM_XGMI_WIDTHS 3 + +typedef enum { +/*0*/ FEATURE_DATA_CALCULATION = 0, +/*1*/ FEATURE_DPM_CCLK = 1, +/*2*/ FEATURE_DPM_FCLK = 2, +/*3*/ FEATURE_DPM_GFXCLK = 3, +/*4*/ FEATURE_DPM_LCLK = 4, +/*5*/ FEATURE_DPM_SOCCLK = 5, +/*6*/ FEATURE_DPM_UCLK = 6, +/*7*/ FEATURE_DPM_VCN = 7, +/*8*/ FEATURE_DPM_XGMI = 8, +/*9*/ FEATURE_DS_FCLK = 9, +/*10*/ FEATURE_DS_GFXCLK = 10, +/*11*/ FEATURE_DS_LCLK = 11, +/*12*/ FEATURE_DS_MP0CLK = 12, +/*13*/ FEATURE_DS_MP1CLK = 13, +/*14*/ FEATURE_DS_MPIOCLK = 14, +/*15*/ FEATURE_DS_SOCCLK = 15, +/*16*/ FEATURE_DS_VCN = 16, +/*17*/ FEATURE_APCC_DFLL = 17, +/*18*/ FEATURE_APCC_PLUS = 18, +/*19*/ FEATURE_DF_CSTATE = 19, +/*20*/ FEATURE_CC6 = 20, +/*21*/ FEATURE_PC6 = 21, +/*22*/ FEATURE_CPPC = 22, +/*23*/ FEATURE_PPT = 23, +/*24*/ FEATURE_TDC = 24, +/*25*/ FEATURE_THERMAL = 25, +/*26*/ FEATURE_SOC_PCC = 26, +/*27*/ FEATURE_CCD_PCC = 27, +/*28*/ FEATURE_CCD_EDC = 28, +/*29*/ FEATURE_PROCHOT = 29, +/*30*/ FEATURE_DVO_CCLK = 30, +/*31*/ FEATURE_FDD_AID_HBM = 31, +/*32*/ FEATURE_FDD_AID_SOC = 32, +/*33*/ FEATURE_FDD_XCD_EDC = 33, +/*34*/ FEATURE_FDD_XCD_XVMIN = 34, +/*35*/ FEATURE_FW_CTF = 35, +/*36*/ FEATURE_GFXOFF = 36, +/*37*/ FEATURE_SMU_CG = 37, +/*38*/ FEATURE_PSI7 = 38, +/*39*/ FEATURE_CSTATE_BOOST = 39, +/*40*/ FEATURE_XGMI_PER_LINK_PWR_DOWN = 40, +/*41*/ FEATURE_CXL_QOS = 41, +/*42*/ FEATURE_SOC_DC_RTC = 42, +/*43*/ FEATURE_GFX_DC_RTC = 43, + +/*44*/ NUM_FEATURES = 44 +} FEATURE_LIST_e; + +//enum for MPIO PCIe gen speed msgs +typedef enum { + PCIE_LINK_SPEED_INDEX_TABLE_GEN1, + PCIE_LINK_SPEED_INDEX_TABLE_GEN2, + PCIE_LINK_SPEED_INDEX_TABLE_GEN3, + PCIE_LINK_SPEED_INDEX_TABLE_GEN4, + PCIE_LINK_SPEED_INDEX_TABLE_GEN4_ESM, + 
PCIE_LINK_SPEED_INDEX_TABLE_GEN5, + PCIE_LINK_SPEED_INDEX_TABLE_COUNT +} PCIE_LINK_SPEED_INDEX_TABLE_e; + +typedef enum { + VOLTAGE_COLD_0, + VOLTAGE_COLD_1, + VOLTAGE_COLD_2, + VOLTAGE_COLD_3, + VOLTAGE_COLD_4, + VOLTAGE_COLD_5, + VOLTAGE_COLD_6, + VOLTAGE_COLD_7, + VOLTAGE_MID_0, + VOLTAGE_MID_1, + VOLTAGE_MID_2, + VOLTAGE_MID_3, + VOLTAGE_MID_4, + VOLTAGE_MID_5, + VOLTAGE_MID_6, + VOLTAGE_MID_7, + VOLTAGE_HOT_0, + VOLTAGE_HOT_1, + VOLTAGE_HOT_2, + VOLTAGE_HOT_3, + VOLTAGE_HOT_4, + VOLTAGE_HOT_5, + VOLTAGE_HOT_6, + VOLTAGE_HOT_7, + VOLTAGE_GUARDBAND_COUNT +} GFX_GUARDBAND_e; + +#define SMU_METRICS_TABLE_VERSION 0x1 + +typedef struct { + uint32_t AccumulationCounter; + + //TEMPERATURE + uint32_t MaxSocketTemperature; + uint32_t MaxVrTemperature; + uint32_t MaxHbmTemperature; + uint64_t MaxSocketTemperatureAcc; + uint64_t MaxVrTemperatureAcc; + uint64_t MaxHbmTemperatureAcc; + + //POWER + uint32_t SocketPowerLimit; + uint32_t MaxSocketPowerLimit; + uint32_t SocketPower; + + //ENERGY + uint64_t Timestamp; + uint64_t SocketEnergyAcc; + uint64_t CcdEnergyAcc; + uint64_t XcdEnergyAcc; + uint64_t AidEnergyAcc; + uint64_t HbmEnergyAcc; + + //FREQUENCY + uint32_t CclkFrequencyLimit; + uint32_t GfxclkFrequencyLimit; + uint32_t FclkFrequency; + uint32_t UclkFrequency; + uint32_t SocclkFrequency[4]; + uint32_t VclkFrequency[4]; + uint32_t DclkFrequency[4]; + uint32_t LclkFrequency[4]; + uint64_t GfxclkFrequencyAcc[8]; + uint64_t CclkFrequencyAcc[96]; + + //FREQUENCY RANGE + uint32_t MaxCclkFrequency; + uint32_t MinCclkFrequency; + uint32_t MaxGfxclkFrequency; + uint32_t MinGfxclkFrequency; + uint32_t FclkFrequencyTable[4]; + uint32_t UclkFrequencyTable[4]; + uint32_t SocclkFrequencyTable[4]; + uint32_t VclkFrequencyTable[4]; + uint32_t DclkFrequencyTable[4]; + uint32_t LclkFrequencyTable[4]; + uint32_t MaxLclkDpmRange; + uint32_t MinLclkDpmRange; + + //XGMI + uint32_t XgmiWidth; + uint32_t XgmiBitrate; + uint64_t XgmiReadBandwidthAcc[8]; + uint64_t XgmiWriteBandwidthAcc[8]; + + //ACTIVITY + uint32_t SocketC0Residency; + uint32_t SocketGfxBusy; + uint32_t DramBandwidthUtilization; + uint64_t SocketC0ResidencyAcc; + uint64_t SocketGfxBusyAcc; + uint64_t DramBandwidthAcc; + uint32_t MaxDramBandwidth; + uint64_t DramBandwidthUtilizationAcc; + uint64_t PcieBandwidthAcc[4]; + + //THROTTLERS + uint32_t ProchotResidencyAcc; + uint32_t PptResidencyAcc; + uint32_t SocketThmResidencyAcc; + uint32_t VrThmResidencyAcc; + uint32_t HbmThmResidencyAcc; +} MetricsTable_t; + +#define SMU_VF_METRICS_TABLE_VERSION 0x1 + +typedef struct { + uint32_t AccumulationCounter; + uint32_t InstGfxclk_TargFreq; + uint64_t AccGfxclk_TargFreq; + uint64_t AccGfxRsmuDpm_Busy; +} VfMetricsTable_t; + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h new file mode 100644 index 0000000000000..b838e8db395ac --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h @@ -0,0 +1,95 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef SMU_13_0_6_PPSMC_H +#define SMU_13_0_6_PPSMC_H + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GfxDriverReset 0x3 +#define PPSMC_MSG_GetDriverIfVersion 0x4 +#define PPSMC_MSG_EnableAllSmuFeatures 0x5 +#define PPSMC_MSG_DisableAllSmuFeatures 0x6 +#define PPSMC_MSG_RequestI2cTransaction 0x7 +#define PPSMC_MSG_GetMetricsVersion 0x8 +#define PPSMC_MSG_GetMetricsTable 0x9 +#define PPSMC_MSG_GetEccInfoTable 0xA +#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xB +#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xC +#define PPSMC_MSG_SetDriverDramAddrHigh 0xD +#define PPSMC_MSG_SetDriverDramAddrLow 0xE +#define PPSMC_MSG_SetToolsDramAddrHigh 0xF +#define PPSMC_MSG_SetToolsDramAddrLow 0x10 +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x11 +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x12 +#define PPSMC_MSG_SetSoftMinByFreq 0x13 +#define PPSMC_MSG_SetSoftMaxByFreq 0x14 +#define PPSMC_MSG_GetMinDpmFreq 0x15 +#define PPSMC_MSG_GetMaxDpmFreq 0x16 +#define PPSMC_MSG_GetDpmFreqByIndex 0x17 +#define PPSMC_MSG_SetPptLimit 0x18 +#define PPSMC_MSG_GetPptLimit 0x19 +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x1A +#define PPSMC_MSG_DramLogSetDramAddrLow 0x1B +#define PPSMC_MSG_DramLogSetDramSize 0x1C +#define PPSMC_MSG_GetDebugData 0x1D +#define PPSMC_MSG_HeavySBR 0x1E +#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x1F +#define PPSMC_MSG_DFCstateControl 0x20 +#define PPSMC_MSG_GetGmiPwrDnHyst 0x21 +#define PPSMC_MSG_SetGmiPwrDnHyst 0x22 +#define PPSMC_MSG_GmiPwrDnControl 0x23 +#define PPSMC_MSG_EnterGfxoff 0x24 +#define PPSMC_MSG_ExitGfxoff 0x25 +#define PPSMC_MSG_EnableDeterminism 0x26 +#define PPSMC_MSG_DisableDeterminism 0x27 +#define PPSMC_MSG_DumpSTBtoDram 0x28 +#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x29 +#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x2A +#define PPSMC_MSG_STBtoDramLogSetDramSize 0x2B +#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x2C +#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x2D +#define PPSMC_MSG_GfxDriverResetRecovery 0x2E +#define PPSMC_MSG_TriggerVFFLR 0x2F +#define PPSMC_MSG_SetSoftMinGfxClk 0x30 +#define PPSMC_MSG_SetSoftMaxGfxClk 0x31 +#define PPSMC_MSG_GetMinGfxDpmFreq 0x32 +#define 
PPSMC_MSG_GetMaxGfxDpmFreq 0x33 +#define PPSMC_Message_Count 0x34 + +//PPSMC Reset Types for driver msg argument +#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1 +#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2 +#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3 + +typedef uint32_t PPSMC_Result; +typedef uint32_t PPSMC_MSG; + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index 96f6c2db955b5..297b70b9388f4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -243,7 +243,9 @@ __SMU_DUMMY_MAP(SetNumBadMemoryPagesRetired), \ __SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel), \ __SMU_DUMMY_MAP(AllowGpo), \ - __SMU_DUMMY_MAP(Mode2Reset), + __SMU_DUMMY_MAP(Mode2Reset), \ + __SMU_DUMMY_MAP(RequestI2cTransaction), \ + __SMU_DUMMY_MAP(GetMetricsTable), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 1c0ae2cb757b8..df3baaab00378 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -29,11 +29,12 @@ #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37 -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07 +#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D +#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_6 0x0 #define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms @@ -61,6 +62,12 @@ #define CTF_OFFSET_HOTSPOT 5 #define CTF_OFFSET_MEM 5 +extern const int pmfw_decoded_link_speed[5]; +extern const int pmfw_decoded_link_width[7]; + +#define DECODE_GEN_SPEED(gen_speed_idx) (pmfw_decoded_link_speed[gen_speed_idx]) +#define DECODE_LANE_WIDTH(lane_width_idx) (pmfw_decoded_link_width[lane_width_idx]) + struct smu_13_0_max_sustainable_clocks { uint32_t display_clock; uint32_t phy_clock; @@ -244,6 +251,10 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu, enum smu_clk_type clk_type, struct smu_13_0_dpm_table *single_dpm_table); +int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, uint16_t level, + uint32_t *value); + int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu); int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 95da6dd1cc656..275f708db6362 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -304,7 +304,8 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, | FEATURE_MASK(FEATURE_GFX_SS_BIT) | FEATURE_MASK(FEATURE_APCC_DFLL_BIT) | FEATURE_MASK(FEATURE_FW_CTF_BIT) - | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT); + | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT) + | FEATURE_MASK(FEATURE_TEMP_DEPENDENT_VMIN_BIT); if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); @@ -3412,26 +3413,8 @@ static int navi10_post_smu_init(struct smu_context *smu) return 0; ret = navi10_run_umc_cdr_workaround(smu); - if (ret) { + if (ret) dev_err(adev->dev, "Failed to apply umc cdr workaround!\n"); - return 
ret; - } - - if (!smu->dc_controlled_by_gpio) { - /* - * For Navi1X, manually switch it to AC mode as PMFW - * may boot it with DC mode. - */ - ret = smu_v11_0_set_power_source(smu, - adev->pm.ac_power ? - SMU_POWER_SOURCE_AC : - SMU_POWER_SOURCE_DC); - if (ret) { - dev_err(adev->dev, "Failed to switch to %s mode!\n", - adev->pm.ac_power ? "AC" : "DC"); - return ret; - } - } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 697e98a0a20ab..85d53597eb07a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2067,33 +2067,94 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context return ret; } +static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu, + uint32_t *gen_speed_override, + uint32_t *lane_width_override) +{ + struct amdgpu_device *adev = smu->adev; + + *gen_speed_override = 0xff; + *lane_width_override = 0xff; + + switch (adev->pdev->device) { + case 0x73A0: + case 0x73A1: + case 0x73A2: + case 0x73A3: + case 0x73AB: + case 0x73AE: + /* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */ + *lane_width_override = 6; + break; + case 0x73E0: + case 0x73E1: + case 0x73E3: + *lane_width_override = 4; + break; + case 0x7420: + case 0x7421: + case 0x7422: + case 0x7423: + case 0x7424: + *lane_width_override = 3; + break; + default: + break; + } +} + +#define MAX(a, b) ((a) > (b) ? (a) : (b)) + static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap) { struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; - - uint32_t smu_pcie_arg; + struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; + uint32_t gen_speed_override, lane_width_override; uint8_t *table_member1, *table_member2; + uint32_t min_gen_speed, max_gen_speed; + uint32_t min_lane_width, max_lane_width; + uint32_t smu_pcie_arg; int ret, i; GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1); GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2); - /* lclk dpm table setup */ - for (i = 0; i < MAX_PCIE_CONF; i++) { - dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i]; - dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i]; + sienna_cichlid_get_override_pcie_settings(smu, + &gen_speed_override, + &lane_width_override); + + /* PCIE gen speed override */ + if (gen_speed_override != 0xff) { + min_gen_speed = MIN(pcie_gen_cap, gen_speed_override); + max_gen_speed = MIN(pcie_gen_cap, gen_speed_override); + } else { + min_gen_speed = MAX(0, table_member1[0]); + max_gen_speed = MIN(pcie_gen_cap, table_member1[1]); + min_gen_speed = min_gen_speed > max_gen_speed ? + max_gen_speed : min_gen_speed; } + pcie_table->pcie_gen[0] = min_gen_speed; + pcie_table->pcie_gen[1] = max_gen_speed; + + /* PCIE lane width override */ + if (lane_width_override != 0xff) { + min_lane_width = MIN(pcie_width_cap, lane_width_override); + max_lane_width = MIN(pcie_width_cap, lane_width_override); + } else { + min_lane_width = MAX(1, table_member2[0]); + max_lane_width = MIN(pcie_width_cap, table_member2[1]); + min_lane_width = min_lane_width > max_lane_width ? + max_lane_width : min_lane_width; + } + pcie_table->pcie_lane[0] = min_lane_width; + pcie_table->pcie_lane[1] = max_lane_width; for (i = 0; i < NUM_LINK_LEVELS; i++) { - smu_pcie_arg = (i << 16) | - ((table_member1[i] <= pcie_gen_cap) ? 
- (table_member1[i] << 8) : - (pcie_gen_cap << 8)) | - ((table_member2[i] <= pcie_width_cap) ? - table_member2[i] : - pcie_width_cap); + smu_pcie_arg = (i << 16 | + pcie_table->pcie_gen[i] << 8 | + pcie_table->pcie_lane[i]); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters, @@ -2101,11 +2162,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, NULL); if (ret) return ret; - - if (table_member1[i] > pcie_gen_cap) - dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap; - if (table_member2[i] > pcie_width_cap) - dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap; } return 0; @@ -2143,16 +2199,9 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu) (OverDriveTable_t *)smu->smu_table.boot_overdrive_table; OverDriveTable_t *user_od_table = (OverDriveTable_t *)smu->smu_table.user_overdrive_table; + OverDriveTable_t user_od_table_bak; int ret = 0; - /* - * For S3/S4/Runpm resume, no need to setup those overdrive tables again as - * - either they already have the default OD settings got during cold bootup - * - or they have some user customized OD settings which cannot be overwritten - */ - if (smu->adev->in_suspend) - return 0; - ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)boot_od_table, false); if (ret) { @@ -2163,7 +2212,23 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu) sienna_cichlid_dump_od_table(smu, boot_od_table); memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t)); - memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t)); + + /* + * For S3/S4/Runpm resume, we need to setup those overdrive tables again, + * but we have to preserve user defined values in "user_od_table". + */ + if (!smu->adev->in_suspend) { + memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t)); + smu->user_dpm_profile.user_od = false; + } else if (smu->user_dpm_profile.user_od) { + memcpy(&user_od_table_bak, user_od_table, sizeof(OverDriveTable_t)); + memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t)); + user_od_table->GfxclkFmin = user_od_table_bak.GfxclkFmin; + user_od_table->GfxclkFmax = user_od_table_bak.GfxclkFmax; + user_od_table->UclkFmin = user_od_table_bak.UclkFmin; + user_od_table->UclkFmax = user_od_table_bak.UclkFmax; + user_od_table->VddGfxOffset = user_od_table_bak.VddGfxOffset; + } return 0; } @@ -2373,6 +2438,20 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu, return ret; } +static int sienna_cichlid_restore_user_od_settings(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTable_t *od_table = table_context->overdrive_table; + OverDriveTable_t *user_od_table = table_context->user_overdrive_table; + int res; + + res = smu_v11_0_restore_user_od_settings(smu); + if (res == 0) + memcpy(od_table, user_od_table, sizeof(OverDriveTable_t)); + + return res; +} + static int sienna_cichlid_run_btc(struct smu_context *smu) { int res; @@ -4400,7 +4479,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, .set_default_od_settings = sienna_cichlid_set_default_od_settings, .od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table, - .restore_user_od_settings = smu_v11_0_restore_user_od_settings, + .restore_user_od_settings = sienna_cichlid_restore_user_od_settings, .run_btc = sienna_cichlid_run_btc, .set_power_source = smu_v11_0_set_power_source, .get_pp_feature_mask = 
smu_cmn_get_pp_feature_mask, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index cb10c7e312646..067b4e0b026c0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -203,6 +203,8 @@ static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CAPPED, WORKLOAD_PPLIB_CAPPED_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_UNCAPPED, WORKLOAD_PPLIB_UNCAPPED_BIT), }; static const uint8_t vangogh_throttler_map[] = { @@ -580,7 +582,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_legacy_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -654,7 +656,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, case SMU_MCLK: case SMU_FCLK: for (i = 0; i < count; i++) { - ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) @@ -681,7 +684,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; uint32_t min, max; @@ -763,7 +766,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu, case SMU_MCLK: case SMU_FCLK: for (i = 0; i < count; i++) { - ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) @@ -1046,7 +1050,7 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu, if (!buf) return -EINVAL; - for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { + for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { /* * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT * Not all profile modes are supported on vangogh. 
@@ -1070,7 +1074,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, int workload_type, ret; uint32_t profile_mode = input[size]; - if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { + if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); return -EINVAL; } @@ -1590,6 +1594,21 @@ static int vangogh_read_sensor(struct smu_context *smu, return ret; } +static int vangogh_get_apu_thermal_limit(struct smu_context *smu, uint32_t *limit) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetThermalLimit, + 0, limit); +} + +static int vangogh_set_apu_thermal_limit(struct smu_context *smu, uint32_t limit) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetReducedThermalLimit, + limit, NULL); +} + + static int vangogh_set_watermarks_table(struct smu_context *smu, struct pp_smu_wm_range_sets *clock_ranges) { @@ -2372,6 +2391,7 @@ static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start) * vangogh_get_gfxoff_residency * * @smu: amdgpu_device pointer + * @residency: placeholder for return value * * This function will be used to get gfxoff residency. * @@ -2390,6 +2410,7 @@ static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *resid * vangogh_get_gfxoff_entrycount - get gfxoff entry count * * @smu: amdgpu_device pointer + * @entrycount: placeholder for return value * * This function will be used to get gfxoff entry count * @@ -2425,6 +2446,8 @@ static const struct pptable_funcs vangogh_ppt_funcs = { .dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable, .is_dpm_running = vangogh_is_dpm_running, .read_sensor = vangogh_read_sensor, + .get_apu_thermal_limit = vangogh_get_apu_thermal_limit, + .set_apu_thermal_limit = vangogh_set_apu_thermal_limit, .get_enabled_mask = smu_cmn_get_enabled_mask, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_watermarks_table = vangogh_set_watermarks_table, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 5cdc07165480b..8a8ba25c9ad7c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) static int renoir_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; SmuMetrics_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); @@ -594,7 +594,8 @@ static int renoir_print_clk_levels(struct smu_context *smu, case SMU_VCLK: case SMU_DCLK: for (i = 0; i < count; i++) { - ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile index 9043f6ef1aee2..7f3493b6c53c3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile @@ -24,7 +24,7 @@ # It provides the smu management services for the driver. 
SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o smu_v13_0_0_ppt.o smu_v13_0_4_ppt.o \ - smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o + smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index a52ed0580fd7e..ca379181081c1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -85,6 +85,9 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); static const int link_width[] = {0, 1, 2, 4, 8, 12, 16}; static const int link_speed[] = {25, 50, 80, 160}; +const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5}; +const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16}; + int smu_v13_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -294,6 +297,10 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) case IP_VERSION(13, 0, 5): smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5; break; + case IP_VERSION(13, 0, 6): + smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_6; + adev->pm.fw_version = smu_version; + break; default: dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", adev->ip_versions[MP1_HWIP][0]); @@ -566,11 +573,11 @@ int smu_v13_0_init_power(struct smu_context *smu) if (smu_power->power_context || smu_power->power_context_size != 0) return -EINVAL; - smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context), + smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context), GFP_KERNEL); if (!smu_power->power_context) return -ENOMEM; - smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context); + smu_power->power_context_size = sizeof(struct smu_13_0_power_context); return 0; } @@ -1914,10 +1921,9 @@ int smu_v13_0_set_power_source(struct smu_context *smu, NULL); } -static int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu, - enum smu_clk_type clk_type, - uint16_t level, - uint32_t *value) +int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, uint16_t level, + uint32_t *value) { int ret = 0, clk_id = 0; uint32_t param; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 27448ffe60a43..08577d1b84eca 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1144,8 +1144,8 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, (pcie_table->pcie_lane[i] == 5) ? "x12" : (pcie_table->pcie_lane[i] == 6) ? "x16" : "", pcie_table->clk_freq[i], - ((gen_speed - 1) == pcie_table->pcie_gen[i]) && - (lane_width == link_width[pcie_table->pcie_lane[i]]) ? + (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) && + (lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ? 
"*" : ""); break; @@ -1587,7 +1587,9 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu, workload_type = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_WORKLOAD, i); - if (workload_type < 0) + if (workload_type == -ENOTSUPP) + continue; + else if (workload_type < 0) return -EINVAL; result = smu_cmn_update_table(smu, @@ -1694,10 +1696,39 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, } } - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE && + (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) || + ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) { + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_COMPUTE_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } + + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_CUSTOM); + } else { + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_WORKLOAD, smu->power_profile_mode); + } + if (workload_type < 0) return -EINVAL; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 8fa9a36c38b64..6d9760eac16d8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -478,7 +478,7 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu, static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; @@ -512,7 +512,8 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, break; for (i = 0; i < count; i++) { - ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 66445964efbd1..0081fa607e02e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -866,7 +866,7 @@ out: static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min = 0, max = 0; @@ -898,7 +898,8 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, goto print_clk_out; for (i = 0; i < count; i++) { - ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_MCLK) ? 
(count - i - 1) : i; + ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) goto print_clk_out; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c new file mode 100644 index 0000000000000..ea8f3d6fb98b3 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -0,0 +1,2069 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#define SWSMU_CODE_LAYER_L2 + +#include <linux/firmware.h> +#include "amdgpu.h" +#include "amdgpu_smu.h" +#include "atomfirmware.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_atombios.h" +#include "smu_v13_0_6_pmfw.h" +#include "smu13_driver_if_v13_0_6.h" +#include "smu_v13_0_6_ppsmc.h" +#include "soc15_common.h" +#include "atom.h" +#include "power_state.h" +#include "smu_v13_0.h" +#include "smu_v13_0_6_ppt.h" +#include "nbio/nbio_7_4_offset.h" +#include "nbio/nbio_7_4_sh_mask.h" +#include "thm/thm_11_0_2_offset.h" +#include "thm/thm_11_0_2_sh_mask.h" +#include "amdgpu_xgmi.h" +#include <linux/pci.h> +#include "amdgpu_ras.h" +#include "smu_cmn.h" +#include "mp/mp_13_0_6_offset.h" +#include "mp/mp_13_0_6_sh_mask.h" + +#undef MP1_Public +#undef smnMP1_FIRMWARE_FLAGS + +/* TODO: Check final register offsets */ +#define MP1_Public 0x03b00000 +#define smnMP1_FIRMWARE_FLAGS 0x3010028 +/* + * DO NOT use these for err/warn/info/debug messages. + * Use dev_err, dev_warn, dev_info and dev_dbg instead. + * They are more MGPU friendly. 
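+ * (The dev_* helpers prefix each message with the device name, so output from multiple GPUs stays distinguishable.)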
+ */ +#undef pr_err +#undef pr_warn +#undef pr_info +#undef pr_debug + +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) + +#define SMU_13_0_6_FEA_MAP(smu_feature, smu_13_0_6_feature) \ + [smu_feature] = { 1, (smu_13_0_6_feature) } + +#define FEATURE_MASK(feature) (1ULL << feature) +#define SMC_DPM_FEATURE \ + (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \ + FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_UCLK) | \ + FEATURE_MASK(FEATURE_DPM_SOCCLK) | FEATURE_MASK(FEATURE_DPM_FCLK) | \ + FEATURE_MASK(FEATURE_DPM_LCLK) | FEATURE_MASK(FEATURE_DPM_XGMI) | \ + FEATURE_MASK(FEATURE_DPM_VCN)) + +/* possible frequency drift (1Mhz) */ +#define EPSILON 1 + +#define smnPCIE_ESM_CTRL 0x111003D0 + +static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { + MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), + MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), + MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 1), + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 1), + MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0), + MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1), + MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1), + MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), + MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), + MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 0), + MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 0), + MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), + MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0), + MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), + MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), + MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), + MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0), + MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0), + MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0), + MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0), + MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0), + MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0), + MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0), + MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0), + MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0), + MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0), + MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0), + MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 0), + MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 0), + MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0), + MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0), +}; + +static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = { + CLK_MAP(SOCCLK, PPCLK_SOCCLK), + CLK_MAP(FCLK, PPCLK_FCLK), + CLK_MAP(UCLK, PPCLK_UCLK), + CLK_MAP(MCLK, PPCLK_UCLK), + CLK_MAP(DCLK, PPCLK_DCLK), + CLK_MAP(VCLK, PPCLK_VCLK), + CLK_MAP(LCLK, PPCLK_LCLK), +}; + +static 
const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_COUNT] = { + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, FEATURE_DPM_UCLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, FEATURE_DPM_SOCCLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, FEATURE_DPM_LCLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_VCLK_BIT, FEATURE_DPM_VCN), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_DCLK_BIT, FEATURE_DPM_VCN), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT, FEATURE_DPM_XGMI), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, FEATURE_DPM_VCN), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_SMU_CG), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_GFXOFF_BIT, FEATURE_GFXOFF), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT, FEATURE_XGMI_PER_LINK_PWR_DOWN), + SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE), +}; + +#define TABLE_PMSTATUSLOG 0 +#define TABLE_SMU_METRICS 1 +#define TABLE_I2C_COMMANDS 2 +#define TABLE_COUNT 3 + +static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] = { + TAB_MAP(PMSTATUSLOG), + TAB_MAP(SMU_METRICS), + TAB_MAP(I2C_COMMANDS), +}; + +#define THROTTLER_PROCHOT_GFX_BIT 0 +#define THROTTLER_PPT_BIT 1 +#define THROTTLER_TEMP_SOC_BIT 2 +#define THROTTLER_TEMP_VR_GFX_BIT 3 +#define THROTTLER_TEMP_HBM_BIT 4 + +static const uint8_t smu_v13_0_6_throttler_map[] = { + [THROTTLER_PPT_BIT] = (SMU_THROTTLER_PPT0_BIT), + [THROTTLER_TEMP_SOC_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT), + [THROTTLER_TEMP_HBM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), + [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), + [THROTTLER_PROCHOT_GFX_BIT] = (SMU_THROTTLER_PROCHOT_GFX_BIT), +}; + +struct PPTable_t { + uint32_t MaxSocketPowerLimit; + uint32_t MaxGfxclkFrequency; + uint32_t MinGfxclkFrequency; + uint32_t FclkFrequencyTable[4]; + uint32_t UclkFrequencyTable[4]; + uint32_t SocclkFrequencyTable[4]; + uint32_t VclkFrequencyTable[4]; + uint32_t DclkFrequencyTable[4]; + uint32_t LclkFrequencyTable[4]; + uint32_t MaxLclkDpmRange; + uint32_t MinLclkDpmRange; + bool Init; +}; + +#define SMUQ10_TO_UINT(x) ((x) >> 10) + +struct smu_v13_0_6_dpm_map { + enum smu_clk_type clk_type; + uint32_t feature_num; + struct smu_13_0_dpm_table *dpm_table; + uint32_t *freq_table; +}; + +static int smu_v13_0_6_tables_init(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + struct amdgpu_device *adev = smu->adev; + + if (!(adev->flags & AMD_IS_APU)) + SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + + SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, 
sizeof(SwI2cRequest_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + + smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL); + if (!smu_table->metrics_table) + return -ENOMEM; + smu_table->metrics_time = 0; + + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3); + smu_table->gpu_metrics_table = + kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); + if (!smu_table->gpu_metrics_table) { + kfree(smu_table->metrics_table); + return -ENOMEM; + } + + smu_table->driver_pptable = + kzalloc(sizeof(struct PPTable_t), GFP_KERNEL); + if (!smu_table->driver_pptable) { + kfree(smu_table->metrics_table); + kfree(smu_table->gpu_metrics_table); + return -ENOMEM; + } + + return 0; +} + +static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + smu_dpm->dpm_context = + kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL); + if (!smu_dpm->dpm_context) + return -ENOMEM; + smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); + + return 0; +} + +static int smu_v13_0_6_init_smc_tables(struct smu_context *smu) +{ + int ret = 0; + + ret = smu_v13_0_6_tables_init(smu); + if (ret) + return ret; + + ret = smu_v13_0_6_allocate_dpm_context(smu); + + return ret; +} + +static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu, + uint32_t *feature_mask, + uint32_t num) +{ + if (num > 2) + return -EINVAL; + + /* pptable will handle the features to enable */ + memset(feature_mask, 0xFF, sizeof(uint32_t) * num); + + return 0; +} + +static int smu_v13_0_6_get_metrics_table(struct smu_context *smu, + void *metrics_table, bool bypass_cache) +{ + struct smu_table_context *smu_table = &smu->smu_table; + uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size; + struct smu_table *table = &smu_table->driver_table; + int ret; + + if (bypass_cache || !smu_table->metrics_time || + time_after(jiffies, + smu_table->metrics_time + msecs_to_jiffies(1))) { + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsTable, NULL); + if (ret) { + dev_info(smu->adev->dev, + "Failed to export SMU metrics table!\n"); + return ret; + } + + amdgpu_asic_invalidate_hdp(smu->adev, NULL); + memcpy(smu_table->metrics_table, table->cpu_addr, table_size); + + smu_table->metrics_time = jiffies; + } + + if (metrics_table) + memcpy(metrics_table, smu_table->metrics_table, table_size); + + return 0; +} + +static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + int ret; + int i; + + /* Store one-time values in driver PPTable */ + if (!pptable->Init) { + ret = smu_v13_0_6_get_metrics_table(smu, NULL, false); + if (ret) + return ret; + + pptable->MaxSocketPowerLimit = + SMUQ10_TO_UINT(metrics->MaxSocketPowerLimit); + pptable->MaxGfxclkFrequency = + SMUQ10_TO_UINT(metrics->MaxGfxclkFrequency); + pptable->MinGfxclkFrequency = + SMUQ10_TO_UINT(metrics->MinGfxclkFrequency); + + for (i = 0; i < 4; ++i) { + pptable->FclkFrequencyTable[i] = + SMUQ10_TO_UINT(metrics->FclkFrequencyTable[i]); + pptable->UclkFrequencyTable[i] = + SMUQ10_TO_UINT(metrics->UclkFrequencyTable[i]); + pptable->SocclkFrequencyTable[i] = SMUQ10_TO_UINT( + metrics->SocclkFrequencyTable[i]); + pptable->VclkFrequencyTable[i] = + SMUQ10_TO_UINT(metrics->VclkFrequencyTable[i]); + pptable->DclkFrequencyTable[i] = + 
SMUQ10_TO_UINT(metrics->DclkFrequencyTable[i]); + pptable->LclkFrequencyTable[i] = + SMUQ10_TO_UINT(metrics->LclkFrequencyTable[i]); + } + + pptable->Init = true; + } + + return 0; +} + +static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + uint32_t clock_limit = 0, param; + int ret = 0, clk_id = 0; + + if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + if (pptable->Init) + clock_limit = pptable->UclkFrequencyTable[0]; + break; + case SMU_GFXCLK: + case SMU_SCLK: + if (pptable->Init) + clock_limit = pptable->MinGfxclkFrequency; + break; + case SMU_SOCCLK: + if (pptable->Init) + clock_limit = pptable->UclkFrequencyTable[0]; + break; + case SMU_FCLK: + if (pptable->Init) + clock_limit = pptable->FclkFrequencyTable[0]; + break; + case SMU_VCLK: + if (pptable->Init) + clock_limit = pptable->VclkFrequencyTable[0]; + break; + case SMU_DCLK: + if (pptable->Init) + clock_limit = pptable->DclkFrequencyTable[0]; + break; + default: + break; + } + + if (min) + *min = clock_limit; + + if (max) + *max = clock_limit; + + return 0; + } + + if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) { + clk_id = smu_cmn_to_asic_specific_index( + smu, CMN2ASIC_MAPPING_CLK, clk_type); + if (clk_id < 0) { + ret = -EINVAL; + goto failed; + } + param = (clk_id & 0xffff) << 16; + } + + if (max) { + if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK) + ret = smu_cmn_send_smc_msg( + smu, SMU_MSG_GetMaxGfxclkFrequency, max); + else + ret = smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GetMaxDpmFreq, param, max); + if (ret) + goto failed; + } + + if (min) { + if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK) + ret = smu_cmn_send_smc_msg( + smu, SMU_MSG_GetMinGfxclkFrequency, min); + else + ret = smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GetMinDpmFreq, param, min); + } + +failed: + return ret; +} + +static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *levels) +{ + int ret; + + ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, levels); + if (!ret) + ++(*levels); + + return ret; +} + +static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_13_0_dpm_table *dpm_table = NULL; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + uint32_t gfxclkmin, gfxclkmax, levels; + int ret = 0, i, j; + struct smu_v13_0_6_dpm_map dpm_map[] = { + { SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT, + &dpm_context->dpm_tables.soc_table, + pptable->SocclkFrequencyTable }, + { SMU_UCLK, SMU_FEATURE_DPM_UCLK_BIT, + &dpm_context->dpm_tables.uclk_table, + pptable->UclkFrequencyTable }, + { SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT, + &dpm_context->dpm_tables.fclk_table, + pptable->FclkFrequencyTable }, + { SMU_VCLK, SMU_FEATURE_DPM_VCLK_BIT, + &dpm_context->dpm_tables.vclk_table, + pptable->VclkFrequencyTable }, + { SMU_DCLK, SMU_FEATURE_DPM_DCLK_BIT, + &dpm_context->dpm_tables.dclk_table, + pptable->DclkFrequencyTable }, + }; + + smu_v13_0_6_setup_driver_pptable(smu); + + /* gfxclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.gfx_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { + /* In the case of 
gfxclk, only fine-grained dpm is honored. + * Get min/max values from FW. + */ + ret = smu_v13_0_6_get_dpm_ultimate_freq(smu, SMU_GFXCLK, + &gfxclkmin, &gfxclkmax); + if (ret) + return ret; + + dpm_table->count = 2; + dpm_table->dpm_levels[0].value = gfxclkmin; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->dpm_levels[1].value = gfxclkmax; + dpm_table->dpm_levels[1].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[1].value; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + for (j = 0; j < ARRAY_SIZE(dpm_map); j++) { + dpm_table = dpm_map[j].dpm_table; + levels = 1; + if (smu_cmn_feature_is_enabled(smu, dpm_map[j].feature_num)) { + ret = smu_v13_0_6_get_dpm_level_count( + smu, dpm_map[j].clk_type, &levels); + if (ret) + return ret; + } + dpm_table->count = levels; + for (i = 0; i < dpm_table->count; ++i) { + dpm_table->dpm_levels[i].value = + dpm_map[j].freq_table[i]; + dpm_table->dpm_levels[i].enabled = true; + + } + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[levels - 1].value; + + } + + return 0; +} + +static int smu_v13_0_6_setup_pptable(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + + /* TODO: PPTable is not available. + * 1) Find an alternate way to get 'PPTable values' here. + * 2) Check if there is SW CTF + */ + table_context->thermal_controller_type = 0; + + return 0; +} + +static int smu_v13_0_6_check_fw_status(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t mp1_fw_flags; + + mp1_fw_flags = + RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); + + if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> + MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) + return 0; + + return -EIO; +} + +static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_dpm_table *gfx_table = + &dpm_context->dpm_tables.gfx_table; + struct smu_13_0_dpm_table *mem_table = + &dpm_context->dpm_tables.uclk_table; + struct smu_13_0_dpm_table *soc_table = + &dpm_context->dpm_tables.soc_table; + struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + + pstate_table->gfxclk_pstate.min = gfx_table->min; + pstate_table->gfxclk_pstate.peak = gfx_table->max; + pstate_table->gfxclk_pstate.curr.min = gfx_table->min; + pstate_table->gfxclk_pstate.curr.max = gfx_table->max; + + pstate_table->uclk_pstate.min = mem_table->min; + pstate_table->uclk_pstate.peak = mem_table->max; + pstate_table->uclk_pstate.curr.min = mem_table->min; + pstate_table->uclk_pstate.curr.max = mem_table->max; + + pstate_table->socclk_pstate.min = soc_table->min; + pstate_table->socclk_pstate.peak = soc_table->max; + pstate_table->socclk_pstate.curr.min = soc_table->min; + pstate_table->socclk_pstate.curr.max = soc_table->max; + + if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL && + mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL && + soc_table->count > SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL) { + pstate_table->gfxclk_pstate.standard = + gfx_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL].value; + pstate_table->uclk_pstate.standard = + mem_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL].value; + 
pstate_table->socclk_pstate.standard = + soc_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL].value; + } else { + pstate_table->gfxclk_pstate.standard = + pstate_table->gfxclk_pstate.min; + pstate_table->uclk_pstate.standard = + pstate_table->uclk_pstate.min; + pstate_table->socclk_pstate.standard = + pstate_table->socclk_pstate.min; + } + + return 0; +} + +static int smu_v13_0_6_get_clk_table(struct smu_context *smu, + struct pp_clock_levels_with_latency *clocks, + struct smu_13_0_dpm_table *dpm_table) +{ + int i, count; + + count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : + dpm_table->count; + clocks->num_levels = count; + + for (i = 0; i < count; i++) { + clocks->data[i].clocks_in_khz = + dpm_table->dpm_levels[i].value * 1000; + clocks->data[i].latency_in_us = 0; + } + + return 0; +} + +static int smu_v13_0_6_freqs_in_same_level(int32_t frequency1, + int32_t frequency2) +{ + return (abs(frequency1 - frequency2) <= EPSILON); +} + +static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu, + MetricsTable_t *metrics) +{ + uint32_t throttler_status = 0; + + throttler_status |= metrics->ProchotResidencyAcc > 0 ? 1U << THROTTLER_PROCHOT_GFX_BIT : 0; + throttler_status |= metrics->PptResidencyAcc > 0 ? 1U << THROTTLER_PPT_BIT : 0; + throttler_status |= metrics->SocketThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_SOC_BIT : 0; + throttler_status |= metrics->VrThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_VR_GFX_BIT : 0; + throttler_status |= metrics->HbmThmResidencyAcc > 0 ? 1U << THROTTLER_TEMP_HBM_BIT : 0; + + return throttler_status; +} + +static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; + int ret = 0; + + ret = smu_v13_0_6_get_metrics_table(smu, NULL, false); + if (ret) + return ret; + + /* For clocks with multiple instances, only report the first one */ + switch (member) { + case METRICS_CURR_GFXCLK: + case METRICS_AVERAGE_GFXCLK: + *value = 0; + break; + case METRICS_CURR_SOCCLK: + case METRICS_AVERAGE_SOCCLK: + *value = SMUQ10_TO_UINT(metrics->SocclkFrequency[0]); + break; + case METRICS_CURR_UCLK: + case METRICS_AVERAGE_UCLK: + *value = SMUQ10_TO_UINT(metrics->UclkFrequency); + break; + case METRICS_CURR_VCLK: + *value = SMUQ10_TO_UINT(metrics->VclkFrequency[0]); + break; + case METRICS_CURR_DCLK: + *value = SMUQ10_TO_UINT(metrics->DclkFrequency[0]); + break; + case METRICS_CURR_FCLK: + *value = SMUQ10_TO_UINT(metrics->FclkFrequency); + break; + case METRICS_AVERAGE_GFXACTIVITY: + *value = SMUQ10_TO_UINT(metrics->SocketGfxBusy); + break; + case METRICS_AVERAGE_MEMACTIVITY: + *value = SMUQ10_TO_UINT(metrics->DramBandwidthUtilization); + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = SMUQ10_TO_UINT(metrics->SocketPower) << 8; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature); + break; + case METRICS_TEMPERATURE_MEM: + *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature); + break; + /* This is the max of all VRs and not just SOC VR. + * No need to define another data type for the same. 
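+ * (METRICS_TEMPERATURE_VRSOC below is simply reused to report MaxVrTemperature, the aggregate across all voltage regulators.)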
+ */ + case METRICS_TEMPERATURE_VRSOC: + *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature); + break; + case METRICS_THROTTLER_STATUS: + *value = smu_v13_0_6_get_throttler_status(smu, metrics); + break; + default: + *value = UINT_MAX; + break; + } + + return ret; +} + +static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + MetricsMember_t member_type; + + if (!value) + return -EINVAL; + + switch (clk_type) { + case SMU_GFXCLK: + member_type = METRICS_CURR_GFXCLK; + break; + case SMU_UCLK: + member_type = METRICS_CURR_UCLK; + break; + case SMU_SOCCLK: + member_type = METRICS_CURR_SOCCLK; + break; + case SMU_VCLK: + member_type = METRICS_CURR_VCLK; + break; + case SMU_DCLK: + member_type = METRICS_CURR_DCLK; + break; + case SMU_FCLK: + member_type = METRICS_CURR_FCLK; + break; + default: + return -EINVAL; + } + + return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value); +} + +static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, + enum smu_clk_type type, char *buf) +{ + int i, now, size = 0; + int ret = 0; + struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + struct pp_clock_levels_with_latency clocks; + struct smu_13_0_dpm_table *single_dpm_table; + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_13_0_dpm_context *dpm_context = NULL; + uint32_t display_levels; + uint32_t freq_values[3] = { 0 }; + uint32_t min_clk, max_clk; + + smu_cmn_get_sysfs_buf(&buf, &size); + + if (amdgpu_ras_intr_triggered()) { + size += sysfs_emit_at(buf, size, "unavailable\n"); + return size; + } + + dpm_context = smu_dpm->dpm_context; + + switch (type) { + case SMU_OD_SCLK: + size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK"); + fallthrough; + case SMU_SCLK: + ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK, + &now); + if (ret) { + dev_err(smu->adev->dev, + "Attempt to get current gfx clk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + dev_err(smu->adev->dev, + "Attempt to get gfx clk levels Failed!"); + return ret; + } + + display_levels = clocks.num_levels; + + min_clk = pstate_table->gfxclk_pstate.curr.min; + max_clk = pstate_table->gfxclk_pstate.curr.max; + + freq_values[0] = min_clk; + freq_values[1] = max_clk; + + /* fine-grained dpm has only 2 levels */ + if (now > min_clk && now < max_clk) { + display_levels = clocks.num_levels + 1; + freq_values[2] = max_clk; + freq_values[1] = now; + } + + /* + * For DPM disabled case, there will be only one clock level. + * And it's safe to assume that is always the current clock. + */ + if (display_levels == clocks.num_levels) { + for (i = 0; i < clocks.num_levels; i++) + size += sysfs_emit_at( + buf, size, "%d: %uMhz %s\n", i, + freq_values[i], + (clocks.num_levels == 1) ? + "*" : + (smu_v13_0_6_freqs_in_same_level( + freq_values[i], now) ? + "*" : + "")); + } else { + for (i = 0; i < display_levels; i++) + size += sysfs_emit_at(buf, size, + "%d: %uMhz %s\n", i, + freq_values[i], + i == 1 ? 
"*" : ""); + } + + break; + + case SMU_OD_MCLK: + size += sysfs_emit_at(buf, size, "%s:\n", "MCLK"); + fallthrough; + case SMU_MCLK: + ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK, + &now); + if (ret) { + dev_err(smu->adev->dev, + "Attempt to get current mclk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_context->dpm_tables.uclk_table); + ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + dev_err(smu->adev->dev, + "Attempt to get memory clk levels Failed!"); + return ret; + } + + for (i = 0; i < clocks.num_levels; i++) + size += sysfs_emit_at( + buf, size, "%d: %uMhz %s\n", i, + clocks.data[i].clocks_in_khz / 1000, + (clocks.num_levels == 1) ? + "*" : + (smu_v13_0_6_freqs_in_same_level( + clocks.data[i].clocks_in_khz / + 1000, + now) ? + "*" : + "")); + break; + + case SMU_SOCCLK: + ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK, + &now); + if (ret) { + dev_err(smu->adev->dev, + "Attempt to get current socclk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_context->dpm_tables.soc_table); + ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + dev_err(smu->adev->dev, + "Attempt to get socclk levels Failed!"); + return ret; + } + + for (i = 0; i < clocks.num_levels; i++) + size += sysfs_emit_at( + buf, size, "%d: %uMhz %s\n", i, + clocks.data[i].clocks_in_khz / 1000, + (clocks.num_levels == 1) ? + "*" : + (smu_v13_0_6_freqs_in_same_level( + clocks.data[i].clocks_in_khz / + 1000, + now) ? + "*" : + "")); + break; + + case SMU_FCLK: + ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK, + &now); + if (ret) { + dev_err(smu->adev->dev, + "Attempt to get current fclk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_context->dpm_tables.fclk_table); + ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + dev_err(smu->adev->dev, + "Attempt to get fclk levels Failed!"); + return ret; + } + + for (i = 0; i < single_dpm_table->count; i++) + size += sysfs_emit_at( + buf, size, "%d: %uMhz %s\n", i, + single_dpm_table->dpm_levels[i].value, + (clocks.num_levels == 1) ? + "*" : + (smu_v13_0_6_freqs_in_same_level( + clocks.data[i].clocks_in_khz / + 1000, + now) ? + "*" : + "")); + break; + + case SMU_VCLK: + ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK, + &now); + if (ret) { + dev_err(smu->adev->dev, + "Attempt to get current vclk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_context->dpm_tables.vclk_table); + ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + dev_err(smu->adev->dev, + "Attempt to get vclk levels Failed!"); + return ret; + } + + for (i = 0; i < single_dpm_table->count; i++) + size += sysfs_emit_at( + buf, size, "%d: %uMhz %s\n", i, + single_dpm_table->dpm_levels[i].value, + (clocks.num_levels == 1) ? + "*" : + (smu_v13_0_6_freqs_in_same_level( + clocks.data[i].clocks_in_khz / + 1000, + now) ? 
+ "*" : + "")); + break; + + case SMU_DCLK: + ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK, + &now); + if (ret) { + dev_err(smu->adev->dev, + "Attempt to get current dclk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_context->dpm_tables.dclk_table); + ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + dev_err(smu->adev->dev, + "Attempt to get dclk levels Failed!"); + return ret; + } + + for (i = 0; i < single_dpm_table->count; i++) + size += sysfs_emit_at( + buf, size, "%d: %uMhz %s\n", i, + single_dpm_table->dpm_levels[i].value, + (clocks.num_levels == 1) ? + "*" : + (smu_v13_0_6_freqs_in_same_level( + clocks.data[i].clocks_in_khz / + 1000, + now) ? + "*" : + "")); + break; + + default: + break; + } + + return size; +} + +static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max, + uint32_t feature_mask, uint32_t level) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + uint32_t freq; + int ret = 0; + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && + (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK))) { + freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value; + ret = smu_cmn_send_smc_msg_with_param( + smu, + (max ? SMU_MSG_SetSoftMaxGfxClk : + SMU_MSG_SetSoftMinGfxclk), + freq & 0xffff, NULL); + if (ret) { + dev_err(smu->adev->dev, + "Failed to set soft %s gfxclk !\n", + max ? "max" : "min"); + return ret; + } + } + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && + (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK))) { + freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level] + .value; + ret = smu_cmn_send_smc_msg_with_param( + smu, + (max ? SMU_MSG_SetSoftMaxByFreq : + SMU_MSG_SetSoftMinByFreq), + (PPCLK_UCLK << 16) | (freq & 0xffff), NULL); + if (ret) { + dev_err(smu->adev->dev, + "Failed to set soft %s memclk !\n", + max ? "max" : "min"); + return ret; + } + } + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) && + (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK))) { + freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value; + ret = smu_cmn_send_smc_msg_with_param( + smu, + (max ? SMU_MSG_SetSoftMaxByFreq : + SMU_MSG_SetSoftMinByFreq), + (PPCLK_SOCCLK << 16) | (freq & 0xffff), NULL); + if (ret) { + dev_err(smu->adev->dev, + "Failed to set soft %s socclk !\n", + max ? "max" : "min"); + return ret; + } + } + + return ret; +} + +static int smu_v13_0_6_force_clk_levels(struct smu_context *smu, + enum smu_clk_type type, uint32_t mask) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_dpm_table *single_dpm_table = NULL; + uint32_t soft_min_level, soft_max_level; + int ret = 0; + + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? 
(fls(mask) - 1) : 0; + + switch (type) { + case SMU_SCLK: + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + if (soft_max_level >= single_dpm_table->count) { + dev_err(smu->adev->dev, + "Clock level specified %d is over max allowed %d\n", + soft_max_level, single_dpm_table->count - 1); + ret = -EINVAL; + break; + } + + ret = smu_v13_0_6_upload_dpm_level( + smu, false, FEATURE_MASK(FEATURE_DPM_GFXCLK), + soft_min_level); + if (ret) { + dev_err(smu->adev->dev, + "Failed to upload boot level to lowest!\n"); + break; + } + + ret = smu_v13_0_6_upload_dpm_level( + smu, true, FEATURE_MASK(FEATURE_DPM_GFXCLK), + soft_max_level); + if (ret) + dev_err(smu->adev->dev, + "Failed to upload dpm max level to highest!\n"); + + break; + + case SMU_MCLK: + case SMU_SOCCLK: + case SMU_FCLK: + /* + * Should not arrive here since smu_13_0_6 does not + * support mclk/socclk/fclk softmin/softmax settings + */ + ret = -EINVAL; + break; + + default: + break; + } + + return ret; +} + +static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value) +{ + int ret = 0; + + if (!value) + return -EINVAL; + + switch (sensor) { + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = smu_v13_0_6_get_smu_metrics_data( + smu, METRICS_AVERAGE_GFXACTIVITY, value); + break; + case AMDGPU_PP_SENSOR_MEM_LOAD: + ret = smu_v13_0_6_get_smu_metrics_data( + smu, METRICS_AVERAGE_MEMACTIVITY, value); + break; + default: + dev_err(smu->adev->dev, + "Invalid sensor for retrieving clock activity\n"); + return -EINVAL; + } + + return ret; +} + +static int smu_v13_0_6_get_gpu_power(struct smu_context *smu, uint32_t *value) +{ + if (!value) + return -EINVAL; + + return smu_v13_0_6_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, + value); +} + +static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value) +{ + int ret = 0; + + if (!value) + return -EINVAL; + + switch (sensor) { + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + ret = smu_v13_0_6_get_smu_metrics_data( + smu, METRICS_TEMPERATURE_HOTSPOT, value); + break; + case AMDGPU_PP_SENSOR_MEM_TEMP: + ret = smu_v13_0_6_get_smu_metrics_data( + smu, METRICS_TEMPERATURE_MEM, value); + break; + default: + dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n"); + return -EINVAL; + } + + return ret; +} + +static int smu_v13_0_6_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, void *data, + uint32_t *size) +{ + int ret = 0; + + if (amdgpu_ras_intr_triggered()) + return 0; + + if (!data || !size) + return -EINVAL; + + switch (sensor) { + case AMDGPU_PP_SENSOR_MEM_LOAD: + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = smu_v13_0_6_get_current_activity_percent(smu, sensor, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_POWER: + ret = smu_v13_0_6_get_gpu_power(smu, (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + case AMDGPU_PP_SENSOR_MEM_TEMP: + ret = smu_v13_0_6_thermal_get_temperature(smu, sensor, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = smu_v13_0_6_get_current_clk_freq_by_table( + smu, SMU_UCLK, (uint32_t *)data); + /* the output clock frequency in 10K unit */ + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = smu_v13_0_6_get_current_clk_freq_by_table( + smu, SMU_GFXCLK, (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_VDDGFX: + ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data); + *size 
= 4; + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static int smu_v13_0_6_get_power_limit(struct smu_context *smu, + uint32_t *current_power_limit, + uint32_t *default_power_limit, + uint32_t *max_power_limit) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct PPTable_t *pptable = + (struct PPTable_t *)smu_table->driver_pptable; + uint32_t power_limit = 0; + int ret; + + if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { + if (current_power_limit) + *current_power_limit = 0; + if (default_power_limit) + *default_power_limit = 0; + if (max_power_limit) + *max_power_limit = 0; + + dev_warn( + smu->adev->dev, + "PPT feature is not enabled, power values can't be fetched."); + + return 0; + } + + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit); + + if (ret) { + dev_err(smu->adev->dev, "Couldn't get PPT limit"); + return -EINVAL; + } + + if (current_power_limit) + *current_power_limit = power_limit; + if (default_power_limit) + *default_power_limit = power_limit; + + if (max_power_limit) { + *max_power_limit = pptable->MaxSocketPowerLimit; + } + + return 0; +} + +static int smu_v13_0_6_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit) +{ + return smu_v13_0_set_power_limit(smu, limit_type, limit); +} + +static int smu_v13_0_6_system_features_control(struct smu_context *smu, + bool enable) +{ + int ret; + + /* Nothing to be done for APU */ + if (smu->adev->flags & AMD_IS_APU) + return 0; + + ret = smu_v13_0_system_features_control(smu, enable); + + return ret; +} + +static int smu_v13_0_6_set_gfx_soft_freq_limited_range(struct smu_context *smu, + uint32_t min, + uint32_t max) +{ + int ret; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, + max & 0xffff, NULL); + if (ret) + return ret; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinGfxclk, + min & 0xffff, NULL); + + return ret; +} + +static int smu_v13_0_6_set_performance_level(struct smu_context *smu, + enum amd_dpm_forced_level level) +{ + struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); + struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct smu_13_0_dpm_table *gfx_table = + &dpm_context->dpm_tables.gfx_table; + struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + int ret; + + /* Disable determinism if switching to another mode */ + if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) && + (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) { + smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL); + pstate_table->gfxclk_pstate.curr.max = gfx_table->max; + } + + switch (level) { + case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM: + return 0; + + case AMD_DPM_FORCED_LEVEL_AUTO: + if ((gfx_table->min == pstate_table->gfxclk_pstate.curr.min) && + (gfx_table->max == pstate_table->gfxclk_pstate.curr.max)) + return 0; + + ret = smu_v13_0_6_set_gfx_soft_freq_limited_range( + smu, gfx_table->min, gfx_table->max); + if (ret) + return ret; + + pstate_table->gfxclk_pstate.curr.min = gfx_table->min; + pstate_table->gfxclk_pstate.curr.max = gfx_table->max; + return 0; + case AMD_DPM_FORCED_LEVEL_MANUAL: + return 0; + default: + break; + } + + return -EINVAL; +} + +static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t min, uint32_t max) +{ + struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); + struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct smu_umd_pstate_table 
*pstate_table = &smu->pstate_table; + struct amdgpu_device *adev = smu->adev; + uint32_t min_clk; + uint32_t max_clk; + int ret = 0; + + if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) + return -EINVAL; + + if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) && + (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) + return -EINVAL; + + if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { + if (min >= max) { + dev_err(smu->adev->dev, + "Minimum GFX clk should be less than the maximum allowed clock\n"); + return -EINVAL; + } + + if ((min == pstate_table->gfxclk_pstate.curr.min) && + (max == pstate_table->gfxclk_pstate.curr.max)) + return 0; + + ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min, max); + if (!ret) { + pstate_table->gfxclk_pstate.curr.min = min; + pstate_table->gfxclk_pstate.curr.max = max; + } + + return ret; + } + + if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { + if (!max || (max < dpm_context->dpm_tables.gfx_table.min) || + (max > dpm_context->dpm_tables.gfx_table.max)) { + dev_warn( + adev->dev, + "Invalid max frequency %d MHz specified for determinism\n", + max); + return -EINVAL; + } + + /* Restore default min/max clocks and enable determinism */ + min_clk = dpm_context->dpm_tables.gfx_table.min; + max_clk = dpm_context->dpm_tables.gfx_table.max; + ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min_clk, + max_clk); + if (!ret) { + usleep_range(500, 1000); + ret = smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_EnableDeterminism, max, NULL); + if (ret) { + dev_err(adev->dev, + "Failed to enable determinism at GFX clock %d MHz\n", + max); + } else { + pstate_table->gfxclk_pstate.curr.min = min_clk; + pstate_table->gfxclk_pstate.curr.max = max; + } + } + } + + return ret; +} + +static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long input[], uint32_t size) +{ + struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); + struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + uint32_t min_clk; + uint32_t max_clk; + int ret = 0; + + /* Only allowed in manual or determinism mode */ + if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) && + (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) + return -EINVAL; + + switch (type) { + case PP_OD_EDIT_SCLK_VDDC_TABLE: + if (size != 2) { + dev_err(smu->adev->dev, + "Input parameter number not correct\n"); + return -EINVAL; + } + + if (input[0] == 0) { + if (input[1] < dpm_context->dpm_tables.gfx_table.min) { + dev_warn( + smu->adev->dev, + "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n", + input[1], + dpm_context->dpm_tables.gfx_table.min); + pstate_table->gfxclk_pstate.custom.min = + pstate_table->gfxclk_pstate.curr.min; + return -EINVAL; + } + + pstate_table->gfxclk_pstate.custom.min = input[1]; + } else if (input[0] == 1) { + if (input[1] > dpm_context->dpm_tables.gfx_table.max) { + dev_warn( + smu->adev->dev, + "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n", + input[1], + dpm_context->dpm_tables.gfx_table.max); + pstate_table->gfxclk_pstate.custom.max = + pstate_table->gfxclk_pstate.curr.max; + return -EINVAL; + } + + pstate_table->gfxclk_pstate.custom.max = input[1]; + } else { + return -EINVAL; + } + break; + case PP_OD_RESTORE_DEFAULT_TABLE: + if (size != 0) { + dev_err(smu->adev->dev, + "Input parameter number not correct\n"); + return -EINVAL; + } else { + 
/* Use the default frequencies for manual and determinism mode */ + min_clk = dpm_context->dpm_tables.gfx_table.min; + max_clk = dpm_context->dpm_tables.gfx_table.max; + + return smu_v13_0_6_set_soft_freq_limited_range( + smu, SMU_GFXCLK, min_clk, max_clk); + } + break; + case PP_OD_COMMIT_DPM_TABLE: + if (size != 0) { + dev_err(smu->adev->dev, + "Input parameter number not correct\n"); + return -EINVAL; + } else { + if (!pstate_table->gfxclk_pstate.custom.min) + pstate_table->gfxclk_pstate.custom.min = + pstate_table->gfxclk_pstate.curr.min; + + if (!pstate_table->gfxclk_pstate.custom.max) + pstate_table->gfxclk_pstate.custom.max = + pstate_table->gfxclk_pstate.curr.max; + + min_clk = pstate_table->gfxclk_pstate.custom.min; + max_clk = pstate_table->gfxclk_pstate.custom.max; + + return smu_v13_0_6_set_soft_freq_limited_range( + smu, SMU_GFXCLK, min_clk, max_clk); + } + break; + default: + return -ENOSYS; + } + + return ret; +} + +static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu, + uint64_t *feature_mask) +{ + uint32_t smu_version; + int ret; + + smu_cmn_get_smc_version(smu, NULL, &smu_version); + ret = smu_cmn_get_enabled_mask(smu, feature_mask); + + if (ret == -EIO && smu_version < 0x552F00) { + *feature_mask = 0; + ret = 0; + } + + return ret; +} + +static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu) +{ + int ret; + uint64_t feature_enabled; + + ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled); + + if (ret) + return false; + + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu, + void *table_data) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *table = &smu_table->driver_table; + struct amdgpu_device *adev = smu->adev; + uint32_t table_size; + int ret = 0; + + if (!table_data) + return -EINVAL; + + table_size = smu_table->tables[SMU_TABLE_I2C_COMMANDS].size; + + memcpy(table->cpu_addr, table_data, table_size); + /* Flush hdp cache */ + amdgpu_asic_flush_hdp(adev, NULL); + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction, + NULL); + + return ret; +} + +static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msg, int num_msgs) +{ + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); + struct amdgpu_device *adev = smu_i2c->adev; + struct smu_context *smu = adev->powerplay.pp_handle; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *table = &smu_table->driver_table; + SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; + int i, j, r, c; + u16 dir; + + if (!adev->pm.dpm_enabled) + return -EBUSY; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + req->I2CcontrollerPort = smu_i2c->port; + req->I2CSpeed = I2C_SPEED_FAST_400K; + req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */ + dir = msg[0].flags & I2C_M_RD; + + for (c = i = 0; i < num_msgs; i++) { + for (j = 0; j < msg[i].len; j++, c++) { + SwI2cCmd_t *cmd = &req->SwI2cCmds[c]; + + if (!(msg[i].flags & I2C_M_RD)) { + /* write */ + cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK; + cmd->ReadWriteData = msg[i].buf[j]; + } + + if ((dir ^ msg[i].flags) & I2C_M_RD) { + /* The direction changes. + */ + dir = msg[i].flags & I2C_M_RD; + cmd->CmdConfig |= CMDCONFIG_RESTART_MASK; + } + + req->NumCmds++; + + /* + * Insert STOP if we are at the last byte of either last + * message for the transaction or the client explicitly + * requires a STOP at this particular message. 
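+ * (I2C_M_STOP is a standard Linux I2C message flag; honoring it lets a client force a STOP between the messages of a combined transfer.)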
+ */ + if ((j == msg[i].len - 1) && + ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) { + cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK; + cmd->CmdConfig |= CMDCONFIG_STOP_MASK; + } + } + } + mutex_lock(&adev->pm.mutex); + r = smu_v13_0_6_request_i2c_xfer(smu, req); + mutex_unlock(&adev->pm.mutex); + if (r) + goto fail; + + for (c = i = 0; i < num_msgs; i++) { + if (!(msg[i].flags & I2C_M_RD)) { + c += msg[i].len; + continue; + } + for (j = 0; j < msg[i].len; j++, c++) { + SwI2cCmd_t *cmd = &res->SwI2cCmds[c]; + + msg[i].buf[j] = cmd->ReadWriteData; + } + } + r = num_msgs; +fail: + kfree(req); + return r; +} + +static u32 smu_v13_0_6_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm smu_v13_0_6_i2c_algo = { + .master_xfer = smu_v13_0_6_i2c_xfer, + .functionality = smu_v13_0_6_i2c_func, +}; + +static const struct i2c_adapter_quirks smu_v13_0_6_i2c_control_quirks = { + .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN, + .max_read_len = MAX_SW_I2C_COMMANDS, + .max_write_len = MAX_SW_I2C_COMMANDS, + .max_comb_1st_msg_len = 2, + .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2, +}; + +static int smu_v13_0_6_i2c_control_init(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int res, i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + smu_i2c->adev = adev; + smu_i2c->port = i; + mutex_init(&smu_i2c->mutex); + control->owner = THIS_MODULE; + control->class = I2C_CLASS_SPD; + control->dev.parent = &adev->pdev->dev; + control->algo = &smu_v13_0_6_i2c_algo; + snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); + control->quirks = &smu_v13_0_6_i2c_control_quirks; + i2c_set_adapdata(control, smu_i2c); + + res = i2c_add_adapter(control); + if (res) { + DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + goto Out_err; + } + } + + adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; + adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; + + return 0; +Out_err: + for ( ; i >= 0; i--) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + i2c_del_adapter(control); + } + return res; +} + +static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + i2c_del_adapter(control); + } + adev->pm.ras_eeprom_i2c_bus = NULL; + adev->pm.fru_eeprom_i2c_bus = NULL; +} + +static void smu_v13_0_6_get_unique_id(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + //SmuMetrics_t *metrics = smu->smu_table.metrics_table; + uint32_t upper32 = 0, lower32 = 0; + int ret; + + ret = smu_cmn_get_metrics_table(smu, NULL, false); + if (ret) + goto out; + + //upper32 = metrics->PublicSerialNumUpper32; + //lower32 = metrics->PublicSerialNumLower32; + +out: + adev->unique_id = ((uint64_t)upper32 << 32) | lower32; + if (adev->serial[0] == '\0') + sprintf(adev->serial, "%016llx", adev->unique_id); +} + +static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu) +{ + /* smu_13_0_6 does not support baco */ + + return false; +} + +static int smu_v13_0_6_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + return smu_cmn_send_smc_msg_with_param(smu, 
SMU_MSG_DFCstateControl, + state, NULL); +} + +static int smu_v13_0_6_allow_xgmi_power_down(struct smu_context *smu, bool en) +{ + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GmiPwrDnControl, + en ? 0 : 1, NULL); +} + +static const struct throttling_logging_label { + uint32_t feature_mask; + const char *label; +} logging_label[] = { + { (1U << THROTTLER_TEMP_HBM_BIT), "HBM" }, + { (1U << THROTTLER_TEMP_SOC_BIT), "SOC" }, + { (1U << THROTTLER_TEMP_VR_GFX_BIT), "VR limit" }, +}; +static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu) +{ + int ret; + int throttler_idx, throttling_events = 0, buf_idx = 0; + struct amdgpu_device *adev = smu->adev; + uint32_t throttler_status; + char log_buf[256]; + + ret = smu_v13_0_6_get_smu_metrics_data(smu, METRICS_THROTTLER_STATUS, + &throttler_status); + if (ret) + return; + + memset(log_buf, 0, sizeof(log_buf)); + for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label); + throttler_idx++) { + if (throttler_status & + logging_label[throttler_idx].feature_mask) { + throttling_events++; + buf_idx += snprintf(log_buf + buf_idx, + sizeof(log_buf) - buf_idx, "%s%s", + throttling_events > 1 ? " and " : "", + logging_label[throttler_idx].label); + if (buf_idx >= sizeof(log_buf)) { + dev_err(adev->dev, "buffer overflow!\n"); + log_buf[sizeof(log_buf) - 1] = '\0'; + break; + } + } + } + + dev_warn( + adev->dev, + "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n", + log_buf); + kgd2kfd_smi_event_throttle( + smu->adev->kfd.dev, + smu_cmn_get_indep_throttler_status(throttler_status, + smu_v13_0_6_throttler_map)); +} + +static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t esm_ctrl; + + /* TODO: confirm this on real target */ + esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL); + if ((esm_ctrl >> 15) & 0x1FFFF) + return (((esm_ctrl >> 8) & 0x3F) + 128); + + return smu_v13_0_get_current_pcie_link_speed(smu); +} + +static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v1_3 *gpu_metrics = + (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table; + MetricsTable_t *metrics; + int i, ret = 0; + + metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL); + if (!metrics) + return -ENOMEM; + ret = smu_v13_0_6_get_metrics_table(smu, metrics, true); + if (ret) { + kfree(metrics); + return ret; + } + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); + + /* TODO: Decide on how to fill in zero value fields */ + gpu_metrics->temperature_edge = 0; + gpu_metrics->temperature_hotspot = 0; + gpu_metrics->temperature_mem = 0; + gpu_metrics->temperature_vrgfx = 0; + gpu_metrics->temperature_vrsoc = 0; + gpu_metrics->temperature_vrmem = 0; + + gpu_metrics->average_gfx_activity = 0; + gpu_metrics->average_umc_activity = 0; + gpu_metrics->average_mm_activity = 0; + + gpu_metrics->average_socket_power = 0; + gpu_metrics->energy_accumulator = 0; + + gpu_metrics->average_gfxclk_frequency = 0; + gpu_metrics->average_socclk_frequency = 0; + gpu_metrics->average_uclk_frequency = 0; + gpu_metrics->average_vclk0_frequency = 0; + gpu_metrics->average_dclk0_frequency = 0; + + gpu_metrics->current_gfxclk = 0; + gpu_metrics->current_socclk = 0; + gpu_metrics->current_uclk = 0; + gpu_metrics->current_vclk0 = 0; + gpu_metrics->current_dclk0 = 0; + + gpu_metrics->throttle_status = 0; + gpu_metrics->indep_throttle_status = smu_cmn_get_indep_throttler_status( + gpu_metrics->throttle_status, smu_v13_0_6_throttler_map);
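+ + /* Note: of the fields below, only pcie_link_speed and system_clock_counter carry live values for now; the rest stay zero-filled until the PMFW metrics table exports them (see TODO above). */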
+ + gpu_metrics->current_fan_speed = 0; + + gpu_metrics->pcie_link_width = 0; + gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu); + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + gpu_metrics->gfx_activity_acc = 0; + gpu_metrics->mem_activity_acc = 0; + + for (i = 0; i < NUM_HBM_INSTANCES; i++) + gpu_metrics->temperature_hbm[i] = 0; + + gpu_metrics->firmware_timestamp = 0; + + *table = (void *)gpu_metrics; + kfree(metrics); + + return sizeof(struct gpu_metrics_v1_3); +} + +static int smu_v13_0_6_mode2_reset(struct smu_context *smu) +{ + u32 smu_version; + int ret = 0, index; + struct amdgpu_device *adev = smu->adev; + int timeout = 10; + + smu_cmn_get_smc_version(smu, NULL, &smu_version); + + index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, + SMU_MSG_GfxDeviceDriverReset); + + mutex_lock(&smu->message_lock); + ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, + SMU_RESET_MODE_2); + /* This is similar to FLR, wait till max FLR timeout */ + msleep(100); + dev_dbg(smu->adev->dev, "restore config space...\n"); + /* Restore the config space saved during init */ + amdgpu_device_load_pci_state(adev->pdev); + + dev_dbg(smu->adev->dev, "wait for reset ack\n"); + while (ret == -ETIME && timeout) { + ret = smu_cmn_wait_for_response(smu); + /* Wait a bit more time for getting ACK */ + if (ret == -ETIME) { + --timeout; + usleep_range(500, 1000); + continue; + } + + if (ret != 1) { + dev_err(adev->dev, + "failed to send mode2 message \tparam: 0x%08x response %#x\n", + SMU_RESET_MODE_2, ret); + goto out; + } + } + + if (ret == 1) + ret = 0; +out: + mutex_unlock(&smu->message_lock); + + return ret; +} + +static int smu_v13_0_6_mode1_reset(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + struct amdgpu_ras *ras; + u32 fatal_err, param; + int ret = 0; + + ras = amdgpu_ras_get_context(adev); + fatal_err = 0; + param = SMU_RESET_MODE_1; + + /* fatal error triggered by ras, PMFW supports the flag */ + if (ras && atomic_read(&ras->in_recovery)) + fatal_err = 1; + + param |= (fatal_err << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + param, NULL); + + if (!ret) + msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); + + return ret; +} + +static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu) +{ + /* TODO: Enable this when FW support is added */ + return false; +} + +static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu) +{ + return true; +} + +static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu, + uint32_t size) +{ + int ret = 0; + + /* message SMU to update the bad page number on SMUBUS */ + ret = smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL); + if (ret) + dev_err(smu->adev->dev, + "[%s] failed to message SMU to update HBM bad pages number\n", + __func__); + + return ret; +} + +static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { + /* init dpm */ + .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, + /* dpm/clk tables */ + .set_default_dpm_table = smu_v13_0_6_set_default_dpm_table, + .populate_umd_state_clk = smu_v13_0_6_populate_umd_state_clk, + .print_clk_levels = smu_v13_0_6_print_clk_levels, + .force_clk_levels = smu_v13_0_6_force_clk_levels, + .read_sensor = smu_v13_0_6_read_sensor, + .set_performance_level = smu_v13_0_6_set_performance_level, + .get_power_limit = smu_v13_0_6_get_power_limit, + .is_dpm_running = smu_v13_0_6_is_dpm_running, + .get_unique_id = 
+
+static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
+	/* init dpm */
+	.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
+	/* dpm/clk tables */
+	.set_default_dpm_table = smu_v13_0_6_set_default_dpm_table,
+	.populate_umd_state_clk = smu_v13_0_6_populate_umd_state_clk,
+	.print_clk_levels = smu_v13_0_6_print_clk_levels,
+	.force_clk_levels = smu_v13_0_6_force_clk_levels,
+	.read_sensor = smu_v13_0_6_read_sensor,
+	.set_performance_level = smu_v13_0_6_set_performance_level,
+	.get_power_limit = smu_v13_0_6_get_power_limit,
+	.is_dpm_running = smu_v13_0_6_is_dpm_running,
+	.get_unique_id = smu_v13_0_6_get_unique_id,
+	.init_smc_tables = smu_v13_0_6_init_smc_tables,
+	.fini_smc_tables = smu_v13_0_fini_smc_tables,
+	.init_power = smu_v13_0_init_power,
+	.fini_power = smu_v13_0_fini_power,
+	.check_fw_status = smu_v13_0_6_check_fw_status,
+	/* pptable related */
+	.check_fw_version = smu_v13_0_check_fw_version,
+	.set_driver_table_location = smu_v13_0_set_driver_table_location,
+	.set_tool_table_location = smu_v13_0_set_tool_table_location,
+	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
+	.system_features_control = smu_v13_0_6_system_features_control,
+	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+	.send_smc_msg = smu_cmn_send_smc_msg,
+	.get_enabled_mask = smu_v13_0_6_get_enabled_mask,
+	.feature_is_enabled = smu_cmn_feature_is_enabled,
+	.set_power_limit = smu_v13_0_6_set_power_limit,
+	.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
+	/* TODO: Thermal limits unknown, skip these for now
+	.register_irq_handler = smu_v13_0_register_irq_handler,
+	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
+	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
+	*/
+	.setup_pptable = smu_v13_0_6_setup_pptable,
+	.baco_is_support = smu_v13_0_6_is_baco_supported,
+	.get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
+	.set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
+	.od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
+	.set_df_cstate = smu_v13_0_6_set_df_cstate,
+	.allow_xgmi_power_down = smu_v13_0_6_allow_xgmi_power_down,
+	.log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
+	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+	.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
+	.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
+	.mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
+	.mode1_reset = smu_v13_0_6_mode1_reset,
+	.mode2_reset = smu_v13_0_6_mode2_reset,
+	.wait_for_event = smu_v13_0_wait_for_event,
+	.i2c_init = smu_v13_0_6_i2c_control_init,
+	.i2c_fini = smu_v13_0_6_i2c_control_fini,
+	.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
+};
+
+void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
+{
+	smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
+	smu->message_map = smu_v13_0_6_message_map;
+	smu->clock_map = smu_v13_0_6_clk_map;
+	smu->feature_map = smu_v13_0_6_feature_mask_map;
+	smu->table_map = smu_v13_0_6_table_map;
+	smu_v13_0_set_smu_mailbox_registers(smu);
+}
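smu_v13_0_6_set_ppt_funcs() is the only export: it points the common SMU context at this ASIC's ops table, and everything else dispatches through those hooks. Here is a simplified user-space sketch of that vtable pattern; the type and function names are placeholders, not the driver's real types.

/* vtable_dispatch.c - sketch of the ppt_funcs dispatch pattern */
#include <errno.h>
#include <stdio.h>

struct ppt_ops {
	int (*mode2_reset)(void *ctx);
	int (*set_power_limit)(void *ctx, unsigned int mw);
};

struct smu_ctx {
	const struct ppt_ops *ops;
};

static int demo_mode2_reset(void *ctx)
{
	(void)ctx;
	printf("mode2 reset requested\n");
	return 0;
}

/* An ASIC wires up only the hooks its firmware supports. */
static const struct ppt_ops demo_ops = {
	.mode2_reset = demo_mode2_reset,
	/* .set_power_limit intentionally left NULL */
};

/* The common layer checks for a hook before dispatching through it. */
static int smu_reset(struct smu_ctx *smu)
{
	if (!smu->ops || !smu->ops->mode2_reset)
		return -EOPNOTSUPP;
	return smu->ops->mode2_reset(smu);
}

int main(void)
{
	struct smu_ctx smu = { .ops = &demo_ops };

	return smu_reset(&smu);
}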
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
new file mode 100644
index 0000000000000..f0fa42a645c05
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_13_0_6_PPT_H__
+#define __SMU_13_0_6_PPT_H__
+
+#define SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL 0x2
+#define SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL 0x4
+#define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2
+
+extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 9e1967d8049e3..bba621615abf0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -125,6 +125,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(ArmD3,			PPSMC_MSG_ArmD3,		0),
 	MSG_MAP(AllowGpo,		PPSMC_MSG_SetGpoAllow,		0),
 	MSG_MAP(GetPptLimit,		PPSMC_MSG_GetPptLimit,		0),
+	MSG_MAP(NotifyPowerSource,	PPSMC_MSG_NotifyPowerSource,	0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -575,6 +576,14 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
 						     dpm_table);
 		if (ret)
 			return ret;
+
+		if (skutable->DriverReportedClocks.GameClockAc &&
+		    (dpm_table->dpm_levels[dpm_table->count - 1].value >
+		     skutable->DriverReportedClocks.GameClockAc)) {
+			dpm_table->dpm_levels[dpm_table->count - 1].value =
+				skutable->DriverReportedClocks.GameClockAc;
+			dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
+		}
 	} else {
 		dpm_table->count = 1;
 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
@@ -828,6 +837,57 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
 	return ret;
 }
 
+static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
+					     enum smu_clk_type clk_type,
+					     uint32_t *min,
+					     uint32_t *max)
+{
+	struct smu_13_0_dpm_context *dpm_context =
+		smu->smu_dpm.dpm_context;
+	struct smu_13_0_dpm_table *dpm_table;
+
+	switch (clk_type) {
+	case SMU_MCLK:
+	case SMU_UCLK:
+		/* uclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.uclk_table;
+		break;
+	case SMU_GFXCLK:
+	case SMU_SCLK:
+		/* gfxclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.gfx_table;
+		break;
+	case SMU_SOCCLK:
+		/* socclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.soc_table;
+		break;
+	case SMU_FCLK:
+		/* fclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.fclk_table;
+		break;
+	case SMU_VCLK:
+	case SMU_VCLK1:
+		/* vclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.vclk_table;
+		break;
+	case SMU_DCLK:
+	case SMU_DCLK1:
+		/* dclk dpm table */
+		dpm_table = &dpm_context->dpm_tables.dclk_table;
+		break;
+	default:
+		dev_err(smu->adev->dev, "Unsupported clock type!\n");
+		return -EINVAL;
+	}
+
+	if (min)
+		*min = dpm_table->min;
+	if (max)
+		*max = dpm_table->max;
+
+	return 0;
+}
+
 static int smu_v13_0_7_read_sensor(struct smu_context *smu,
 				   enum amd_pp_sensors sensor,
 				   void *data,
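The new smu_v13_0_7_get_dpm_ultimate_freq() simply reports the min/max of the DPM table that matches the requested clock domain. A caller would typically clamp a requested frequency into that range before programming it; the sketch below is a hypothetical helper, not a driver function, with illustrative values.

/* clamp_freq.c - sketch of consuming a (min, max) DPM range */
#include <stdint.h>
#include <stdio.h>

static uint32_t clamp_freq(uint32_t req, uint32_t min, uint32_t max)
{
	if (req < min)
		return min;
	if (req > max)
		return max;
	return req;
}

int main(void)
{
	/* Illustrative gfxclk range in MHz, not real SKU limits. */
	uint32_t min = 500, max = 2500;

	printf("%u -> %u MHz\n", 300u, clamp_freq(300, min, max));	/* 500 */
	printf("%u -> %u MHz\n", 1800u, clamp_freq(1800, min, max));	/* 1800 */
	printf("%u -> %u MHz\n", 3000u, clamp_freq(3000, min, max));	/* 2500 */
	return 0;
}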
"x16" : "", pcie_table->clk_freq[i], - (gen_speed == pcie_table->pcie_gen[i]) && - (lane_width == pcie_table->pcie_lane[i]) ? + (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) && + (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ? "*" : ""); break; @@ -1329,9 +1389,17 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu) &dpm_context->dpm_tables.fclk_table; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + DriverReportedClocks_t driver_clocks = + pptable->SkuTable.DriverReportedClocks; pstate_table->gfxclk_pstate.min = gfx_table->min; - pstate_table->gfxclk_pstate.peak = gfx_table->max; + if (driver_clocks.GameClockAc && + (driver_clocks.GameClockAc < gfx_table->max)) + pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc; + else + pstate_table->gfxclk_pstate.peak = gfx_table->max; pstate_table->uclk_pstate.min = mem_table->min; pstate_table->uclk_pstate.peak = mem_table->max; @@ -1348,12 +1416,12 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu) pstate_table->fclk_pstate.min = fclk_table->min; pstate_table->fclk_pstate.peak = fclk_table->max; - /* - * For now, just use the mininum clock frequency. - * TODO: update them when the real pstate settings available - */ - pstate_table->gfxclk_pstate.standard = gfx_table->min; - pstate_table->uclk_pstate.standard = mem_table->min; + if (driver_clocks.BaseClockAc && + driver_clocks.BaseClockAc < gfx_table->max) + pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc; + else + pstate_table->gfxclk_pstate.standard = gfx_table->max; + pstate_table->uclk_pstate.standard = mem_table->max; pstate_table->socclk_pstate.standard = soc_table->min; pstate_table->vclk_pstate.standard = vclk_table->min; pstate_table->dclk_pstate.standard = dclk_table->min; @@ -1479,7 +1547,9 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf workload_type = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_WORKLOAD, i); - if (workload_type < 0) { + if (workload_type == -ENOTSUPP) + continue; + else if (workload_type < 0) { result = -EINVAL; goto out; } @@ -1676,7 +1746,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable, .init_pptable_microcode = smu_v13_0_init_pptable_microcode, .populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk, - .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, + .get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq, .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, .read_sensor = smu_v13_0_7_read_sensor, .feature_is_enabled = smu_cmn_feature_is_enabled, @@ -1701,6 +1771,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost, .get_power_limit = smu_v13_0_7_get_power_limit, .set_power_limit = smu_v13_0_set_power_limit, + .set_power_source = smu_v13_0_set_power_source, .get_power_profile_mode = smu_v13_0_7_get_power_profile_mode, .set_power_profile_mode = smu_v13_0_7_set_power_profile_mode, .set_tool_table_location = smu_v13_0_set_tool_table_location, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 04e56b0b3033e..798f36cfcebd3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -1000,7 +1000,7 @@ 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 04e56b0b3033e..798f36cfcebd3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -1000,7 +1000,7 @@ out:
 static int yellow_carp_print_clk_levels(struct smu_context *smu,
 				enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, size = 0, ret = 0;
 	uint32_t cur_value = 0, value = 0, count = 0;
 	uint32_t min, max;
 
@@ -1033,7 +1033,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
 			goto print_clk_out;
 
 		for (i = 0; i < count; i++) {
-			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value);
 			if (ret)
 				goto print_clk_out;
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index d5abafc5a6820..3ecb900e6ecdc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -478,13 +478,13 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu,
 		return mapping.map_to;
 
 	case CMN2ASIC_MAPPING_WORKLOAD:
-		if (index > PP_SMC_POWER_PROFILE_WINDOW3D ||
+		if (index >= PP_SMC_POWER_PROFILE_COUNT ||
 		    !smu->workload_map)
 			return -EINVAL;
 
 		mapping = smu->workload_map[index];
 		if (!mapping.valid_mapping)
-			return -EINVAL;
+			return -ENOTSUPP;
 
 		return mapping.map_to;
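The smu_cmn.c change gives callers a way to tell "bad index" apart from "valid profile with no mapping on this ASIC", which is exactly what the smu_v13_0_7_get_power_profile_mode hunk exploits to skip unmapped entries instead of failing the whole enumeration. A user-space sketch of that error-code split follows; ENOTSUPP is a kernel-internal code, so it is defined locally here, and the mapping table is a placeholder.

/* workload_map.c - sketch of the -EINVAL vs -ENOTSUPP lookup split */
#include <errno.h>
#include <stdio.h>

#ifndef ENOTSUPP
#define ENOTSUPP 524	/* kernel-internal "operation not supported" */
#endif

#define PROFILE_COUNT 4

/* 0 means "no mapping on this ASIC"; nonzero is the firmware index. */
static const int workload_map[PROFILE_COUNT] = { 1, 0, 3, 4 };

static int to_asic_index(int index)
{
	if (index < 0 || index >= PROFILE_COUNT)
		return -EINVAL;	/* caller bug: index out of range */
	if (!workload_map[index])
		return -ENOTSUPP;	/* valid profile, just not wired up */
	return workload_map[index];
}

int main(void)
{
	int i, mapped;

	for (i = 0; i < PROFILE_COUNT; i++) {
		mapped = to_asic_index(i);
		if (mapped == -ENOTSUPP)
			continue;	/* skip, keep enumerating */
		if (mapped < 0)
			return 1;	/* real error, abort */
		printf("profile %d -> fw workload %d\n", i, mapped);
	}
	return 0;
}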