Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/cik.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/cik.c | 75
1 file changed, 67 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 71c50d8900e3..07c1f239e9c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1741,6 +1741,69 @@ static bool cik_need_full_reset(struct amdgpu_device *adev)
 	return true;
 }
 
+static void cik_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+			       uint64_t *count1)
+{
+	uint32_t perfctr = 0;
+	uint64_t cnt0_of, cnt1_of;
+	int tmp;
+
+	/* This reports 0 on APUs, so return to avoid writing/reading registers
+	 * that may or may not be different from their GPU counterparts
+	 */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	/* Set the 2 events that we wish to watch, defined above */
+	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
+	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
+	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
+
+	/* Write to enable desired perf counters */
+	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
+	/* Zero out and enable the perf counters
+	 * Write 0x5:
+	 * Bit 0 = Start all counters(1)
+	 * Bit 2 = Global counter reset enable(1)
+	 */
+	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
+
+	msleep(1000);
+
+	/* Load the shadow and disable the perf counters
+	 * Write 0x2:
+	 * Bit 0 = Stop counters(0)
+	 * Bit 1 = Load the shadow counters(1)
+	 */
+	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
+
+	/* Read register values to get any >32bit overflow */
+	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
+	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
+	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
+
+	/* Get the values and add the overflow */
+	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
+	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
+}
+
+static bool cik_need_reset_on_init(struct amdgpu_device *adev)
+{
+	u32 clock_cntl, pc;
+
+	if (adev->flags & AMD_IS_APU)
+		return false;
+
+	/* check if the SMC is already running */
+	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	pc = RREG32_SMC(ixSMC_PC_C);
+	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
+	    (0x20100 <= pc))
+		return true;
+
+	return false;
+}
+
 static const struct amdgpu_asic_funcs cik_asic_funcs =
 {
 	.read_disabled_bios = &cik_read_disabled_bios,
@@ -1756,6 +1819,8 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
 	.invalidate_hdp = &cik_invalidate_hdp,
 	.need_full_reset = &cik_need_full_reset,
 	.init_doorbell_index = &legacy_doorbell_index_init,
+	.get_pcie_usage = &cik_get_pcie_usage,
+	.need_reset_on_init = &cik_need_reset_on_init,
 };
 
 static int cik_common_early_init(void *handle)
@@ -2005,10 +2070,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
 		amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
 		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
-		if (amdgpu_dpm == -1)
-			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
-		else
-			amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
@@ -2026,10 +2088,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
 		amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
 		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
-		if (amdgpu_dpm == -1)
-			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
-		else
-			amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
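
Note (illustration only, not part of the patch): each PCIe perf counter above is read back as a 32-bit count register plus separate upper/overflow bits, and cik_get_pcie_usage() folds the two into one 64-bit value with (upper << 32) | low. A minimal standalone C sketch of just that combining step, using a hypothetical combine_counter() helper and made-up register values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: combine a 32-bit counter value with its
 * upper/overflow bits into one 64-bit count, mirroring the
 * (cnt_of << 32) | low pattern in cik_get_pcie_usage().
 */
static uint64_t combine_counter(uint32_t low, uint32_t upper)
{
	return ((uint64_t)upper << 32) | low;
}

int main(void)
{
	/* hypothetical values standing in for the register reads */
	uint32_t count0_low = 0xdeadbeef;
	uint32_t count0_upper = 0x2;

	printf("count0 = %llu\n",
	       (unsigned long long)combine_counter(count0_low, count0_upper));
	return 0;
}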
