Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 104
 1 file changed, 95 insertions, 9 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index e00b46180d2e..0f960b498792 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -296,7 +296,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 
 	spin_lock_init(&kiq->ring_lock);
 
-	r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
+	r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs);
 	if (r)
 		return r;
 
@@ -321,7 +321,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 
 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
 {
-	amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+	amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs);
 
 	amdgpu_ring_fini(ring);
 }
@@ -543,12 +543,6 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
 		return;
 
-	if (!is_support_sw_smu(adev) &&
-	    (!adev->powerplay.pp_funcs ||
-	     !adev->powerplay.pp_funcs->set_powergating_by_smu))
-		return;
-
-
 	mutex_lock(&adev->gfx.gfx_off_mutex);
 
 	if (!enable)
@@ -641,7 +635,7 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
 		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
 		if (adev->gfx.funcs->query_ras_error_count)
 			adev->gfx.funcs->query_ras_error_count(adev, err_data);
-		amdgpu_ras_reset_gpu(adev, 0);
+		amdgpu_ras_reset_gpu(adev);
 	}
 	return AMDGPU_RAS_SUCCESS;
 }
@@ -664,3 +658,95 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
 	return 0;
 }
+
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
+{
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	BUG_ON(!ring->funcs->emit_rreg);
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_rreg(ring, reg);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* don't wait anymore for gpu reset case because this way may
+	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
+	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+	 * never return if we keep waiting in virt_kiq_rreg, which cause
+	 * gpu_recover() hang there.
+	 *
+	 * also don't wait anymore for IRQ context
+	 * */
+	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+		goto failed_kiq_read;
+
+	might_sleep();
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq_read;
+
+	return adev->wb.wb[kiq->reg_val_offs];
+
+failed_kiq_read:
+	pr_err("failed to read reg:%x\n", reg);
+	return ~0;
+}
+
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+{
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	BUG_ON(!ring->funcs->emit_wreg);
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_wreg(ring, reg, v);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* don't wait anymore for gpu reset case because this way may
+	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
+	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+	 * never return if we keep waiting in virt_kiq_rreg, which cause
+	 * gpu_recover() hang there.
+	 *
+	 * also don't wait anymore for IRQ context
+	 * */
+	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+		goto failed_kiq_write;
+
+	might_sleep();
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq_write;
+
+	return;
+
+failed_kiq_write:
+	pr_err("failed to write reg:%x\n", reg);
+}
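
For orientation, a minimal usage sketch of the two helpers added above; it is not part of the commit. A caller reads and writes a register through the KIQ ring instead of direct MMIO, which is the kind of path taken when, for example, the device runs as an SR-IOV virtual function. The wrapper function and register offset below are hypothetical placeholders; only amdgpu_kiq_rreg(), amdgpu_kiq_wreg() and amdgpu_sriov_runtime() are existing driver symbols.

/* Hypothetical caller, for illustration only (not in this patch). */
static uint32_t example_toggle_bit0(struct amdgpu_device *adev, uint32_t reg_offset)
{
	uint32_t val;

	if (amdgpu_sriov_runtime(adev)) {
		/* KIQ path: submits an emit_rreg/emit_wreg packet on the KIQ
		 * ring, polls the fence, and returns the read value from the
		 * kiq->reg_val_offs writeback slot. */
		val = amdgpu_kiq_rreg(adev, reg_offset);
		amdgpu_kiq_wreg(adev, reg_offset, val | 0x1);
	} else {
		/* Direct MMIO would be used here; omitted in this sketch. */
		val = 0;
	}
	return val;
}

Note that the helpers' retry loop calls might_sleep() and msleep(), so outside of GPU reset or interrupt context (where they bail out early) the caller must be able to sleep.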
