Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c   54
1 file changed, 45 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index fd8cca241da6..18a7829122d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -758,11 +758,42 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
  * @fence: fence of the ring to signal
  *
  */
-void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence)
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
 {
-	dma_fence_set_error(&fence->base, -ETIME);
-	amdgpu_fence_write(fence->ring, fence->seq);
-	amdgpu_fence_process(fence->ring);
+	struct dma_fence *unprocessed;
+	struct dma_fence __rcu **ptr;
+	struct amdgpu_fence *fence;
+	struct amdgpu_ring *ring = af->ring;
+	unsigned long flags;
+	u32 seq, last_seq;
+
+	last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
+	seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
+
+	/* mark all fences from the guilty context with an error */
+	spin_lock_irqsave(&ring->fence_drv.lock, flags);
+	do {
+		last_seq++;
+		last_seq &= ring->fence_drv.num_fences_mask;
+
+		ptr = &ring->fence_drv.fences[last_seq];
+		rcu_read_lock();
+		unprocessed = rcu_dereference(*ptr);
+
+		if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
+			fence = container_of(unprocessed, struct amdgpu_fence, base);
+
+			if (fence == af)
+				dma_fence_set_error(&fence->base, -ETIME);
+			else if (fence->context == af->context)
+				dma_fence_set_error(&fence->base, -ECANCELED);
+		}
+		rcu_read_unlock();
+	} while (last_seq != seq);
+	spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
+	/* signal the guilty fence */
+	amdgpu_fence_write(ring, af->seq);
+	amdgpu_fence_process(ring);
 }
 
 void amdgpu_fence_save_wptr(struct dma_fence *fence)
@@ -790,14 +821,19 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
 	struct dma_fence *unprocessed;
 	struct dma_fence __rcu **ptr;
 	struct amdgpu_fence *fence;
-	u64 wptr, i, seqno;
+	u64 wptr;
+	u32 seq, last_seq;
 
-	seqno = amdgpu_fence_read(ring);
+	last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
+	seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
 	wptr = ring->fence_drv.signalled_wptr;
 	ring->ring_backup_entries_to_copy = 0;
 
-	for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) {
-		ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask];
+	do {
+		last_seq++;
+		last_seq &= ring->fence_drv.num_fences_mask;
+
+		ptr = &ring->fence_drv.fences[last_seq];
 		rcu_read_lock();
 		unprocessed = rcu_dereference(*ptr);
 
@@ -813,7 +849,7 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
 			wptr = fence->wptr;
 		}
 		rcu_read_unlock();
-	}
+	} while (last_seq != seq);
 }
 
 /*
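
Both hunks replace a linear 64-bit seqno walk with a do/while over the fence ring-buffer slots, masking the sequence numbers with the power-of-two num_fences_mask so the traversal wraps correctly. The sketch below is a minimal, self-contained userspace rendition of that pattern combined with the error-marking policy of the first hunk (-ETIME for the guilty fence, -ECANCELED for other pending fences from the same context). All names in it (toy_ring, toy_fence, mark_guilty_context) are illustrative stand-ins, not the driver's real structures, and the spinlock/RCU handling of the real code is omitted.

/*
 * Minimal userspace sketch (illustrative names only) of the masked
 * ring-buffer walk introduced above: visit every fence slot between
 * the last hardware-signalled sequence number and the newest issued
 * one, wrapping via a power-of-two mask, tagging the guilty fence
 * with -ETIME and other pending fences from the same context with
 * -ECANCELED.  Assumes at least one fence is pending, as at the
 * driver's call sites; locking and RCU are left out.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_FENCES	8u			/* must be a power of two */
#define NUM_FENCES_MASK	(NUM_FENCES - 1u)

struct toy_fence {
	uint64_t context;			/* submitting context id */
	int error;				/* 0, -ETIME or -ECANCELED */
};

struct toy_ring {
	uint32_t hw_seq;			/* last seqno signalled by the hw */
	uint32_t sync_seq;			/* last seqno handed to the hw */
	struct toy_fence *slots[NUM_FENCES];	/* pending fences, indexed by seqno & mask */
};

static void mark_guilty_context(struct toy_ring *ring, struct toy_fence *guilty)
{
	uint32_t last_seq = ring->hw_seq & NUM_FENCES_MASK;
	uint32_t seq = ring->sync_seq & NUM_FENCES_MASK;

	/*
	 * do/while: the slot right after hw_seq is always visited, and
	 * the walk stops once sync_seq's slot has been handled.
	 */
	do {
		struct toy_fence *f;

		last_seq++;
		last_seq &= NUM_FENCES_MASK;

		f = ring->slots[last_seq];
		if (!f)
			continue;

		if (f == guilty)
			f->error = -ETIME;	/* the fence that timed out */
		else if (f->context == guilty->context)
			f->error = -ECANCELED;	/* collateral from the same context */
	} while (last_seq != seq);
}

int main(void)
{
	struct toy_fence a = { .context = 1 };		/* same context as guilty */
	struct toy_fence guilty = { .context = 1 };
	struct toy_fence b = { .context = 2 };		/* unrelated context */
	struct toy_fence c = { .context = 1 };
	struct toy_ring ring = {
		.hw_seq = 13,				/* slot 5 already signalled */
		.sync_seq = 17,				/* slot 1: the walk wraps around */
		.slots = { [6] = &a, [7] = &guilty, [0] = &b, [1] = &c },
	};

	mark_guilty_context(&ring, &guilty);
	printf("a=%d guilty=%d b=%d c=%d\n", a.error, guilty.error, b.error, c.error);
	return 0;
}

With this ring state the walk visits slots 6, 7, 0 and 1 in order, so a and c end up with -ECANCELED, guilty with -ETIME, and b (a different context) is left untouched. The same wrap-safe traversal, minus the error marking, is what the second hunk uses when collecting unprocessed commands.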
