Diffstat (limited to 'drivers/gpu/drm/i915/i915_scheduler.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 29 ++++++++++++++++++++---------
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 340faea6c08a..d01683167c77 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -127,8 +127,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 	return rb_entry(rb, struct i915_priolist, node);
 }
 
-static void assert_priolists(struct intel_engine_execlists * const execlists,
-			     long queue_priority)
+static void assert_priolists(struct intel_engine_execlists * const execlists)
 {
 	struct rb_node *rb;
 	long last_prio, i;
@@ -139,7 +138,7 @@ static void assert_priolists(struct intel_engine_execlists * const execlists,
 	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
 		   rb_first(&execlists->queue.rb_root));
 
-	last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
+	last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
 	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
 		const struct i915_priolist *p = to_priolist(rb);
 
@@ -166,7 +165,7 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
 	int idx, i;
 
 	lockdep_assert_held(&engine->timeline.lock);
-	assert_priolists(execlists, INT_MAX);
+	assert_priolists(execlists);
 
 	/* buckets sorted from highest [in slot 0] to lowest priority */
 	idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
@@ -239,6 +238,18 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 	return engine;
 }
 
+static bool inflight(const struct i915_request *rq,
+		     const struct intel_engine_cs *engine)
+{
+	const struct i915_request *active;
+
+	if (!i915_request_is_active(rq))
+		return false;
+
+	active = port_request(engine->execlists.port);
+	return active->hw_context == rq->hw_context;
+}
+
 static void __i915_schedule(struct i915_request *rq,
 			    const struct i915_sched_attr *attr)
 {
@@ -328,6 +339,7 @@ static void __i915_schedule(struct i915_request *rq,
 		INIT_LIST_HEAD(&dep->dfs_link);
 
 		engine = sched_lock_engine(node, engine);
+		lockdep_assert_held(&engine->timeline.lock);
 
 		/* Recheck after acquiring the engine->timeline.lock */
 		if (prio <= node->attr.priority || node_signaled(node))
@@ -353,20 +365,19 @@ static void __i915_schedule(struct i915_request *rq,
 				continue;
 		}
 
-		if (prio <= engine->execlists.queue_priority)
+		if (prio <= engine->execlists.queue_priority_hint)
 			continue;
 
+		engine->execlists.queue_priority_hint = prio;
+
 		/*
		 * If we are already the currently executing context, don't
		 * bother evaluating if we should preempt ourselves.
		 */
-		if (node_to_request(node)->global_seqno &&
-		    i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
-				      node_to_request(node)->global_seqno))
+		if (inflight(node_to_request(node), engine))
 			continue;
 
 		/* Defer (tasklet) submission until after all of our updates. */
-		engine->execlists.queue_priority = prio;
 		tasklet_hi_schedule(&engine->execlists.tasklet);
 	}
 
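
For readers skimming the hunk at @@ -353,20 +365,19 @@: the patch records the bumped priority in queue_priority_hint before deciding whether to kick the submission tasklet, and the new inflight() helper skips the kick when the request's context is already executing in port 0, replacing the old global_seqno comparison. Below is a minimal standalone sketch of that pattern; it is an illustration only, not i915 code, and every type and function in it (context, request, engine, schedule_bump) is a simplified stand-in.

/* Simplified illustration only -- not the i915 implementation.
 * "context", "request" and "engine" are hypothetical stand-ins for
 * i915's hw_context, i915_request and intel_engine_cs.
 */
#include <stdbool.h>
#include <stdio.h>

struct context { int id; };

struct request {
	const struct context *ctx;
	bool active;			/* already submitted to hardware */
};

struct engine {
	const struct request *port0;	/* request currently executing */
	int queue_priority_hint;	/* highest pending priority seen */
	int tasklet_kicks;		/* stand-in for tasklet_hi_schedule() */
};

/* Mirrors the shape of the new inflight() helper: a request only counts
 * as in-flight if it is active and its context matches the context
 * occupying port 0 of the engine.
 */
static bool inflight(const struct request *rq, const struct engine *e)
{
	if (!rq->active)
		return false;

	return e->port0 && e->port0->ctx == rq->ctx;
}

static void schedule_bump(struct engine *e, const struct request *rq, int prio)
{
	if (prio <= e->queue_priority_hint)
		return;				/* nothing new to preempt for */

	e->queue_priority_hint = prio;		/* record the hint first */

	if (inflight(rq, e))
		return;				/* already executing: no self-preemption */

	e->tasklet_kicks++;			/* defer actual submission to the tasklet */
}

int main(void)
{
	struct context ctx = { .id = 1 };
	struct request running = { .ctx = &ctx, .active = true };
	struct engine e = { .port0 = &running, .queue_priority_hint = 0 };

	schedule_bump(&e, &running, 2);		/* same context: hint moves, no kick */
	printf("hint=%d kicks=%d\n", e.queue_priority_hint, e.tasklet_kicks);

	struct context other = { .id = 2 };
	struct request waiter = { .ctx = &other, .active = false };
	schedule_bump(&e, &waiter, 4);		/* different context: kick the tasklet */
	printf("hint=%d kicks=%d\n", e.queue_priority_hint, e.tasklet_kicks);
	return 0;
}

Compiled with gcc -Wall and run, the sketch prints "hint=2 kicks=0" then "hint=4 kicks=1": bumping the priority of the already-executing context only updates the hint, while a bump on behalf of a different, not-yet-active context also kicks the tasklet.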
