| field | value | date |
|---|---|---|
| author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2015-05-04 22:53:35 +0200 |
| committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2015-05-04 22:53:35 +0200 |
| commit | bcf6ad8a4a3d002e8bc8f6639cdc119168f4e87b (patch) | |
| tree | 6536efb9c4c8d199aca4118af959d79192bab716 /kernel/sched/idle.c | |
| parent | a802ea96454570f3c526dd9d7ad8c706e570444d (diff) | |
sched / idle: Eliminate the "reflect" check from cpuidle_idle_call()

Since cpuidle_reflect() should only be called if the idle state
to enter was selected by cpuidle_select(), there is the "reflect"
variable in cpuidle_idle_call() whose value is used to determine
whether or not that is the case.

However, if the entire code run between the conditional setting
"reflect" and the call to cpuidle_reflect() is moved to a separate
function, it will be possible to call that new function in both
branches of the conditional, in which case cpuidle_reflect() will
only need to be called from one of them too and the "reflect"
variable won't be necessary any more.

This eliminates one check made by cpuidle_idle_call() on the majority
of its invocations, so change the code as described.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
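
For readers who want the shape of the change without reading the diff, here is a minimal, self-contained userspace sketch of the refactoring the message describes. The names (pick_state_governor, pick_state_deepest, enter_state, reflect_outcome, do_idle_state) are hypothetical stand-ins for cpuidle_select(), cpuidle_find_deepest_state(), cpuidle_enter(), cpuidle_reflect() and the new call_cpuidle() helper; this is an illustration of the pattern, not the kernel code itself.

```c
/*
 * Illustrative sketch only: the helpers below are hypothetical stand-ins,
 * not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

static int pick_state_governor(void) { return 2; }   /* ~ cpuidle_select() */
static int pick_state_deepest(void)  { return 3; }   /* ~ cpuidle_find_deepest_state() */
static int enter_state(int state)    { printf("enter %d\n", state); return state; }
static void reflect_outcome(int st)  { printf("reflect %d\n", st); }  /* ~ cpuidle_reflect() */

/* Before: a "reflect" flag records which branch ran and is tested later. */
static void idle_before(bool forced_deepest)
{
	bool reflect;
	int state, entered;

	if (forced_deepest) {
		reflect = false;
		state = pick_state_deepest();
	} else {
		reflect = true;
		state = pick_state_governor();
	}

	entered = enter_state(state);	/* tail shared by both branches */

	if (reflect)			/* extra check on every invocation */
		reflect_outcome(entered);
}

/*
 * After: the shared tail becomes a helper (~ call_cpuidle()), each branch
 * calls it directly, and only the governor branch reflects; the flag and
 * its check disappear.
 */
static int do_idle_state(int state)
{
	return enter_state(state);
}

static void idle_after(bool forced_deepest)
{
	if (forced_deepest) {
		do_idle_state(pick_state_deepest());
	} else {
		int entered = do_idle_state(pick_state_governor());

		reflect_outcome(entered);
	}
}

int main(void)
{
	idle_before(false);	/* governor path, flag checked */
	idle_after(false);	/* governor path, no flag needed */
	idle_after(true);	/* forced-deepest path */
	return 0;
}
```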
Diffstat (limited to 'kernel/sched/idle.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/sched/idle.c | 90 |

1 file changed, 46 insertions, 44 deletions
```diff
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index ae7c0be90d16..9c919b42f846 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -79,6 +79,46 @@ static void default_idle_call(void)
 		arch_cpu_idle();
 }
 
+static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+		      int next_state)
+{
+	int entered_state;
+
+	/* Fall back to the default arch idle method on errors. */
+	if (next_state < 0) {
+		default_idle_call();
+		return next_state;
+	}
+
+	/*
+	 * The idle task must be scheduled, it is pointless to go to idle, just
+	 * update no idle residency and return.
+	 */
+	if (current_clr_polling_and_test()) {
+		dev->last_residency = 0;
+		local_irq_enable();
+		return -EBUSY;
+	}
+
+	/* Take note of the planned idle state. */
+	idle_set_state(this_rq(), &drv->states[next_state]);
+
+	/*
+	 * Enter the idle state previously returned by the governor decision.
+	 * This function will block until an interrupt occurs and will take
+	 * care of re-enabling the local interrupts
+	 */
+	entered_state = cpuidle_enter(drv, dev, next_state);
+
+	/* The cpu is no longer idle or about to enter idle. */
+	idle_set_state(this_rq(), NULL);
+
+	if (entered_state == -EBUSY)
+		default_idle_call();
+
+	return entered_state;
+}
+
 /**
  * cpuidle_idle_call - the main idle function
  *
@@ -93,7 +133,6 @@ static void cpuidle_idle_call(void)
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
-	bool reflect;
 
 	/*
 	 * Check if the idle task must be rescheduled. If it is the
@@ -138,56 +177,19 @@ static void cpuidle_idle_call(void)
 			goto exit_idle;
 		}
 
-		reflect = false;
 		next_state = cpuidle_find_deepest_state(drv, dev);
+		call_cpuidle(drv, dev, next_state);
 	} else {
-		reflect = true;
-
 		/*
 		 * Ask the cpuidle framework to choose a convenient idle state.
 		 */
 		next_state = cpuidle_select(drv, dev);
-	}
-	/* Fall back to the default arch idle method on errors. */
-	if (next_state < 0) {
-		default_idle_call();
-		goto exit_idle;
-	}
-
-	/*
-	 * The idle task must be scheduled, it is pointless to
-	 * go to idle, just update no idle residency and get
-	 * out of this function
-	 */
-	if (current_clr_polling_and_test()) {
-		dev->last_residency = 0;
-		entered_state = next_state;
-		local_irq_enable();
-		goto exit_idle;
-	}
-
-	/* Take note of the planned idle state. */
-	idle_set_state(this_rq(), &drv->states[next_state]);
-
-	/*
-	 * Enter the idle state previously returned by the governor decision.
-	 * This function will block until an interrupt occurs and will take
-	 * care of re-enabling the local interrupts
-	 */
-	entered_state = cpuidle_enter(drv, dev, next_state);
-
-	/* The cpu is no longer idle or about to enter idle. */
-	idle_set_state(this_rq(), NULL);
-
-	if (entered_state == -EBUSY) {
-		default_idle_call();
-		goto exit_idle;
-	}
-
-	/*
-	 * Give the governor an opportunity to reflect on the outcome
-	 */
-	if (reflect)
+		entered_state = call_cpuidle(drv, dev, next_state);
+		/*
+		 * Give the governor an opportunity to reflect on the outcome
+		 */
 		cpuidle_reflect(dev, entered_state);
+	}
 
 exit_idle:
 	__current_set_polling();
```
