Diffstat (limited to 'kernel/sched')
 -rw-r--r--  kernel/sched/core.c  | 25 +++++++++++++++++++++++++
 -rw-r--r--  kernel/sched/idle.c  |  2 +-
 -rw-r--r--  kernel/sched/sched.h |  2 ++
 3 files changed, 28 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 759f4bd52cd6..803c3bc274c4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3502,6 +3502,31 @@ asmlinkage __visible void __sched schedule(void)
 }
 EXPORT_SYMBOL(schedule);
 
+/*
+ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
+ * state (have scheduled out non-voluntarily) by making sure that all
+ * tasks have either left the run queue or have gone into user space.
+ * As idle tasks do not do either, they must not ever be preempted
+ * (schedule out non-voluntarily).
+ *
+ * schedule_idle() is similar to schedule_preempt_disabled() except that it
+ * never enables preemption because it does not call sched_submit_work().
+ */
+void __sched schedule_idle(void)
+{
+	/*
+	 * As this skips calling sched_submit_work(), which the idle task does
+	 * regardless because that function is a nop when the task is in a
+	 * TASK_RUNNING state, make sure this isn't used someplace that the
+	 * current task can be in any other state. Note, idle is always in the
+	 * TASK_RUNNING state.
+	 */
+	WARN_ON_ONCE(current->state);
+	do {
+		__schedule(false);
+	} while (need_resched());
+}
+
 #ifdef CONFIG_CONTEXT_TRACKING
 asmlinkage __visible void __sched schedule_user(void)
 {
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 2a25a9ec2c6e..ef63adce0c9c 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -265,7 +265,7 @@ static void do_idle(void)
 	smp_mb__after_atomic();
 
 	sched_ttwu_pending();
-	schedule_preempt_disabled();
+	schedule_idle();
 
 	if (unlikely(klp_patch_pending(current)))
 		klp_update_patch_state(current);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7808ab050599..6dda2aab731e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1467,6 +1467,8 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
 }
 #endif
 
+extern void schedule_idle(void);
+
 extern void sysrq_sched_debug_show(void);
 extern void sched_init_granularity(void);
 extern void update_max_interval(void);
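For context, the two pre-existing entry points that the new helper sidesteps look roughly like this in kernel/sched/core.c of this era (a sketch reproduced from memory for comparison, not part of the patch; details may differ slightly between kernel versions). schedule() always calls sched_submit_work() first, and schedule_preempt_disabled() momentarily re-enables preemption around it, which is exactly the window the idle loop must not open:

/* Sketch of the existing entry points, for comparison only. */
asmlinkage __visible void __sched schedule(void)
{
	struct task_struct *tsk = current;

	sched_submit_work(tsk);
	do {
		preempt_disable();
		__schedule(false);	/* false: not a preemption */
		sched_preempt_enable_no_resched();
	} while (need_resched());
}

/*
 * Called with preemption disabled; returns with preemption disabled,
 * but preemption is briefly enabled between the
 * sched_preempt_enable_no_resched() below and the preempt_disable()
 * inside schedule()'s loop above.
 */
void __sched schedule_preempt_disabled(void)
{
	sched_preempt_enable_no_resched();
	schedule();
	preempt_disable();
}

Because do_idle() already runs with preemption disabled and the idle task is always in TASK_RUNNING, schedule_idle() can instead call __schedule(false) directly in a need_resched() loop, skipping both sched_submit_work() and the preemption toggle, so synchronize_rcu_tasks() never observes the idle task in a preempted state.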
