 kernel/sched/core.c | 22 +++-------------------
 1 file changed, 3 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 942734bf7347..8c5f75af07db 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10494,7 +10494,7 @@ void sched_release_group(struct task_group *tg)
 	spin_unlock_irqrestore(&task_group_lock, flags);
 }
 
-static struct task_group *sched_get_task_group(struct task_struct *tsk)
+static void sched_change_group(struct task_struct *tsk)
 {
 	struct task_group *tg;
 
@@ -10506,13 +10506,7 @@ static struct task_group *sched_get_task_group(struct task_struct *tsk)
 	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
 			  struct task_group, css);
 	tg = autogroup_task_group(tsk, tg);
-
-	return tg;
-}
-
-static void sched_change_group(struct task_struct *tsk, struct task_group *group)
-{
-	tsk->sched_task_group = group;
+	tsk->sched_task_group = tg;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_change_group)
@@ -10533,19 +10527,10 @@ void sched_move_task(struct task_struct *tsk)
 {
 	int queued, running, queue_flags =
 		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
-	struct task_group *group;
 	struct rq_flags rf;
 	struct rq *rq;
 
 	rq = task_rq_lock(tsk, &rf);
-	/*
-	 * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
-	 * group changes.
-	 */
-	group = sched_get_task_group(tsk);
-	if (group == tsk->sched_task_group)
-		goto unlock;
-
 	update_rq_clock(rq);
 
 	running = task_current(rq, tsk);
@@ -10556,7 +10541,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (running)
 		put_prev_task(rq, tsk);
 
-	sched_change_group(tsk, group);
+	sched_change_group(tsk);
 
 	if (queued)
 		enqueue_task(rq, tsk, queue_flags);
@@ -10570,7 +10555,6 @@ void sched_move_task(struct task_struct *tsk)
 		resched_curr(rq);
 	}
 
-unlock:
 	task_rq_unlock(rq, tsk, &rf);
 }
 