-rw-r--r--	kernel/sched/ext.c	11	+++--------
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index f4a7abcec7935..62574d9804092 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1572,18 +1572,13 @@ static void task_unlink_from_dsq(struct task_struct *p,
 	list_del_init(&p->scx.dsq_list.node);
 }
 
-static bool task_linked_on_dsq(struct task_struct *p)
-{
-	return !list_empty(&p->scx.dsq_list.node);
-}
-
 static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
 {
 	struct scx_dispatch_q *dsq = p->scx.dsq;
 	bool is_local = dsq == &rq->scx.local_dsq;
 
 	if (!dsq) {
-		WARN_ON_ONCE(task_linked_on_dsq(p));
+		WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
 		/*
 		 * When dispatching directly from the BPF scheduler to a local
 		 * DSQ, the task isn't associated with any DSQ but
@@ -1604,7 +1599,7 @@ static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
 	 */
 	if (p->scx.holding_cpu < 0) {
 		/* @p must still be on @dsq, dequeue */
-		WARN_ON_ONCE(!task_linked_on_dsq(p));
+		WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node));
 		task_unlink_from_dsq(p, dsq);
 		dsq_mod_nr(dsq, -1);
 	} else {
@@ -1614,7 +1609,7 @@ static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
 		 * holding_cpu which tells dispatch_to_local_dsq() that it lost
 		 * the race.
 		 */
-		WARN_ON_ONCE(task_linked_on_dsq(p));
+		WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node));
 		p->scx.holding_cpu = -1;
 	}
 	p->scx.dsq = NULL;
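
For context, the patch simply open-codes the helper it removes: with the kernel's struct list_head, an entry that has been initialized or unlinked via list_del_init() points back at itself, so list_empty(&p->scx.dsq_list.node) applied to the entry itself answers "is this task currently linked on a DSQ?". The following is a minimal userspace sketch of that idiom, not kernel code: the list primitives are small re-implemented stand-ins rather than <linux/list.h>, and task_node is a hypothetical placeholder for the scx fields embedded in struct task_struct.

#include <assert.h>

/* Minimal stand-in for the kernel's struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

/* On an entry, "empty" means "not linked anywhere". */
static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/* Unlink and re-initialize, mirroring the kernel's list_del_init(). */
static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

/* Hypothetical placeholder for the dsq_list node embedded in a task. */
struct task_node {
	struct list_head dsq_list;
};

int main(void)
{
	struct list_head dsq;	/* stands in for a dispatch queue's list head */
	struct task_node p;

	INIT_LIST_HEAD(&dsq);
	INIT_LIST_HEAD(&p.dsq_list);
	assert(list_empty(&p.dsq_list));	/* not linked on any queue */

	list_add_tail(&p.dsq_list, &dsq);
	assert(!list_empty(&p.dsq_list));	/* linked: the open-coded check */

	list_del_init(&p.dsq_list);
	assert(list_empty(&p.dsq_list));	/* unlinked again */
	return 0;
}

The sketch only illustrates why the open-coded list_empty() test is equivalent to the removed task_linked_on_dsq() helper; the WARN_ON_ONCE() sites in the diff above use exactly this property on p->scx.dsq_list.node.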