author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2024-04-03 16:36:45 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2024-04-04 20:23:07 +0200
commit     5debbff9539c9c536d71e91d4bb8995206672b90
tree       9dd9788cde6d485aefa4ee746bd22514da24d5c9
parent     e3ee73b57a2e67010407c9f02514916cab19bbc7
Revert "workqueue: Move nr_active handling into helpers"
This reverts commit 4023a2d95076918abe2757d60810642a8115b586 which is
commit 1c270b79ce0b8290f146255ea9057243f6dd3c17 upstream.
The workqueue patches backported to 6.6.y caused some reported
regressions, so revert them for now.
Reported-by: Thorsten Leemhuis <regressions@leemhuis.info>
Cc: Tejun Heo <tj@kernel.org>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Sasha Levin <sashal@kernel.org>
Cc: Audra Mitchell <audra@redhat.com>
Link: https://lore.kernel.org/all/ce4c2f67-c298-48a0-87a3-f933d646c73b@leemhuis.info/
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  kernel/workqueue.c | 86
1 file changed, 19 insertions, 67 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 51dc508ac35b..3436fd266cde 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1458,14 +1458,11 @@ static bool pwq_is_empty(struct pool_workqueue *pwq)
 static void __pwq_activate_work(struct pool_workqueue *pwq,
                                 struct work_struct *work)
 {
-        unsigned long *wdb = work_data_bits(work);
-
-        WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE));
         trace_workqueue_activate_work(work);
         if (list_empty(&pwq->pool->worklist))
                 pwq->pool->watchdog_ts = jiffies;
         move_linked_works(work, &pwq->pool->worklist, NULL);
-        __clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb);
+        __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
 }
 
 /**
@@ -1490,66 +1487,12 @@ static bool pwq_activate_work(struct pool_workqueue *pwq,
         return true;
 }
 
-/**
- * pwq_tryinc_nr_active - Try to increment nr_active for a pwq
- * @pwq: pool_workqueue of interest
- *
- * Try to increment nr_active for @pwq. Returns %true if an nr_active count is
- * successfully obtained. %false otherwise.
- */
-static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq)
-{
-        struct workqueue_struct *wq = pwq->wq;
-        struct worker_pool *pool = pwq->pool;
-        bool obtained;
-
-        lockdep_assert_held(&pool->lock);
-
-        obtained = pwq->nr_active < READ_ONCE(wq->max_active);
-
-        if (obtained)
-                pwq->nr_active++;
-        return obtained;
-}
-
-/**
- * pwq_activate_first_inactive - Activate the first inactive work item on a pwq
- * @pwq: pool_workqueue of interest
- *
- * Activate the first inactive work item of @pwq if available and allowed by
- * max_active limit.
- *
- * Returns %true if an inactive work item has been activated. %false if no
- * inactive work item is found or max_active limit is reached.
- */
-static bool pwq_activate_first_inactive(struct pool_workqueue *pwq)
-{
-        struct work_struct *work =
-                list_first_entry_or_null(&pwq->inactive_works,
-                                         struct work_struct, entry);
-
-        if (work && pwq_tryinc_nr_active(pwq)) {
-                __pwq_activate_work(pwq, work);
-                return true;
-        } else {
-                return false;
-        }
-}
-
-/**
- * pwq_dec_nr_active - Retire an active count
- * @pwq: pool_workqueue of interest
- *
- * Decrement @pwq's nr_active and try to activate the first inactive work item.
- */
-static void pwq_dec_nr_active(struct pool_workqueue *pwq)
+static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
 {
-        struct worker_pool *pool = pwq->pool;
+        struct work_struct *work = list_first_entry(&pwq->inactive_works,
+                                                    struct work_struct, entry);
 
-        lockdep_assert_held(&pool->lock);
-
-        pwq->nr_active--;
-        pwq_activate_first_inactive(pwq);
+        pwq_activate_work(pwq, work);
 }
 
 /**
@@ -1567,8 +1510,14 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_
 {
         int color = get_work_color(work_data);
 
-        if (!(work_data & WORK_STRUCT_INACTIVE))
-                pwq_dec_nr_active(pwq);
+        if (!(work_data & WORK_STRUCT_INACTIVE)) {
+                pwq->nr_active--;
+                if (!list_empty(&pwq->inactive_works)) {
+                        /* one down, submit an inactive one */
+                        if (pwq->nr_active < READ_ONCE(pwq->wq->max_active))
+                                pwq_activate_first_inactive(pwq);
+                }
+        }
 
         pwq->nr_in_flight[color]--;
 
@@ -1870,11 +1819,13 @@ retry:
          * @work must also queue behind existing inactive work items to maintain
          * ordering when max_active changes. See wq_adjust_max_active().
          */
-        if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq)) {
+        if (list_empty(&pwq->inactive_works) &&
+            pwq->nr_active < READ_ONCE(pwq->wq->max_active)) {
                 if (list_empty(&pool->worklist))
                         pool->watchdog_ts = jiffies;
 
                 trace_workqueue_activate_work(work);
+                pwq->nr_active++;
                 insert_work(pwq, work, &pool->worklist, work_flags);
                 kick_pool(pool);
         } else {
@@ -4736,8 +4687,9 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
                 /* this function can be called during early boot w/ irq disabled */
                 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
 
-                while (pwq_activate_first_inactive(pwq))
-                        ;
+                while (!list_empty(&pwq->inactive_works) &&
+                       pwq->nr_active < wq->max_active)
+                        pwq_activate_first_inactive(pwq);
 
                 kick_pool(pwq->pool);
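For readers skimming the revert, the restored scheme is the open-coded bookkeeping visible in the hunks above: a work item only becomes active while the inactive list is empty and pwq->nr_active is below max_active, and retiring an active item in pwq_dec_nr_in_flight decrements nr_active and promotes the first parked item if the cap now allows it. Below is a minimal userspace sketch of that idea, not kernel code: the toy_pwq/toy_work types and the toy_queue/toy_dec_nr_in_flight helpers are illustrative names invented here, and the real pool_workqueue locking, work colors, and list_head machinery are deliberately omitted.

/*
 * Toy model (userspace, simplified) of the nr_active bookkeeping restored by
 * this revert: at most max_active items count as active, the rest wait on an
 * inactive FIFO, and retiring an active item promotes the first parked one.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_work {
        int id;
        struct toy_work *next;
};

struct toy_pwq {
        int nr_active;               /* items currently counted as running */
        int max_active;              /* cap, like wq->max_active */
        struct toy_work *inactive;   /* FIFO head of parked items */
        struct toy_work *inactive_tail;
};

/* Queue a work item: count it as active if nothing is parked and the cap
 * allows it, otherwise park it at the tail of the inactive list. */
static void toy_queue(struct toy_pwq *pwq, struct toy_work *work)
{
        if (!pwq->inactive && pwq->nr_active < pwq->max_active) {
                pwq->nr_active++;
                printf("work %d: active (nr_active=%d)\n", work->id, pwq->nr_active);
                free(work);     /* the toy model only counts active items */
        } else {
                work->next = NULL;
                if (pwq->inactive_tail)
                        pwq->inactive_tail->next = work;
                else
                        pwq->inactive = work;
                pwq->inactive_tail = work;
                printf("work %d: parked as inactive\n", work->id);
        }
}

/* Retire one active item and, if the cap now allows it, promote the first
 * parked item -- the "one down, submit an inactive one" step. */
static void toy_dec_nr_in_flight(struct toy_pwq *pwq)
{
        pwq->nr_active--;
        if (pwq->inactive && pwq->nr_active < pwq->max_active) {
                struct toy_work *work = pwq->inactive;

                pwq->inactive = work->next;
                if (!pwq->inactive)
                        pwq->inactive_tail = NULL;
                pwq->nr_active++;
                printf("work %d: promoted (nr_active=%d)\n", work->id, pwq->nr_active);
                free(work);
        }
}

int main(void)
{
        struct toy_pwq pwq = { .max_active = 2 };

        for (int i = 1; i <= 4; i++) {
                struct toy_work *w = calloc(1, sizeof(*w));

                w->id = i;
                toy_queue(&pwq, w);
        }

        /* Retiring the two active items pulls the parked ones in, in order. */
        toy_dec_nr_in_flight(&pwq);
        toy_dec_nr_in_flight(&pwq);
        return 0;
}

Built with a stock C compiler, queuing four items with max_active = 2 parks the last two, and the two retirements promote them in FIFO order, mirroring the "one down, submit an inactive one" comment that this revert puts back into pwq_dec_nr_in_flight().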