author     Jens Axboe <axboe@kernel.dk>  2020-10-09 16:03:01 -0600
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2023-01-04 11:39:23 +0100
commit     c91ab04781f9e46c1a2143bd1ba8fc1f1aff6ebc (patch)
tree       eda9f2602c0783b4401140be889f0995a8db4e42 /kernel/signal.c
parent     788d0824269bef539fe31a785b1517882eafed93 (diff)
signal: kill JOBCTL_TASK_WORK
[ Upstream commit 98b89b649fce39dacb9dc036d6d0fdb8caff73f7 ]

It's no longer used, get rid of it.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
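The block removed below relied on a store-buffering handshake: each side stores its own flag, issues a full memory barrier, and only then loads the other side's flag, so the two loads can never both miss the other side's store (upstream, this JOBCTL-based handshake became unnecessary once signal-driven task_work moved to TIF_NOTIFY_SIGNAL). The user-space sketch below only illustrates that STORE-MB-LOAD pairing: C11 seq_cst atomics and pthreads stand in for the kernel's cmpxchg()-implied barrier and smp_store_mb(), and the producer/consumer names and variables are hypothetical, not taken from kernel/signal.c.

/*
 * Store-buffering sketch of the STORE-MB-LOAD pairing described in the
 * removed comment.  C11 seq_cst atomics stand in for the kernel's
 * cmpxchg()-implied barrier and smp_store_mb(); all names are illustrative.
 *
 *   producer (like task_work_add):  STORE(work);  mb();  LOAD(flag);
 *   consumer (like get_signal):     STORE(flag);  mb();  LOAD(work);
 *
 * With a full barrier on both sides, at most one of the two loads can see
 * the stale value, so a queued work item can never be overlooked by both
 * paths at the same time.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int work_pending;		/* stand-in for task->task_works  */
static atomic_int notify_armed = 1;	/* stand-in for the JOBCTL flag   */

static void *producer(void *arg)	/* task_work_add() side */
{
	(void)arg;
	atomic_store(&work_pending, 1);		/* STORE(task->task_works) */
	/* seq_cst store followed by seq_cst load acts as STORE-MB-LOAD.  */
	if (atomic_load(&notify_armed) == 0)	/* LOAD(task->jobctl)      */
		printf("producer: flag was cleared, re-notify the task\n");
	return NULL;
}

static void *consumer(void *arg)	/* get_signal() side */
{
	(void)arg;
	atomic_store(&notify_armed, 0);		/* STORE(task->jobctl)     */
	if (atomic_load(&work_pending))		/* LOAD(task->task_works)  */
		printf("consumer: saw pending work, running it now\n");
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;	/* at least one of the two messages always prints */
}

Compiled with -pthread and run repeatedly, at least one of the two messages should always appear; the outcome where both sides read stale values is exactly what the paired barriers rule out.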
Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c  20
1 file changed, 0 insertions, 20 deletions
diff --git a/kernel/signal.c b/kernel/signal.c
index fb5473724f5d..f528a91d6734 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2544,26 +2544,6 @@ bool get_signal(struct ksignal *ksig)
 
 relock:
 	spin_lock_irq(&sighand->siglock);
-	/*
-	 * Make sure we can safely read ->jobctl() in task_work add. As Oleg
-	 * states:
-	 *
-	 * It pairs with mb (implied by cmpxchg) before READ_ONCE. So we
-	 * roughly have
-	 *
-	 *	task_work_add:				get_signal:
-	 *	STORE(task->task_works, new_work);	STORE(task->jobctl);
-	 *	mb();					mb();
-	 *	LOAD(task->jobctl);			LOAD(task->task_works);
-	 *
-	 * and we can rely on STORE-MB-LOAD [ in task_work_add].
-	 */
-	smp_store_mb(current->jobctl, current->jobctl & ~JOBCTL_TASK_WORK);
-	if (unlikely(current->task_works)) {
-		spin_unlock_irq(&sighand->siglock);
-		task_work_run();
-		goto relock;
-	}
 
 	/*
 	 * Every stopped thread goes here after wakeup. Check to see if