| author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-04-28 15:03:43 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-04-28 15:03:43 -0700 |
| commit | f20730efbd305d42eded761f6fbd9a59d6125228 (patch) | |
| tree | c9c553ff088fd788ac9149ca3f7a5c83cf8eaed6 /kernel/irq_work.c | |
| parent | 586b222d748e91c619d68e9239654ebc7fed9b0c (diff) | |
| parent | 5c3124975e15c1fadd5af1c61e4d627cf6d97ba2 (diff) | |
Merge tag 'smp-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull SMP cross-CPU function-call updates from Ingo Molnar:
- Remove diagnostics and adjust config for CSD lock diagnostics
- Add a generic IPI-sending tracepoint, as there is currently no easy way to instrument IPI origins: it is arch-dependent, and for some major architectures not even consistently available (a minimal probe sketch follows this list)
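
As a usage illustration (not part of this merge), here is a minimal sketch of a probe hooked onto the new ipi_send_cpu tracepoint. It assumes the event arguments introduced by this series (cpu, callsite, callback) and that the tracepoint is visible to the code doing the registration (built-in code, or a module if the tracepoint symbol is exported); the ipi_probe_* names are made up for illustration.

```c
/*
 * Sketch only: log the origin of every single-CPU IPI via the new
 * ipi_send_cpu tracepoint. register/unregister_trace_ipi_send_cpu()
 * are the helpers generated from the TRACE_EVENT in <trace/events/ipi.h>.
 */
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/tracepoint.h>
#include <trace/events/ipi.h>

static void ipi_probe_send_cpu(void *data, const unsigned int cpu,
			       unsigned long callsite, void *callback)
{
	/* callsite: who raised the IPI; callback: what will run on @cpu. */
	pr_info("IPI to CPU%u from %pS, callback %ps\n",
		cpu, (void *)callsite, callback);
}

static int __init ipi_probe_init(void)
{
	return register_trace_ipi_send_cpu(ipi_probe_send_cpu, NULL);
}

static void __exit ipi_probe_exit(void)
{
	unregister_trace_ipi_send_cpu(ipi_probe_send_cpu, NULL);
	/* Wait for in-flight probe calls before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(ipi_probe_init);
module_exit(ipi_probe_exit);
MODULE_LICENSE("GPL");
```

Once the event exists it can equally be consumed through the normal trace event interfaces (ftrace, perf) without any extra code.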
* tag 'smp-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
trace,smp: Trace all smp_function_call*() invocations
trace: Add trace_ipi_send_cpu()
sched, smp: Trace smp callback causing an IPI
smp: reword smp call IPI comment
treewide: Trace IPIs sent via smp_send_reschedule()
irq_work: Trace self-IPIs sent via arch_irq_work_raise()
smp: Trace IPIs sent via arch_send_call_function_ipi_mask()
sched, smp: Trace IPIs sent via send_call_function_single_ipi()
trace: Add trace_ipi_send_cpumask()
kernel/smp: Make csdlock_debug= resettable
locking/csd_lock: Remove per-CPU data indirection from CSD lock debugging
locking/csd_lock: Remove added data from CSD lock debugging
locking/csd_lock: Add Kconfig option for csd_debug default
Diffstat (limited to 'kernel/irq_work.c')
-rw-r--r-- | kernel/irq_work.c | 12 |
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 7afa40fe5cc43..2f4fb336dda17 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -22,6 +22,8 @@
 #include <asm/processor.h>
 #include <linux/kasan.h>
 
+#include <trace/events/ipi.h>
+
 static DEFINE_PER_CPU(struct llist_head, raised_list);
 static DEFINE_PER_CPU(struct llist_head, lazy_list);
 static DEFINE_PER_CPU(struct task_struct *, irq_workd);
@@ -74,6 +76,14 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
+static __always_inline void irq_work_raise(struct irq_work *work)
+{
+	if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
+		trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);
+
+	arch_irq_work_raise();
+}
+
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
 static void __irq_work_queue_local(struct irq_work *work)
 {
@@ -99,7 +109,7 @@ static void __irq_work_queue_local(struct irq_work *work)
 
 	/* If the work is "lazy", handle it from next tick if any */
 	if (!lazy_work || tick_nohz_tick_stopped())
-		arch_irq_work_raise();
+		irq_work_raise(work);
 }
 
 /* Enqueue the irq work @work on the current CPU */
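
To show where this traced self-IPI comes from in practice, here is a minimal, hypothetical irq_work user (my_work and my_work_fn are made-up names; init_irq_work() and irq_work_queue() are the existing irq_work API). Queueing non-lazy work on the local CPU goes through the new irq_work_raise() wrapper above, so ipi_send_cpu fires with the work callback before arch_irq_work_raise() raises the self-IPI.

```c
#include <linux/irq_work.h>

/* Hypothetical callback: runs from the self-IPI (hard-IRQ context)
 * on the CPU that queued the work. */
static void my_work_fn(struct irq_work *work)
{
}

static struct irq_work my_work;

static void my_setup(void)
{
	init_irq_work(&my_work, my_work_fn);
}

static void kick_my_work(void)
{
	/*
	 * Enqueue on the local CPU. For non-lazy work this reaches
	 * irq_work_raise(), which emits ipi_send_cpu with my_work_fn as
	 * the callback before the self-IPI is raised.
	 */
	irq_work_queue(&my_work);
}
```

The trace_ipi_send_cpu_enabled() guard is a static-key test that stays a no-op while the event is disabled, keeping the argument setup off the fast path, and the arch_irq_work_has_interrupt() check avoids reporting an IPI on architectures where pending irq_work is instead run from the next tick.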