// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <asm/processor.h>
#include <linux/kasan.h>

#include <trace/events/ipi.h>

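/*
 * Two per-CPU queues: raised_list is processed from hardirq context (the
 * irq_work IPI or the timer tick fallback), lazy_list is processed from the
 * tick, or from the irq_workd kthread on PREEMPT_RT.
 */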
static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
static DEFINE_PER_CPU(struct task_struct *, irq_workd);

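/* Wake this CPU's irq_workd thread if there is lazy work left to process. */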
static void wake_irq_workd(void)
{
	struct task_struct *tsk = __this_cpu_read(irq_workd);

	if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)
		wake_up_process(tsk);
}

#ifdef CONFIG_SMP
static void irq_work_wake(struct irq_work *entry)
{
	wake_irq_workd();
}

static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
	IRQ_WORK_INIT_HARD(irq_work_wake);
#endif

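/* smpboot hook: run the irq_workd thread whenever the lazy list is non-empty. */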
static int irq_workd_should_run(unsigned int cpu)
{
	return !llist_empty(this_cpu_ptr(&lazy_list));
}

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	int oflags;

	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing smp_mb() in irq_work_single() makes sure
	 * everything we did before is visible.
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;
	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

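/*
 * Raise the irq_work interrupt on this CPU, emitting an IPI trace event when
 * the architecture actually uses a self-interrupt for irq_work.
 */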
static __always_inline void irq_work_raise(struct irq_work *work)
{
	if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
		trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);

	arch_irq_work_raise();
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	struct llist_head *list;
	bool rt_lazy_work = false;
	bool lazy_work = false;
	int work_flags;

	work_flags = atomic_read(&work->node.a_flags);
	if (work_flags & IRQ_WORK_LAZY)
		lazy_work = true;
	else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
		 !(work_flags & IRQ_WORK_HARD_IRQ))
		rt_lazy_work = true;

	if (lazy_work || rt_lazy_work)
		list = this_cpu_ptr(&lazy_list);
	else
		list = this_cpu_ptr(&raised_list);

	if (!llist_add(&work->node.llist, list))
		return;

	/* If the work is "lazy", handle it from next tick if any */
	if (!lazy_work || tick_nohz_tick_stopped())
		irq_work_raise(work);
}

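/*
 * Typical usage, as a minimal sketch (names are illustrative only): define a
 * work item with DEFINE_IRQ_WORK() and queue it from any context, including
 * NMI:
 *
 *	static void my_work_func(struct irq_work *work)
 *	{
 *		...		// runs later from hardirq context
 *	}
 *	static DEFINE_IRQ_WORK(my_work, my_work_func);
 *
 *	irq_work_queue(&my_work);
 *
 * On PREEMPT_RT, items not marked IRQ_WORK_HARD_IRQ run from the irq_workd
 * kthread instead of hardirq context.
 */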
/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	kasan_record_aux_stack_noalloc(work);

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backends aren't NMI safe */
		WARN_ON_ONCE(in_nmi());

		/*
		 * On PREEMPT_RT the items which are not marked as
		 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
		 * item is used on the remote CPU to wake the thread.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
		    !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {

			if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
				goto out;

			work = &per_cpu(irq_work_wakeup, cpu);
			if (!irq_work_claim(work))
				goto out;
		}

		__smp_call_single_queue(cpu, &work->node.llist);
	} else {
		__irq_work_queue_local(work);
	}
out:
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}

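/*
 * Tell the tick/nohz code whether this CPU still has irq_work pending that
 * requires the tick to keep running.
 */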
bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

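/*
 * Run one irq_work item: clear PENDING so the item can be re-queued, invoke
 * the callback, then drop BUSY. Also used as the SMP function-call handler
 * for remote irq_work entries (CSD_TYPE_IRQ_WORK).
 */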
void irq_work_single(void *arg)
{
	struct irq_work *work = arg;
	int flags;

	/*
	 * Clear the PENDING bit, after this point the @work can be re-used.
	 * The PENDING bit acts as a lock, and we own it, so we can clear it
	 * without atomic ops.
	 */
	flags = atomic_read(&work->node.a_flags);
	flags &= ~IRQ_WORK_PENDING;
	atomic_set(&work->node.a_flags, flags);

	/*
	 * See irq_work_claim().
	 */
	smp_mb();

	lockdep_irq_work_enter(flags);
	work->func(work);
	lockdep_irq_work_exit(flags);

	/*
	 * Clear the BUSY bit, if set, and return to the free state if no-one
	 * else claimed it meanwhile.
	 */
	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);

	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
	    !arch_irq_work_has_interrupt())
		rcuwait_wake_up(&work->irqwait);
}

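/* Atomically detach @list and run every irq_work item that was on it. */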
static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;

	/*
	 * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed
	 * in a per-CPU thread in preemptible context. Only the items which are
	 * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context.
	 */
	BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, node.llist)
		irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		irq_work_run_list(this_cpu_ptr(&lazy_list));
	else
		wake_irq_workd();
}
EXPORT_SYMBOL_GPL(irq_work_run);

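/*
 * Called from the timer tick: run raised work on architectures that have no
 * irq_work interrupt, then run the lazy work (or, on PREEMPT_RT, defer it to
 * the irq_workd thread).
 */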
void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		irq_work_run_list(this_cpu_ptr(&lazy_list));
	else
		wake_irq_workd();
}

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();
	might_sleep();

	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
	    !arch_irq_work_has_interrupt()) {
		rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
				   TASK_UNINTERRUPTIBLE);
		return;
	}

	while (irq_work_is_busy(work))
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

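/* Body of the per-CPU irq_workd thread: process the lazy list (PREEMPT_RT). */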
static void run_irq_workd(unsigned int cpu)
{
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

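/* Give the irq_workd threads a low FIFO priority when they are created. */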
static void irq_workd_setup(unsigned int cpu)
{
	sched_set_fifo_low(current);
}

static struct smp_hotplug_thread irqwork_threads = {
	.store			= &irq_workd,
	.setup			= irq_workd_setup,
	.thread_should_run	= irq_workd_should_run,
	.thread_fn		= run_irq_workd,
	.thread_comm		= "irq_work/%u",
};

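/* Register the per-CPU irq_workd threads; they are only needed on PREEMPT_RT. */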
static __init int irq_work_init_threads(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		BUG_ON(smpboot_register_percpu_thread(&irqwork_threads));
	return 0;
}
early_initcall(irq_work_init_threads);