xref: /linux/kernel/irq_work.c (revision 30ff3c59137d00e083f68437665e00895cc271c7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	int oflags;

	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED, &work->flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
	 * everything we did before is visible.
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;
	return true;
}
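
/*
 * Rough sketch of the flags lifecycle implied by irq_work_claim() above and
 * irq_work_run_list() below (IRQ_WORK_CLAIMED is PENDING | BUSY; IRQ_WORK_LAZY,
 * when set by the user, persists across the whole cycle):
 *
 *	0			free, may be claimed
 *	PENDING | BUSY		claimed and queued, not yet run
 *	BUSY			dequeued, callback about to run or running;
 *				may be claimed and queued again meanwhile
 *	0			callback finished and nobody re-claimed it
 */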

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	/* If the work is "lazy", handle it from next tick if any */
	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
}

/* Enqueue the irq_work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
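
/*
 * Example usage (a minimal sketch; the foo_* names are illustrative):
 * a callback is attached to the work item once, after which the item can
 * be queued from contexts that cannot sleep, including NMI context:
 *
 *	static void foo_wakeup_fn(struct irq_work *work)
 *	{
 *		// Runs later in hardirq context on the queueing CPU.
 *	}
 *	static struct irq_work foo_work;
 *
 *	// Setup, e.g. from a driver init path:
 *	init_irq_work(&foo_work, foo_wakeup_fn);
 *
 *	// From any context that cannot sleep, including NMI:
 *	irq_work_queue(&foo_work);	// returns false if already pending
 */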

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backends aren't NMI safe */
		WARN_ON_ONCE(in_nmi());
		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
			arch_send_call_function_single_ipi(cpu);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}
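
/*
 * Example (a minimal sketch continuing the hypothetical foo_work above):
 * push the work to a specific CPU rather than the local one. Unlike
 * irq_work_queue(), this must not be used from NMI context when the
 * target is a remote CPU:
 *
 *	int cpu = 2;	// some online CPU chosen by the caller
 *
 *	if (!irq_work_queue_on(&foo_work, cpu))
 *		;	// already pending somewhere, will run anyway
 */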


/*
 * Tell the tick/nohz code whether this CPU still has irq_work pending
 * that relies on the tick to run.
 */
bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
		int flags;
		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		flags &= ~IRQ_WORK_PENDING;
		(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * Run the pending irq_work on this CPU. Called from the architecture's
 * irq_work interrupt; hotplug also calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Called from the timer tick: run the raised list here if the architecture
 * has no dedicated irq_work interrupt, and always run the lazy list.
 */
void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work: ensure the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
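
/*
 * Example (a minimal sketch using the hypothetical foo_work above):
 * before freeing the work item or unloading the code that owns its
 * callback, wait for any in-flight execution to finish. Must be called
 * with interrupts enabled, per the lockdep assertion:
 *
 *	irq_work_sync(&foo_work);
 *	// Now safe to free foo_work or unload foo_wakeup_fn's module.
 */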