xref: /linux/kernel/irq_work.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = work->flags;
		/* Someone else already queued it; they own the pending state. */
		if (flags & IRQ_WORK_PENDING)
			return false;
		/* Atomically move to the claimed state (PENDING | BUSY). */
		nflags = flags | IRQ_WORK_FLAGS;
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Architectures without a way to raise a self-interrupt will
	 * have their pending work run from the timer tick instead.
	 */
}

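/*
 * Illustrative sketch, not part of the original file: an architecture
 * that can raise a self-interrupt overrides the weak stub above and
 * arranges for the handler of that interrupt to call irq_work_run().
 * arch_send_self_ipi() and IRQ_WORK_EXAMPLE_VECTOR are hypothetical
 * stand-ins for whatever self-IPI primitive the architecture provides
 * (on x86 this role is played by the local APIC and a dedicated
 * vector). Compiled out on purpose.
 */
#if 0
void arch_irq_work_raise(void)
{
	/* Kick ourselves; that vector's handler runs the queued work. */
	arch_send_self_ipi(IRQ_WORK_EXAMPLE_VECTOR);
}
#endif
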
/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @work; returns true on success, false when
 * @work was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

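/*
 * Illustrative sketch, not part of the original file: typical usage of
 * the API. A user embeds a struct irq_work somewhere, initialises it
 * with init_irq_work() from <linux/irq_work.h> (assumed available at
 * this revision), and queues it from NMI or other atomic context; the
 * callback later runs from irq_work_run() in hardirq context. The
 * example_* names are made up. Compiled out on purpose.
 */
#if 0
static void example_work_func(struct irq_work *work)
{
	/* Runs from irq_work_run(), in hardirq context with IRQs off. */
	pr_info("irq_work: deferred from NMI\n");
}

static struct irq_work example_work;

static void example_init(void)
{
	init_irq_work(&example_work, example_work_func);
}

static void example_nmi_handler(void)
{
	/* NMI-safe; returns false if @example_work is already pending. */
	irq_work_queue(&example_work);
}
#endif
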
/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);

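/*
 * Illustrative sketch, not part of the original file: irq_work_run()
 * is meant to be called by architecture code from the handler of the
 * self-interrupt raised in arch_irq_work_raise(), or from the timer
 * tick on architectures without one. example_irq_work_interrupt is a
 * hypothetical handler name. Compiled out on purpose.
 */
#if 0
void example_irq_work_interrupt(void)
{
	/*
	 * Entered in hardirq context with local IRQs disabled, which is
	 * exactly what the BUG_ON() checks in irq_work_run() demand.
	 */
	irq_work_run();
}
#endif
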
/*
 * Synchronize against the irq_work @work; ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

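/*
 * Illustrative sketch, not part of the original file: irq_work_sync()
 * is what lets the memory embedding a struct irq_work be freed safely.
 * struct example_dev and example_dev_teardown() are made up; kfree()
 * would additionally require <linux/slab.h>. Compiled out on purpose.
 */
#if 0
struct example_dev {
	struct irq_work work;
	/* ... other per-device state ... */
};

static void example_dev_teardown(struct example_dev *dev)
{
	/*
	 * Wait until the callback is no longer running (or pending)
	 * before the embedding object disappears. Must be called with
	 * IRQs enabled, as the WARN_ON_ONCE() above insists, since it
	 * busy-waits for the BUSY flag to clear.
	 */
	irq_work_sync(&dev->work);
	kfree(dev);
}
#endif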