// SPDX-License-Identifier: GPL-2.0
#include <linux/irq_work.h>
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

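/*
 * Deferred notification for TWA_NMI_CURRENT: the notification flag is
 * not set directly from NMI context; instead an irq_work is queued,
 * and its handler sets TIF_NOTIFY_RESUME on the current task once the
 * NMI has returned to a context where that is safe.
 */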
#ifdef CONFIG_IRQ_WORK
static void task_work_set_notify_irq(struct irq_work *entry)
{
	test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
}
static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) =
	IRQ_WORK_INIT_HARD(task_work_set_notify_irq);
#endif

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME, @TWA_SIGNAL, @TWA_SIGNAL_NO_IPI or @TWA_NMI_CURRENT.
 *
 * @TWA_SIGNAL works like signals, in that it will interrupt the targeted
 * task and run the task_work, regardless of whether the task is currently
 * running in the kernel or userspace.
 * @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a
 * reschedule IPI to force the targeted task to reschedule and run task_work.
 * This can be advantageous if there's no strict requirement that the
 * task_work be run as soon as possible, just whenever the task enters the
 * kernel anyway.
 * @TWA_RESUME work is run only when the task exits the kernel and returns to
 * user mode, or before entering guest mode.
 * @TWA_NMI_CURRENT works like @TWA_RESUME, except it can only be used for the
 * current @task and if the current context is NMI.
 *
 * Fails if the @task is exiting/exited and thus it can't process this @work:
 * the work item is not queued, and it is up to the caller to arrange for an
 * alternative mechanism in that case. Otherwise @work->func() will be called
 * when the @task goes through one of the aforementioned transitions, or
 * exits.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 on success, -ESRCH if the @task is exiting/exited.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;

	if (notify == TWA_NMI_CURRENT) {
		if (WARN_ON_ONCE(task != current))
			return -EINVAL;
		if (!IS_ENABLED(CONFIG_IRQ_WORK))
			return -EINVAL;
	} else {
		/* record the work call stack in order to print it in KASAN reports */
		kasan_record_aux_stack(work);
	}

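	/*
	 * Lock-free LIFO push: link @work in front of the current head and
	 * publish it with cmpxchg(); retry if another CPU changed the list
	 * in between, or bail out if exit has poisoned it with &work_exited.
	 */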
	head = READ_ONCE(task->task_works);
	do {
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (!try_cmpxchg(&task->task_works, &head, work));

	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		set_notify_signal(task);
		break;
	case TWA_SIGNAL_NO_IPI:
		__set_notify_signal(task);
		break;
#ifdef CONFIG_IRQ_WORK
	case TWA_NMI_CURRENT:
		irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume));
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}
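
/*
 * Illustrative sketch (not part of this file): queueing a callback from
 * a hypothetical caller.  struct my_ctx, my_callback() and my_queue_work()
 * are made up for the example; init_task_work() and task_work_add() are
 * the real API.  With TWA_RESUME the callback runs when @task next
 * returns to user mode; remember the list is LIFO, so of two queued
 * works the one added last runs first.
 *
 *	struct my_ctx {
 *		struct callback_head work;
 *		int value;
 *	};
 *
 *	static void my_callback(struct callback_head *cb)
 *	{
 *		struct my_ctx *ctx = container_of(cb, struct my_ctx, work);
 *
 *		pr_info("ran in task context, value=%d\n", ctx->value);
 *		kfree(ctx);
 *	}
 *
 *	static int my_queue_work(struct task_struct *task, int value)
 *	{
 *		struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (!ctx)
 *			return -ENOMEM;
 *		init_task_work(&ctx->work, my_callback);
 *		ctx->value = value;
 *		if (task_work_add(task, &ctx->work, TWA_RESUME)) {
 *			kfree(ctx);	// task is exiting; use a fallback
 *			return -ESRCH;
 *		}
 *		return 0;
 *	}
 */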

/**
 * task_work_cancel_match - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @match: match function to call
 * @data: data to be passed in to match function
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_match(struct task_struct *task,
		       bool (*match)(struct callback_head *, void *data),
		       void *data)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task_work_pending(task)))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added a new
	 * entry in front of this work and we will find it again,
	 * or we raced with task_work_run() and *pprev is now
	 * NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	work = READ_ONCE(*pprev);
	while (work) {
		if (!match(work, data)) {
			pprev = &work->next;
			work = READ_ONCE(*pprev);
		} else if (try_cmpxchg(pprev, &work, work->next))
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
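
/*
 * Illustrative sketch (not part of this file): cancelling the most
 * recently queued work whose enclosing context matches a given pointer.
 * match_ctx() and struct my_ctx are hypothetical (see the example above).
 *
 *	static bool match_ctx(struct callback_head *cb, void *data)
 *	{
 *		return container_of(cb, struct my_ctx, work) == data;
 *	}
 *
 *	work = task_work_cancel_match(task, match_ctx, ctx);
 *	if (work)
 *		kfree(container_of(work, struct my_ctx, work));
 */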

static bool task_work_func_match(struct callback_head *cb, void *data)
{
	return cb->func == data;
}

/**
 * task_work_cancel_func - cancel a pending work matching a function added by task_work_add()
 * @task: the task which should execute the func's work
 * @func: identifies the func to match with a work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_func(struct task_struct *task, task_work_func_t func)
{
	return task_work_cancel_match(task, task_work_func_match, func);
}
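
/*
 * Illustrative sketch (not part of this file): cancelling by function
 * pointer, reusing my_callback() and struct my_ctx from the hypothetical
 * example above.
 *
 *	work = task_work_cancel_func(task, my_callback);
 *	if (work)
 *		kfree(container_of(work, struct my_ctx, work));
 */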

static bool task_work_match(struct callback_head *cb, void *data)
{
	return cb == data;
}

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @cb: the callback to remove if queued
 *
 * Remove a callback from a task's queue if queued.
 *
 * RETURNS:
 * True if the callback was queued and got cancelled, false otherwise.
 */
bool task_work_cancel(struct task_struct *task, struct callback_head *cb)
{
	struct callback_head *ret;

	ret = task_work_cancel_match(task, task_work_match, cb);

	return ret == cb;
}
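
/*
 * Illustrative sketch (not part of this file): a caller that owns @cb
 * must know whether the callback can still run before freeing the
 * enclosing object.  ctx is hypothetical, as above.
 *
 *	if (task_work_cancel(task, &ctx->work))
 *		kfree(ctx);	// cancelled: my_callback() will not run
 *	// on false, the callback runs (or already ran) and frees ctx
 */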

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer queue
 * further work once task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		work = READ_ONCE(task->task_works);
		do {
			head = NULL;
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (!try_cmpxchg(&task->task_works, &work, head));

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel_match(): it cannot remove
		 * the first entry (== work) because the cmpxchg() above has
		 * already detached the whole list, so its cmpxchg() must fail.
		 * But it may still be removing another entry from the ->next
		 * list under ->pi_lock; acquiring and releasing the lock waits
		 * for any such removal to finish before the callbacks run.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
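
/*
 * Illustrative call sites (a sketch; the real hooks live elsewhere in
 * the tree): the return-to-user path flushes pending works, and the
 * exit path runs the remaining ones with PF_EXITING set, after which
 * the list is poisoned with &work_exited and further task_work_add()
 * calls fail with -ESRCH.
 *
 *	// on return to user mode (cf. resume_user_mode_work()):
 *	if (task_work_pending(current))
 *		task_work_run();
 *
 *	// from the exit path, after PF_EXITING has been set:
 *	task_work_run();
 */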