xref: /linux/kernel/sched/completion.c (revision bf76f23aa1c178e9115eba17f699fa726aed669b)
1 // SPDX-License-Identifier: GPL-2.0
2 
/*
 * Generic wait-for-completion handler;
 *
 * It differs from semaphores in that their default case is the opposite:
 * wait_for_completion blocks by default, whereas a semaphore by default does
 * not. The interface also makes it easy to 'complete' multiple waiting
 * threads, something which isn't entirely natural for semaphores.
 *
 * But more importantly, the primitive documents the usage. Semaphores would
 * typically be used for exclusion, which gives rise to priority inversion.
 * Waiting for completion is typically a synchronization point, but not an
 * exclusion point.
 */
15 
16 #include <linux/linkage.h>
17 #include <linux/sched/debug.h>
18 #include <linux/completion.h>
19 #include "sched.h"
20 
/*
 * Common implementation of complete() and complete_on_current_cpu():
 * increment ->done and wake one waiter, passing @wake_flags through to the
 * scheduler's wakeup path. All accesses to ->done are serialized by
 * ->wait.lock.
 */
static void complete_with_flags(struct completion *x, int wake_flags)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&x->wait.lock, flags);

	/*
	 * ->done == UINT_MAX is the sticky "complete_all() was called"
	 * marker (see complete_all()); never increment past it.
	 */
	if (x->done != UINT_MAX)
		x->done++;
	swake_up_locked(&x->wait, wake_flags);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
32 
complete_on_current_cpu(struct completion * x)33 void complete_on_current_cpu(struct completion *x)
34 {
35 	return complete_with_flags(x, WF_CURRENT_CPU);
36 }
37 
/**
 * complete: - signals a single thread waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up a single thread waiting on this completion. Threads will be
 * awakened in the same order in which they were queued.
 *
 * See also complete_all(), wait_for_completion() and related routines.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void complete(struct completion *x)
{
	/* No wake flags: default wakeup placement. */
	complete_with_flags(x, 0);
}
EXPORT_SYMBOL(complete);
55 
/**
 * complete_all: - signals all threads waiting on this completion
 * @x:  holds the state of this particular completion
 *
 * This will wake up all threads waiting on this particular completion event.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 *
 * Since complete_all() sets the completion of @x permanently to done
 * to allow multiple waiters to finish, a call to reinit_completion()
 * must be used on @x if @x is to be used again. The code must make
 * sure that all waiters have woken and finished before reinitializing
 * @x. Also note that the function completion_done() can not be used
 * to know if there are still waiters after complete_all() has been called.
 */
void complete_all(struct completion *x)
{
	unsigned long flags;

	lockdep_assert_RT_in_threaded_ctx();

	raw_spin_lock_irqsave(&x->wait.lock, flags);
	/*
	 * Sticky marker: UINT_MAX means "done forever" — the waiter side
	 * (do_wait_for_common(), try_wait_for_completion()) never
	 * decrements it.
	 */
	x->done = UINT_MAX;
	swake_up_all_locked(&x->wait);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
84 
/*
 * Core wait loop. Called with ->wait.lock held and IRQs disabled; the lock
 * is dropped around each invocation of @action (schedule_timeout() or
 * io_schedule_timeout()) and is held again on return.
 *
 * Return: the remaining timeout (forced to at least 1 so a completion that
 * races with the final tick is still reported as success), 0 if the wait
 * timed out, or -ERESTARTSYS if a signal permitted by @state arrived.
 */
static inline long __sched
do_wait_for_common(struct completion *x,
		   long (*action)(long), long timeout, int state)
{
	if (!x->done) {
		DECLARE_SWAITQUEUE(wait);

		do {
			if (signal_pending_state(state, current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			/* Queue ourselves before dropping the lock to sleep. */
			__prepare_to_swait(&x->wait, &wait);
			__set_current_state(state);
			raw_spin_unlock_irq(&x->wait.lock);
			timeout = action(timeout);
			raw_spin_lock_irq(&x->wait.lock);
		} while (!x->done && timeout);
		__finish_swait(&x->wait, &wait);
		if (!x->done)
			return timeout;
	}
	/* Consume one completion, unless complete_all() made ->done sticky. */
	if (x->done != UINT_MAX)
		x->done--;
	return timeout ?: 1;
}
111 
/*
 * Wrap the core wait loop with the lockdep annotations and the ->wait.lock
 * acquisition that do_wait_for_common() expects on entry.
 */
static inline long __sched
__wait_for_common(struct completion *x,
		  long (*action)(long), long timeout, int state)
{
	might_sleep();

	/* Lockdep cross-release annotation; pairs with complete_release(). */
	complete_acquire(x);

	raw_spin_lock_irq(&x->wait.lock);
	timeout = do_wait_for_common(x, action, timeout, state);
	raw_spin_unlock_irq(&x->wait.lock);

	complete_release(x);

	return timeout;
}
128 
/* Wait variant that sleeps via plain schedule_timeout() (no IO accounting). */
static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, schedule_timeout, timeout, state);
}
134 
/* Wait variant that accounts the sleep as IO wait via io_schedule_timeout(). */
static long __sched
wait_for_common_io(struct completion *x, long timeout, int state)
{
	return __wait_for_common(x, io_schedule_timeout, timeout, state);
}
140 
/**
 * wait_for_completion: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout.
 *
 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
 * and interrupt capability. Also see complete().
 */
void __sched wait_for_completion(struct completion *x)
{
	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);
156 
/**
 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible.
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);
175 
/**
 * wait_for_completion_io: - waits for completion of a task
 * @x:  holds the state of this particular completion
 *
 * This waits to be signaled for completion of a specific task. It is NOT
 * interruptible and there is no timeout. The caller is accounted as waiting
 * for IO (which traditionally means blkio only).
 */
void __sched wait_for_completion_io(struct completion *x)
{
	wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io);
189 
/**
 * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. The timeout is in jiffies. It is not
 * interruptible. The caller is accounted as waiting for IO (which traditionally
 * means blkio only).
 *
 * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
 * till timeout) if completed.
 */
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
{
	return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_io_timeout);
209 
210 /**
211  * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
212  * @x:  holds the state of this particular completion
213  *
214  * This waits for completion of a specific task to be signaled. It is
215  * interruptible.
216  *
217  * Return: -ERESTARTSYS if interrupted, 0 if completed.
218  */
wait_for_completion_interruptible(struct completion * x)219 int __sched wait_for_completion_interruptible(struct completion *x)
220 {
221 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
222 
223 	if (t == -ERESTARTSYS)
224 		return t;
225 	return 0;
226 }
227 EXPORT_SYMBOL(wait_for_completion_interruptible);
228 
/**
 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be signaled or for a
 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
					  unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
247 
248 /**
249  * wait_for_completion_killable: - waits for completion of a task (killable)
250  * @x:  holds the state of this particular completion
251  *
252  * This waits to be signaled for completion of a specific task. It can be
253  * interrupted by a kill signal.
254  *
255  * Return: -ERESTARTSYS if interrupted, 0 if completed.
256  */
wait_for_completion_killable(struct completion * x)257 int __sched wait_for_completion_killable(struct completion *x)
258 {
259 	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
260 
261 	if (t == -ERESTARTSYS)
262 		return t;
263 	return 0;
264 }
265 EXPORT_SYMBOL(wait_for_completion_killable);
266 
/**
 * wait_for_completion_state: - waits for completion of a task in a given state
 * @x:  holds the state of this particular completion
 * @state:  TASK_* sleep state to wait in (e.g. TASK_INTERRUPTIBLE)
 *
 * Waits with no timeout, sleeping in @state. Which signals (if any) can
 * interrupt the wait is determined by @state via signal_pending_state().
 *
 * Return: -ERESTARTSYS if interrupted, 0 if completed.
 */
int __sched wait_for_completion_state(struct completion *x, unsigned int state)
{
	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, state);

	if (t == -ERESTARTSYS)
		return t;
	return 0;
}
EXPORT_SYMBOL(wait_for_completion_state);
276 
/**
 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
 * @x:  holds the state of this particular completion
 * @timeout:  timeout value in jiffies
 *
 * This waits for either a completion of a specific task to be
 * signaled or for a specified timeout to expire. It can be
 * interrupted by a kill signal. The timeout is in jiffies.
 *
 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
 * or number of jiffies left till timeout) if completed.
 */
long __sched
wait_for_completion_killable_timeout(struct completion *x,
				     unsigned long timeout)
{
	return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);
296 
/**
 *	try_wait_for_completion - try to decrement a completion without blocking
 *	@x:	completion structure
 *
 *	Return: 0 if a decrement cannot be done without blocking
 *		 1 if a decrement succeeded.
 *
 *	If a completion is being used as a counting completion,
 *	attempt to decrement the counter without blocking. This
 *	enables us to avoid waiting if the resource the completion
 *	is protecting is not available.
 */
bool try_wait_for_completion(struct completion *x)
{
	unsigned long flags;
	bool ret = true;

	/*
	 * Since x->done will need to be locked only
	 * in the non-blocking case, we check x->done
	 * first without taking the lock so we can
	 * return early in the blocking case.
	 */
	if (!READ_ONCE(x->done))
		return false;

	/* Re-check under the lock: the unlocked read above may have raced. */
	raw_spin_lock_irqsave(&x->wait.lock, flags);
	if (!x->done)
		ret = false;
	else if (x->done != UINT_MAX)
		/* Consume one count; UINT_MAX (complete_all()) stays sticky. */
		x->done--;
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
	return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
332 
/**
 *	completion_done - Test to see if a completion has any waiters
 *	@x:	completion structure
 *
 *	Return: 0 if there are waiters (wait_for_completion() in progress)
 *		 1 if there are no waiters.
 *
 *	Note, this will always return true if complete_all() was called on @X.
 */
bool completion_done(struct completion *x)
{
	unsigned long flags;

	/* Lock-free fast path: no completion pending means waiters exist. */
	if (!READ_ONCE(x->done))
		return false;

	/*
	 * If ->done, we need to wait for complete() to release ->wait.lock
	 * otherwise we can end up freeing the completion before complete()
	 * is done referencing it.
	 */
	raw_spin_lock_irqsave(&x->wait.lock, flags);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
	return true;
}
EXPORT_SYMBOL(completion_done);
359