1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_WAIT_H
3 #define _LINUX_WAIT_H
4 /*
5 * Linux wait queue related types and methods
6 */
7 #include <linux/list.h>
8 #include <linux/stddef.h>
9 #include <linux/spinlock.h>
10
11 #include <asm/current.h>
12
13 typedef struct wait_queue_entry wait_queue_entry_t;
14
15 typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
16 int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
17
/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* wake at most one such waiter per wake-up (see __add_wait_queue_exclusive()) */
#define WQ_FLAG_WOKEN		0x02	/* NOTE(review): consumed by wait_woken()/woken_wake_function() elsewhere — not used in this header */
#define WQ_FLAG_CUSTOM		0x04	/* NOTE(review): meaning defined by individual users; not referenced here */
#define WQ_FLAG_DONE		0x08	/* NOTE(review): completion marker for some wake paths; not referenced here */
#define WQ_FLAG_PRIORITY	0x10	/* entry stays at the head of the list (see __add_wait_queue()) */
24
25 /*
26 * A single wait-queue entry structure:
27 */
struct wait_queue_entry {
	unsigned int		flags;		/* WQ_FLAG_* bits */
	void			*private;	/* the waiting task (see init_waitqueue_entry()) or callback-private data */
	wait_queue_func_t	func;		/* wakeup callback invoked for this entry */
	struct list_head	entry;		/* linkage on wait_queue_head::head */
};
34
struct wait_queue_head {
	spinlock_t		lock;		/* protects @head */
	struct list_head	head;		/* list of wait_queue_entry::entry */
};
39 typedef struct wait_queue_head wait_queue_head_t;
40
41 struct task_struct;
42
43 /*
44 * Macros for declaration and initialisaton of the datatypes
45 */
46
/*
 * Build-time initializer for a wait_queue_entry bound to task @tsk,
 * woken via default_wake_function(). The list linkage is left
 * unlinked ({ NULL, NULL }) until the entry is added to a queue.
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

/*
 * Build-time initializer for a wait_queue_head: unlocked spinlock and
 * an empty (self-linked) waiter list.
 */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= LIST_HEAD_INIT(name.head) }

#define DECLARE_WAIT_QUEUE_HEAD(name)						\
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
61
extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Run-time initializer for a wait_queue_head. A distinct static
 * lock_class_key per call site gives lockdep a separate lock class for
 * every waitqueue initialized here; the stringified argument serves as
 * the lockdep name.
 */
#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
/*
 * On-stack heads must be initialized at run time so lockdep gets a
 * valid key; without lockdep the static initializer suffices.
 */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
79
init_waitqueue_entry(struct wait_queue_entry * wq_entry,struct task_struct * p)80 static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
81 {
82 wq_entry->flags = 0;
83 wq_entry->private = p;
84 wq_entry->func = default_wake_function;
85 }
86
87 static inline void
init_waitqueue_func_entry(struct wait_queue_entry * wq_entry,wait_queue_func_t func)88 init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
89 {
90 wq_entry->flags = 0;
91 wq_entry->private = NULL;
92 wq_entry->func = func;
93 }
94
95 /**
96 * waitqueue_active -- locklessly test for waiters on the queue
97 * @wq_head: the waitqueue to test for waiters
98 *
99 * returns true if the wait list is not empty
100 *
101 * NOTE: this function is lockless and requires care, incorrect usage _will_
102 * lead to sporadic and non-obvious failure.
103 *
104 * Use either while holding wait_queue_head::lock or when used for wakeups
105 * with an extra smp_mb() like::
106 *
107 * CPU0 - waker CPU1 - waiter
108 *
109 * for (;;) {
110 * @cond = true; prepare_to_wait(&wq_head, &wait, state);
111 * smp_mb(); // smp_mb() from set_current_state()
112 * if (waitqueue_active(wq_head)) if (@cond)
113 * wake_up(wq_head); break;
114 * schedule();
115 * }
116 * finish_wait(&wq_head, &wait);
117 *
118 * Because without the explicit smp_mb() it's possible for the
119 * waitqueue_active() load to get hoisted over the @cond store such that we'll
120 * observe an empty wait list while the waiter might not observe @cond.
121 *
122 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
123 * which (when the lock is uncontended) are of roughly equal cost.
124 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	/* Lockless check; see the comment above for the required barriers. */
	return !list_empty(&wq_head->head);
}
129
130 /**
131 * wq_has_single_sleeper - check if there is only one sleeper
132 * @wq_head: wait queue head
133 *
 * Returns true if wq_head has only one sleeper on the list.
135 *
136 * Please refer to the comment for waitqueue_active.
137 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	/* Lockless; same usage caveats as waitqueue_active(). */
	return list_is_singular(&wq_head->head);
}
142
143 /**
144 * wq_has_sleeper - check if there are any waiting processes
145 * @wq_head: wait queue head
146 *
147 * Returns true if wq_head has waiting processes
148 *
149 * Please refer to the comment for waitqueue_active.
150 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side (e.g. the smp_mb() implied by setting the
	 * task state, as described in the waitqueue_active() comment).
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
163
164 extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
165 extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
166 extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
167 extern int add_wait_queue_priority_exclusive(struct wait_queue_head *wq_head,
168 struct wait_queue_entry *wq_entry);
169 extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
170
__add_wait_queue(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)171 static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
172 {
173 struct list_head *head = &wq_head->head;
174 struct wait_queue_entry *wq;
175
176 list_for_each_entry(wq, &wq_head->head, entry) {
177 if (!(wq->flags & WQ_FLAG_PRIORITY))
178 break;
179 head = &wq->entry;
180 }
181 list_add(&wq_entry->entry, head);
182 }
183
184 /*
185 * Used for wake-one threads:
186 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	/* Mark as wake-one, then queue like a normal entry. */
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}
193
static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	/* Append behind all existing waiters. */
	list_add_tail(&wq_entry->entry, &wq_head->head);
}
198
static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	/* Wake-one waiter, appended behind all existing waiters. */
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}
205
static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	/* @wq_head is unused; kept for symmetry with the add helpers. */
	list_del(&wq_entry->entry);
}
211
212 int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
213 void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key);
214 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
215 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
216 void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
217 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
218 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
219 void __wake_up_pollfree(struct wait_queue_head *wq_head);
220
221 #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
222 #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
223 #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
224 #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
225 #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
226 #define wake_up_sync(x) __wake_up_sync(x, TASK_NORMAL)
227
228 #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
229 #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
230 #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
231 #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE)
232
233 /*
234 * Wakeup macros to be used to report events to the targets.
235 */
236 #define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
237 #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
238 #define wake_up_poll(x, m) \
239 __wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
240 #define wake_up_poll_on_current_cpu(x, m) \
241 __wake_up_on_current_cpu(x, TASK_NORMAL, poll_to_key(m))
242 #define wake_up_locked_poll(x, m) \
243 __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
244 #define wake_up_interruptible_poll(x, m) \
245 __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
246 #define wake_up_interruptible_sync_poll(x, m) \
247 __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
248 #define wake_up_interruptible_sync_poll_locked(x, m) \
249 __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
250
251 /**
252 * wake_up_pollfree - signal that a polled waitqueue is going away
253 * @wq_head: the wait queue head
254 *
255 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
256 * lifetime is tied to a task rather than to the 'struct file' being polled,
257 * this function must be called before the waitqueue is freed so that
258 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
259 *
260 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
261 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
262 */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/*
	 * For performance reasons, we don't always take the queue lock here.
	 * Therefore, we might race with someone removing the last entry from
	 * the queue, and proceed while they still hold the queue lock.
	 * However, rcu_read_lock() is required to be held in such cases, so we
	 * can safely proceed with an RCU-delayed free.
	 *
	 * Fast path: only call into __wake_up_pollfree() when waiters
	 * appear to be present.
	 */
	if (waitqueue_active(wq_head))
		__wake_up_pollfree(wq_head);
}
275
/*
 * Evaluate @condition for a timed wait. __ret is an implicit variable
 * in the caller's scope holding the remaining jiffies. If the
 * condition became true exactly as the timeout expired (__ret == 0),
 * __ret is forced to 1 so callers can distinguish "condition true"
 * from "timed out". Yields true when the condition holds or the
 * timeout has expired.
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})
283
/*
 * True when a sleep in @state may be cut short: a non-constant state
 * is conservatively treated as interruptible; a constant state is
 * checked for the TASK_INTERRUPTIBLE or TASK_WAKEKILL bits.
 */
#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
	 (state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
287
288 extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
289
290 /*
291 * The below macro ___wait_event() has an explicit shadow of the __ret
292 * variable when used from the wait_event_*() macros.
293 *
294 * This is so that both can use the ___wait_cond_timeout() construct
295 * to wrap the condition.
296 *
297 * The type inconsistency of the wait_event_*() __ret variable is also
298 * on purpose; we use long where we can return timeout values and int
299 * otherwise.
300 */
301
/*
 * ___wait_event() - core wait loop shared by all wait_event*() macros.
 * @wq_head:   the waitqueue to wait on
 * @condition: C expression re-evaluated after every wakeup
 * @state:     task state to sleep in (TASK_INTERRUPTIBLE etc.)
 * @exclusive: non-zero to queue the entry with WQ_FLAG_EXCLUSIVE
 * @ret:       initial value of __ret (remaining timeout, or 0)
 * @cmd:       statement performing the actual sleep (e.g. schedule())
 *
 * Evaluates to __ret: the initial/remaining value on normal exit, or
 * the error from prepare_to_wait_event() when an interruptible sleep
 * is cut short.
 */
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		/* Queue the entry and set the task state for the sleep. */	\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		/* __int is only honoured for interruptible sleeps. */		\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
										\
		if (condition)							\
			break;							\
	}									\
	/* Dequeue and restore TASK_RUNNING on the normal-exit path. */		\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})
328
329 #define __wait_event(wq_head, condition) \
330 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
331 schedule())
332
333 /**
334 * wait_event - sleep until a condition gets true
335 * @wq_head: the waitqueue to wait on
336 * @condition: a C expression for the event to wait for
337 *
338 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
339 * @condition evaluates to true. The @condition is checked each time
340 * the waitqueue @wq_head is woken up.
341 *
342 * wake_up() has to be called after changing any variable that could
343 * change the result of the wait condition.
344 */
345 #define wait_event(wq_head, condition) \
346 do { \
347 might_sleep(); \
348 if (condition) \
349 break; \
350 __wait_event(wq_head, condition); \
351 } while (0)
352
353 #define __io_wait_event(wq_head, condition) \
354 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
355 io_schedule())
356
357 /*
358 * io_wait_event() -- like wait_event() but with io_schedule()
359 */
360 #define io_wait_event(wq_head, condition) \
361 do { \
362 might_sleep(); \
363 if (condition) \
364 break; \
365 __io_wait_event(wq_head, condition); \
366 } while (0)
367
368 #define __wait_event_freezable(wq_head, condition) \
369 ___wait_event(wq_head, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), \
370 0, 0, schedule())
371
372 /**
373 * wait_event_freezable - sleep (or freeze) until a condition gets true
374 * @wq_head: the waitqueue to wait on
375 * @condition: a C expression for the event to wait for
376 *
377 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
378 * to system load) until the @condition evaluates to true. The
379 * @condition is checked each time the waitqueue @wq_head is woken up.
380 *
381 * wake_up() has to be called after changing any variable that could
382 * change the result of the wait condition.
383 */
384 #define wait_event_freezable(wq_head, condition) \
385 ({ \
386 int __ret = 0; \
387 might_sleep(); \
388 if (!(condition)) \
389 __ret = __wait_event_freezable(wq_head, condition); \
390 __ret; \
391 })
392
393 #define __wait_event_timeout(wq_head, condition, timeout) \
394 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
395 TASK_UNINTERRUPTIBLE, 0, timeout, \
396 __ret = schedule_timeout(__ret))
397
398 /**
399 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
400 * @wq_head: the waitqueue to wait on
401 * @condition: a C expression for the event to wait for
402 * @timeout: timeout, in jiffies
403 *
404 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
405 * @condition evaluates to true. The @condition is checked each time
406 * the waitqueue @wq_head is woken up.
407 *
408 * wake_up() has to be called after changing any variable that could
409 * change the result of the wait condition.
410 *
411 * Returns:
412 * 0 if the @condition evaluated to %false after the @timeout elapsed,
413 * 1 if the @condition evaluated to %true after the @timeout elapsed,
414 * or the remaining jiffies (at least 1) if the @condition evaluated
415 * to %true before the @timeout elapsed.
416 */
417 #define wait_event_timeout(wq_head, condition, timeout) \
418 ({ \
419 long __ret = timeout; \
420 might_sleep(); \
421 if (!___wait_cond_timeout(condition)) \
422 __ret = __wait_event_timeout(wq_head, condition, timeout); \
423 __ret; \
424 })
425
426 #define __wait_event_freezable_timeout(wq_head, condition, timeout) \
427 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
428 (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 0, timeout, \
429 __ret = schedule_timeout(__ret))
430
431 /*
432 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
433 * increasing load and is freezable.
434 */
435 #define wait_event_freezable_timeout(wq_head, condition, timeout) \
436 ({ \
437 long __ret = timeout; \
438 might_sleep(); \
439 if (!___wait_cond_timeout(condition)) \
440 __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
441 __ret; \
442 })
443
444 #define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
445 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
446 cmd1; schedule(); cmd2)
447 /*
448 * Just like wait_event_cmd(), except it sets exclusive flag
449 */
450 #define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
451 do { \
452 if (condition) \
453 break; \
454 __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \
455 } while (0)
456
457 #define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \
458 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
459 cmd1; schedule(); cmd2)
460
461 /**
462 * wait_event_cmd - sleep until a condition gets true
463 * @wq_head: the waitqueue to wait on
464 * @condition: a C expression for the event to wait for
465 * @cmd1: the command will be executed before sleep
466 * @cmd2: the command will be executed after sleep
467 *
468 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
469 * @condition evaluates to true. The @condition is checked each time
470 * the waitqueue @wq_head is woken up.
471 *
472 * wake_up() has to be called after changing any variable that could
473 * change the result of the wait condition.
474 */
475 #define wait_event_cmd(wq_head, condition, cmd1, cmd2) \
476 do { \
477 if (condition) \
478 break; \
479 __wait_event_cmd(wq_head, condition, cmd1, cmd2); \
480 } while (0)
481
482 #define __wait_event_interruptible(wq_head, condition) \
483 ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
484 schedule())
485
486 /**
487 * wait_event_interruptible - sleep until a condition gets true
488 * @wq_head: the waitqueue to wait on
489 * @condition: a C expression for the event to wait for
490 *
491 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
492 * @condition evaluates to true or a signal is received.
493 * The @condition is checked each time the waitqueue @wq_head is woken up.
494 *
495 * wake_up() has to be called after changing any variable that could
496 * change the result of the wait condition.
497 *
498 * The function will return -ERESTARTSYS if it was interrupted by a
499 * signal and 0 if @condition evaluated to true.
500 */
501 #define wait_event_interruptible(wq_head, condition) \
502 ({ \
503 int __ret = 0; \
504 might_sleep(); \
505 if (!(condition)) \
506 __ret = __wait_event_interruptible(wq_head, condition); \
507 __ret; \
508 })
509
510 #define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
511 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
512 TASK_INTERRUPTIBLE, 0, timeout, \
513 __ret = schedule_timeout(__ret))
514
515 /**
516 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
517 * @wq_head: the waitqueue to wait on
518 * @condition: a C expression for the event to wait for
519 * @timeout: timeout, in jiffies
520 *
521 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
522 * @condition evaluates to true or a signal is received.
523 * The @condition is checked each time the waitqueue @wq_head is woken up.
524 *
525 * wake_up() has to be called after changing any variable that could
526 * change the result of the wait condition.
527 *
528 * Returns:
529 * 0 if the @condition evaluated to %false after the @timeout elapsed,
530 * 1 if the @condition evaluated to %true after the @timeout elapsed,
531 * the remaining jiffies (at least 1) if the @condition evaluated
532 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
533 * interrupted by a signal.
534 */
535 #define wait_event_interruptible_timeout(wq_head, condition, timeout) \
536 ({ \
537 long __ret = timeout; \
538 might_sleep(); \
539 if (!___wait_cond_timeout(condition)) \
540 __ret = __wait_event_interruptible_timeout(wq_head, \
541 condition, timeout); \
542 __ret; \
543 })
544
545 #define __wait_event_hrtimeout(wq_head, condition, timeout, state) \
546 ({ \
547 int __ret = 0; \
548 struct hrtimer_sleeper __t; \
549 \
550 hrtimer_setup_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
551 HRTIMER_MODE_REL); \
552 if ((timeout) != KTIME_MAX) { \
553 hrtimer_set_expires_range_ns(&__t.timer, timeout, \
554 current->timer_slack_ns); \
555 hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \
556 } \
557 \
558 __ret = ___wait_event(wq_head, condition, state, 0, 0, \
559 if (!__t.task) { \
560 __ret = -ETIME; \
561 break; \
562 } \
563 schedule()); \
564 \
565 hrtimer_cancel(&__t.timer); \
566 destroy_hrtimer_on_stack(&__t.timer); \
567 __ret; \
568 })
569
570 /**
571 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
572 * @wq_head: the waitqueue to wait on
573 * @condition: a C expression for the event to wait for
574 * @timeout: timeout, as a ktime_t
575 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.
578 * The @condition is checked each time the waitqueue @wq_head is woken up.
579 *
580 * wake_up() has to be called after changing any variable that could
581 * change the result of the wait condition.
582 *
583 * The function returns 0 if @condition became true, or -ETIME if the timeout
584 * elapsed.
585 */
586 #define wait_event_hrtimeout(wq_head, condition, timeout) \
587 ({ \
588 int __ret = 0; \
589 might_sleep(); \
590 if (!(condition)) \
591 __ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
592 TASK_UNINTERRUPTIBLE); \
593 __ret; \
594 })
595
596 /**
597 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
598 * @wq: the waitqueue to wait on
599 * @condition: a C expression for the event to wait for
600 * @timeout: timeout, as a ktime_t
601 *
602 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
603 * @condition evaluates to true or a signal is received.
604 * The @condition is checked each time the waitqueue @wq is woken up.
605 *
606 * wake_up() has to be called after changing any variable that could
607 * change the result of the wait condition.
608 *
609 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
610 * interrupted by a signal, or -ETIME if the timeout elapsed.
611 */
612 #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
613 ({ \
614 long __ret = 0; \
615 might_sleep(); \
616 if (!(condition)) \
617 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
618 TASK_INTERRUPTIBLE); \
619 __ret; \
620 })
621
622 #define __wait_event_interruptible_exclusive(wq, condition) \
623 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
624 schedule())
625
626 #define wait_event_interruptible_exclusive(wq, condition) \
627 ({ \
628 int __ret = 0; \
629 might_sleep(); \
630 if (!(condition)) \
631 __ret = __wait_event_interruptible_exclusive(wq, condition); \
632 __ret; \
633 })
634
635 #define __wait_event_killable_exclusive(wq, condition) \
636 ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
637 schedule())
638
639 #define wait_event_killable_exclusive(wq, condition) \
640 ({ \
641 int __ret = 0; \
642 might_sleep(); \
643 if (!(condition)) \
644 __ret = __wait_event_killable_exclusive(wq, condition); \
645 __ret; \
646 })
647
648
649 #define __wait_event_freezable_exclusive(wq, condition) \
650 ___wait_event(wq, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 1, 0,\
651 schedule())
652
653 #define wait_event_freezable_exclusive(wq, condition) \
654 ({ \
655 int __ret = 0; \
656 might_sleep(); \
657 if (!(condition)) \
658 __ret = __wait_event_freezable_exclusive(wq, condition); \
659 __ret; \
660 })
661
662 /**
663 * wait_event_idle - wait for a condition without contributing to system load
664 * @wq_head: the waitqueue to wait on
665 * @condition: a C expression for the event to wait for
666 *
667 * The process is put to sleep (TASK_IDLE) until the
668 * @condition evaluates to true.
669 * The @condition is checked each time the waitqueue @wq_head is woken up.
670 *
671 * wake_up() has to be called after changing any variable that could
672 * change the result of the wait condition.
673 *
674 */
675 #define wait_event_idle(wq_head, condition) \
676 do { \
677 might_sleep(); \
678 if (!(condition)) \
679 ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \
680 } while (0)
681
682 /**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
684 * @wq_head: the waitqueue to wait on
685 * @condition: a C expression for the event to wait for
686 *
687 * The process is put to sleep (TASK_IDLE) until the
688 * @condition evaluates to true.
689 * The @condition is checked each time the waitqueue @wq_head is woken up.
690 *
691 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
692 * set thus if other processes wait on the same list, when this
693 * process is woken further processes are not considered.
694 *
695 * wake_up() has to be called after changing any variable that could
696 * change the result of the wait condition.
697 *
698 */
699 #define wait_event_idle_exclusive(wq_head, condition) \
700 do { \
701 might_sleep(); \
702 if (!(condition)) \
703 ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \
704 } while (0)
705
706 #define __wait_event_idle_timeout(wq_head, condition, timeout) \
707 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
708 TASK_IDLE, 0, timeout, \
709 __ret = schedule_timeout(__ret))
710
711 /**
712 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
713 * @wq_head: the waitqueue to wait on
714 * @condition: a C expression for the event to wait for
715 * @timeout: timeout, in jiffies
716 *
717 * The process is put to sleep (TASK_IDLE) until the
718 * @condition evaluates to true. The @condition is checked each time
719 * the waitqueue @wq_head is woken up.
720 *
721 * wake_up() has to be called after changing any variable that could
722 * change the result of the wait condition.
723 *
724 * Returns:
725 * 0 if the @condition evaluated to %false after the @timeout elapsed,
726 * 1 if the @condition evaluated to %true after the @timeout elapsed,
727 * or the remaining jiffies (at least 1) if the @condition evaluated
728 * to %true before the @timeout elapsed.
729 */
730 #define wait_event_idle_timeout(wq_head, condition, timeout) \
731 ({ \
732 long __ret = timeout; \
733 might_sleep(); \
734 if (!___wait_cond_timeout(condition)) \
735 __ret = __wait_event_idle_timeout(wq_head, condition, timeout); \
736 __ret; \
737 })
738
739 #define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
740 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
741 TASK_IDLE, 1, timeout, \
742 __ret = schedule_timeout(__ret))
743
744 /**
745 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
746 * @wq_head: the waitqueue to wait on
747 * @condition: a C expression for the event to wait for
748 * @timeout: timeout, in jiffies
749 *
750 * The process is put to sleep (TASK_IDLE) until the
751 * @condition evaluates to true. The @condition is checked each time
752 * the waitqueue @wq_head is woken up.
753 *
754 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
755 * set thus if other processes wait on the same list, when this
756 * process is woken further processes are not considered.
757 *
758 * wake_up() has to be called after changing any variable that could
759 * change the result of the wait condition.
760 *
761 * Returns:
762 * 0 if the @condition evaluated to %false after the @timeout elapsed,
763 * 1 if the @condition evaluated to %true after the @timeout elapsed,
764 * or the remaining jiffies (at least 1) if the @condition evaluated
765 * to %true before the @timeout elapsed.
766 */
767 #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
768 ({ \
769 long __ret = timeout; \
770 might_sleep(); \
771 if (!___wait_cond_timeout(condition)) \
772 __ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
773 __ret; \
774 })
775
776 extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
777 extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
778
779 #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
780 ({ \
781 int __ret; \
782 DEFINE_WAIT(__wait); \
783 if (exclusive) \
784 __wait.flags |= WQ_FLAG_EXCLUSIVE; \
785 do { \
786 __ret = fn(&(wq), &__wait); \
787 if (__ret) \
788 break; \
789 } while (!(condition)); \
790 __remove_wait_queue(&(wq), &__wait); \
791 __set_current_state(TASK_RUNNING); \
792 __ret; \
793 })
794
795
796 /**
797 * wait_event_interruptible_locked - sleep until a condition gets true
798 * @wq: the waitqueue to wait on
799 * @condition: a C expression for the event to wait for
800 *
801 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
802 * @condition evaluates to true or a signal is received.
803 * The @condition is checked each time the waitqueue @wq is woken up.
804 *
805 * It must be called with wq.lock being held. This spinlock is
806 * unlocked while sleeping but @condition testing is done while lock
807 * is held and when this macro exits the lock is held.
808 *
809 * The lock is locked/unlocked using spin_lock()/spin_unlock()
810 * functions which must match the way they are locked/unlocked outside
811 * of this macro.
812 *
813 * wake_up_locked() has to be called after changing any variable that could
814 * change the result of the wait condition.
815 *
816 * The function will return -ERESTARTSYS if it was interrupted by a
817 * signal and 0 if @condition evaluated to true.
818 */
819 #define wait_event_interruptible_locked(wq, condition) \
820 ((condition) \
821 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
822
823 /**
824 * wait_event_interruptible_locked_irq - sleep until a condition gets true
825 * @wq: the waitqueue to wait on
826 * @condition: a C expression for the event to wait for
827 *
828 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
829 * @condition evaluates to true or a signal is received.
830 * The @condition is checked each time the waitqueue @wq is woken up.
831 *
832 * It must be called with wq.lock being held. This spinlock is
833 * unlocked while sleeping but @condition testing is done while lock
834 * is held and when this macro exits the lock is held.
835 *
836 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
837 * functions which must match the way they are locked/unlocked outside
838 * of this macro.
839 *
840 * wake_up_locked() has to be called after changing any variable that could
841 * change the result of the wait condition.
842 *
843 * The function will return -ERESTARTSYS if it was interrupted by a
844 * signal and 0 if @condition evaluated to true.
845 */
846 #define wait_event_interruptible_locked_irq(wq, condition) \
847 ((condition) \
848 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
849
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, i.e. it is queued for an exclusive wakeup: once this process
 * has been woken, further waiters on the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
880
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, i.e. it is queued for an exclusive wakeup: once this process
 * has been woken, further waiters on the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
911
912
/* Sleep in TASK_KILLABLE: only @condition or a fatal signal ends the wait. */
#define __wait_event_killable(wq, condition) \
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * kill signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq_head, condition);	\
	__ret;								\
})
939
/* Sleep in the caller-supplied task @state until @condition is true. */
#define __wait_event_state(wq, condition, state)			\
	___wait_event(wq, condition, state, 0, 0, schedule())

/**
 * wait_event_state - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @state: state to sleep in
 *
 * The process is put to sleep (@state) until the @condition evaluates to true
 * or a signal is received (when allowed by @state). The @condition is checked
 * each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a signal
 * (when allowed by @state) and 0 if @condition evaluated to true.
 */
#define wait_event_state(wq_head, condition, state)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_state(wq_head, condition, state);	\
	__ret;								\
})
967
/*
 * Timeout variant of __wait_event_killable(): sleeps in TASK_KILLABLE and
 * tracks the remaining jiffies in __ret via schedule_timeout().
 */
#define __wait_event_killable_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_KILLABLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_killable_timeout(wq_head,		\
						condition, timeout);	\
	__ret;								\
})
1004
1005
/*
 * Wait in TASK_UNINTERRUPTIBLE with @lock dropped around @cmd and the actual
 * sleep; the lock is re-acquired before @condition is re-checked.  Note the
 * macro takes the lock's address itself (&lock), so @lock must name the
 * spinlock, not a pointer to it.
 */
#define __wait_event_lock_irq(wq_head, condition, lock, cmd)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards (passed by name,
 *	  not by pointer — the macro takes &lock)
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);		\
} while (0)
1042
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards (passed by name, not by pointer —
 *	  the macro takes &lock)
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 * The trailing empty macro argument below deliberately passes no @cmd.
 */
#define wait_event_lock_irq(wq_head, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq_head, condition, lock, );		\
} while (0)
1069
1070
/*
 * As __wait_event_lock_irq(), but sleeps in TASK_INTERRUPTIBLE so the wait
 * can be interrupted by a signal; the ___wait_event() result is returned
 * (this variant is not cast to void).
 */
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,	\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq_head,	\
						condition, lock, cmd);	\
	__ret;								\
})
1111
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * Same as wait_event_interruptible_lock_irq_cmd() with no extra @cmd;
 * the trailing empty macro argument below deliberately passes nothing.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq_head,	\
						condition, lock,);	\
	__ret;								\
})
1142
/*
 * Helper for the wait_event*_lock_irq_timeout() macros below: wait in the
 * given task @state with @lock (a spinlock taken with spin_lock_irq())
 * dropped around the actual sleep, tracking the remaining jiffies in __ret
 * via schedule_timeout().
 *
 * Note: the stray trailing ';' that used to follow the macro body has been
 * removed — it expanded to an empty statement at every use site.
 */
#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      state, 0, timeout,				\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock))
1149
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated to
 * %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_lock_irq_timeout(			\
					wq_head, condition, lock, timeout, \
					TASK_INTERRUPTIBLE);		\
	__ret;								\
})
1184
/**
 * wait_event_lock_irq_timeout - sleep until a condition gets true or a
 *		timeout elapses. The condition is checked under the lock.
 *		This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * Same as wait_event_interruptible_lock_irq_timeout() but sleeps in
 * TASK_UNINTERRUPTIBLE, so the wait cannot be interrupted by a signal
 * and never returns -ERESTARTSYS.
 */
#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_lock_irq_timeout(			\
					wq_head, condition, lock, timeout, \
					TASK_UNINTERRUPTIBLE);		\
	__ret;								\
})
1194
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
/* Queue @wq_entry on @wq_head and set the task state; _exclusive queues with WQ_FLAG_EXCLUSIVE. */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
/* As prepare_to_wait(), but returns -ERESTARTSYS when a pending signal aborts the wait. */
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
/* wait_woken()/woken_wake_function() pair: WQ_FLAG_WOKEN-based waiting (see their definitions). */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
/* Wake function that also removes the entry from the queue; used by DEFINE_WAIT(). */
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1205
/*
 * Define an on-stack wait queue entry for the current task, woken via
 * @function.  Unnamed members (.flags) are zero-initialized.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	struct wait_queue_entry name = {				\
		.private	= current,				\
		.func		= function,				\
		.entry		= LIST_HEAD_INIT((name).entry),		\
	}

/* Common case: auto-remove the entry from the queue on wakeup. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

/* Initialize an existing wait queue entry for the current task with @function. */
#define init_wait_func(wait, function)					\
	do {								\
		(wait)->private = current;				\
		(wait)->func = function;				\
		INIT_LIST_HEAD(&(wait)->entry);				\
		(wait)->flags = 0;					\
	} while (0)

/* Runtime counterpart of DEFINE_WAIT() for pre-existing entries. */
#define init_wait(wait)	init_wait_func(wait, autoremove_wake_function)
1224
/*
 * Invoke @func(@p, @arg) and return its result.
 * NOTE(review): the serialization guarantees (what task state @func may
 * safely inspect) are defined at the implementation — confirm there.
 */
typedef int (*task_call_f)(struct task_struct *p, void *arg);
extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);
1227
1228 #endif /* _LINUX_WAIT_H */
1229