/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _LINUX_WAIT_H_
#define	_LINUX_WAIT_H_

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include <asm/atomic.h>

#include <sys/param.h>
#include <sys/systm.h>

#define	SKIP_SLEEP() (SCHEDULER_STOPPED() || kdb_active)

#define	might_sleep()							\
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()")

#define	might_sleep_if(cond) do { \
	if (cond) { might_sleep(); } \
} while (0)
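
/*
 * Example (illustrative sketch, not part of this header): place the
 * annotation at the top of a function that may block, so WITNESS can
 * flag callers that hold a non-sleepable lock. "foo_alloc" is a
 * hypothetical consumer:
 *
 *	static void *
 *	foo_alloc(size_t size)
 *	{
 *		might_sleep();
 *		return (kmalloc(size, GFP_KERNEL));
 *	}
 */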

struct wait_queue;
struct wait_queue_head;

#define	wait_queue_entry wait_queue

typedef struct wait_queue wait_queue_t;
typedef struct wait_queue_entry wait_queue_entry_t;
typedef struct wait_queue_head wait_queue_head_t;

typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *);

/*
 * Many API consumers directly reference these fields and those of
 * wait_queue_head.
 */
struct wait_queue {
	unsigned int flags;	/* always 0 */
	void *private;
	wait_queue_func_t *func;
	union {
		struct list_head task_list; /* < v4.13 */
		struct list_head entry; /* >= v4.13 */
	};
};

struct wait_queue_head {
	spinlock_t lock;
	union {
		struct list_head task_list; /* < v4.13 */
		struct list_head head; /* >= v4.13 */
	};
};

/*
 * This function is referenced by at least one DRM driver, so it may not be
 * renamed and furthermore must be the default wait queue callback.
 */
extern wait_queue_func_t autoremove_wake_function;
extern wait_queue_func_t default_wake_function;

#define	DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private = current,					\
		.func = function,					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
	}

#define	DEFINE_WAIT(name) \
	DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define	DECLARE_WAITQUEUE(name, task)					\
	wait_queue_t name = {						\
		.private = task,					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
	}
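
/*
 * Example (illustrative sketch, not part of this header): the classic
 * open-coded wait loop built from DEFINE_WAIT() and the
 * prepare_to_wait()/finish_wait() pair declared further below.
 * "foo_cond" and "foo_wq" are hypothetical consumer state:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&foo_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (foo_cond)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&foo_wq, &wait);
 */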

#define	DECLARE_WAIT_QUEUE_HEAD(name)					\
	wait_queue_head_t name = {					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list),	\
	};								\
	MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF)

#define	init_waitqueue_head(wqh) do {					\
	mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"),		\
	    NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS);			\
	INIT_LIST_HEAD(&(wqh)->task_list);				\
} while (0)

void linux_init_wait_entry(wait_queue_t *, int);
void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool);

#define	init_wait_entry(wq, flags)					\
	linux_init_wait_entry(wq, flags)
#define	wake_up(wqh)							\
	linux_wake_up(wqh, TASK_NORMAL, 1, false)
#define	wake_up_all(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 0, false)
#define	wake_up_locked(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 1, true)
#define	wake_up_all_locked(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 0, true)
#define	wake_up_interruptible(wqh)					\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false)
#define	wake_up_interruptible_all(wqh)					\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false)
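
/*
 * Example (illustrative sketch, not part of this header): the waker
 * updates the shared condition before calling wake_up(); "foo_done"
 * and "foo_wq" are hypothetical consumer state:
 *
 *	foo_done = 1;
 *	wake_up_interruptible(&foo_wq);
 */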

int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, int,
    unsigned int, spinlock_t *);

/*
 * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if
 * cond is true after timeout, remaining jiffies (> 0) if cond is true before
 * timeout.
 */
#define	__wait_event_common(wqh, cond, timeout, state, lock) ({	\
	DEFINE_WAIT(__wq);					\
	const int __timeout = ((int)(timeout)) < 1 ? 1 : (timeout);	\
	int __start = ticks;					\
	int __ret = 0;						\
								\
	for (;;) {						\
		linux_prepare_to_wait(&(wqh), &__wq, state);	\
		if (cond)					\
			break;					\
		__ret = linux_wait_event_common(&(wqh), &__wq,	\
		    __timeout, state, lock);			\
		if (__ret != 0)					\
			break;					\
	}							\
	linux_finish_wait(&(wqh), &__wq);			\
	if (__timeout != MAX_SCHEDULE_TIMEOUT) {		\
		if (__ret == -EWOULDBLOCK)			\
			__ret = !!(cond);			\
		else if (__ret != -ERESTARTSYS) {		\
			__ret = __timeout + __start - ticks;	\
			/* range check return value */		\
			if (__ret < 1)				\
				__ret = 1;			\
			else if (__ret > __timeout)		\
				__ret = __timeout;		\
		}						\
	}							\
	__ret;							\
})

#define	wait_event(wqh, cond) do {					\
	(void) __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_UNINTERRUPTIBLE, NULL);				\
} while (0)

#define	wait_event_timeout(wqh, cond, timeout) ({			\
	__wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE,	\
	    NULL);							\
})

#define	wait_event_killable(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

#define	wait_event_interruptible(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

#define	wait_event_interruptible_timeout(wqh, cond, timeout) ({		\
	__wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE,	\
	    NULL);							\
})
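
/*
 * Example (illustrative sketch, not part of this header): interpreting
 * the return values documented above __wait_event_common(); "foo_done"
 * and "foo_wq" are hypothetical consumer state:
 *
 *	int ret;
 *
 *	ret = wait_event_interruptible_timeout(foo_wq, foo_done != 0,
 *	    msecs_to_jiffies(100));
 *	if (ret == -ERESTARTSYS)
 *		return (ret);		// interrupted by a signal
 *	else if (ret == 0)
 *		return (-ETIMEDOUT);	// condition still false at timeout
 *	// ret > 0: condition became true, remaining jiffies (at least 1)
 */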

/*
 * The wait queue head is locked by the caller; the lock is dropped for
 * the duration of the wait and reacquired before returning.
 */
#define	wait_event_interruptible_locked(wqh, cond) ({			\
	int __ret;							\
									\
	spin_unlock(&(wqh).lock);					\
	__ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_INTERRUPTIBLE, NULL);					\
	spin_lock(&(wqh).lock);						\
	__ret;								\
})

/*
 * The passed spinlock is held when testing the condition.
 */
#define	wait_event_interruptible_lock_irq(wqh, cond, lock) ({		\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, &(lock));				\
})

/*
 * The passed spinlock is held when testing the condition.
 */
#define	wait_event_lock_irq(wqh, cond, lock) ({			\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_UNINTERRUPTIBLE, &(lock));			\
})
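
/*
 * Example (illustrative sketch, not part of this header): waiting for a
 * condition protected by a spinlock; the lock is held while the
 * condition is tested and on return. "foo_lock", "foo_list" and
 * "foo_wq" are hypothetical consumer state:
 *
 *	spin_lock_irq(&foo_lock);
 *	wait_event_lock_irq(foo_wq, !list_empty(&foo_list), foo_lock);
 *	// foo_lock is held here and the condition is true
 *	spin_unlock_irq(&foo_lock);
 */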

static inline void
__add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add(&wq->task_list, &wqh->task_list);
}

static inline void
add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__add_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

static inline void
__add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add_tail(&wq->task_list, &wqh->task_list);
}

static inline void
__add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wq)
{
	list_add_tail(&wq->entry, &wqh->head);
}

static inline void
__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_del(&wq->task_list);
}

static inline void
remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__remove_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

bool linux_waitqueue_active(wait_queue_head_t *);

#define	waitqueue_active(wqh)		linux_waitqueue_active(wqh)
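
/*
 * Example (illustrative sketch, not part of this header): skip the
 * wakeup call when no thread is waiting. As in Linux, this is only
 * safe if the caller can tolerate a missed wakeup or orders the
 * condition store against the waiter check itself. "foo_wq" is
 * hypothetical consumer state:
 *
 *	if (waitqueue_active(&foo_wq))
 *		wake_up(&foo_wq);
 */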

void linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int);
void linux_finish_wait(wait_queue_head_t *, wait_queue_t *);

#define	prepare_to_wait(wqh, wq, state)	linux_prepare_to_wait(wqh, wq, state)
#define	finish_wait(wqh, wq)		linux_finish_wait(wqh, wq)

void linux_wake_up_bit(void *, int);
int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, int);
void linux_wake_up_atomic_t(atomic_t *);
int linux_wait_on_atomic_t(atomic_t *, unsigned int);

#define	wake_up_bit(word, bit)		linux_wake_up_bit(word, bit)
#define	wait_on_bit(word, bit, state)					\
	linux_wait_on_bit_timeout(word, bit, state, MAX_SCHEDULE_TIMEOUT)
#define	wait_on_bit_timeout(word, bit, state, timeout)			\
	linux_wait_on_bit_timeout(word, bit, state, timeout)
#define	wake_up_atomic_t(a)		linux_wake_up_atomic_t(a)
/*
 * All existing callers pass a callback that simply calls schedule(), so
 * emulate that behaviour internally to avoid extra complexity. The
 * prototype deliberately differs from Linux's so that callers must be
 * modified by hand; a callback that does anything other than call
 * schedule() will require special treatment.
 */
#define	wait_on_atomic_t(a, state)	linux_wait_on_atomic_t(a, state)
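
/*
 * Example (illustrative sketch, not part of this header): sleeping
 * until another context clears a flag bit. "FOO_BUSY" and "foo_flags"
 * are hypothetical consumer state:
 *
 *	// waiter: sleep while bit FOO_BUSY is set in foo_flags
 *	if (wait_on_bit(&foo_flags, FOO_BUSY, TASK_INTERRUPTIBLE) != 0)
 *		return (-EINTR);	// interrupted by a signal
 *
 *	// waker: clear the bit, then wake anyone in wait_on_bit()
 *	clear_bit(FOO_BUSY, &foo_flags);
 *	wake_up_bit(&foo_flags, FOO_BUSY);
 */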

struct task_struct;
bool linux_wake_up_state(struct task_struct *, unsigned int);

#define	wake_up_process(task)		linux_wake_up_state(task, TASK_NORMAL)
#define	wake_up_state(task, state)	linux_wake_up_state(task, state)

#endif /* _LINUX_WAIT_H_ */