/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUXKPI_LINUX_WAIT_H_
#define	_LINUXKPI_LINUX_WAIT_H_

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

#include <asm/atomic.h>

#include <sys/param.h>
#include <sys/systm.h>

#define	SKIP_SLEEP() (SCHEDULER_STOPPED() || kdb_active)

#define	might_sleep()							\
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()")

#define	might_sleep_if(cond) do { \
	if (cond) { might_sleep(); } \
} while (0)

struct wait_queue;
struct wait_queue_head;

#define	wait_queue_entry wait_queue

typedef struct wait_queue wait_queue_t;
typedef struct wait_queue_entry wait_queue_entry_t;
typedef struct wait_queue_head wait_queue_head_t;

typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *);

/*
 * Many API consumers directly reference these fields and those of
 * wait_queue_head.
 */
struct wait_queue {
	unsigned int flags;	/* always 0 */
	void *private;
	wait_queue_func_t *func;
	union {
		struct list_head task_list; /* < v4.13 */
		struct list_head entry; /* >= v4.13 */
	};
};

struct wait_queue_head {
	spinlock_t lock;
	union {
		struct list_head task_list; /* < v4.13 */
		struct list_head head; /* >= v4.13 */
	};
};

/*
 * This function is referenced by at least one DRM driver, so it may not be
 * renamed and furthermore must be the default wait queue callback.
 */
extern wait_queue_func_t autoremove_wake_function;
extern wait_queue_func_t default_wake_function;

#define	DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private = current,					\
		.func = function,					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
	}

#define	DEFINE_WAIT(name) \
	DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define	DECLARE_WAITQUEUE(name, task)					\
	wait_queue_t name = {						\
		.private = task,					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
	}
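
/*
 * Illustrative sketch (not part of the original header): a consumer may pass
 * its own wait_queue_func_t to DEFINE_WAIT_FUNC().  The callback and names
 * below are hypothetical; this one simply delegates to
 * default_wake_function():
 *
 *	static int
 *	foo_wake_cb(wait_queue_t *wq, unsigned int state, int flags, void *key)
 *	{
 *		return (default_wake_function(wq, state, flags, key));
 *	}
 *
 *	DEFINE_WAIT_FUNC(wait, foo_wake_cb);
 */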

#define	DECLARE_WAIT_QUEUE_HEAD(name)					\
	wait_queue_head_t name = {					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list),	\
	};								\
	MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF)

#define	init_waitqueue_head(wqh) do {					\
	mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"),		\
	    NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS);			\
	INIT_LIST_HEAD(&(wqh)->task_list);				\
} while (0)

#define	__init_waitqueue_head(wqh, name, lk) init_waitqueue_head(wqh)
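
/*
 * Illustrative sketch (not part of the original header): a wait queue head
 * can be defined statically with DECLARE_WAIT_QUEUE_HEAD() or embedded in a
 * structure and set up at run time; "foo_softc" and "sc" are hypothetical:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
 *
 *	struct foo_softc {
 *		wait_queue_head_t wq;
 *	};
 *
 *	init_waitqueue_head(&sc->wq);
 */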

void linux_init_wait_entry(wait_queue_t *, int);
void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool);

#define	init_wait_entry(wq, flags)					\
	linux_init_wait_entry(wq, flags)
#define	wake_up(wqh)							\
	linux_wake_up(wqh, TASK_NORMAL, 1, false)
#define	wake_up_all(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 0, false)
#define	wake_up_locked(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 1, true)
#define	wake_up_all_locked(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 0, true)
#define	wake_up_interruptible(wqh)					\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false)
#define	wake_up_interruptible_all(wqh)					\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false)
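
/*
 * Illustrative sketch (hypothetical names): a producer updates the shared
 * condition first and then wakes the sleepers, matching a wait_event*()
 * call on the consumer side:
 *
 *	sc->done = true;
 *	wake_up(&sc->wq);		(wake a single TASK_NORMAL waiter)
 *	wake_up_all(&sc->wq);		(or wake every waiter instead)
 */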

int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, int,
    unsigned int, spinlock_t *);

/*
 * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if
 * cond is true after timeout, remaining jiffies (> 0) if cond is true before
 * timeout.
 */
#define	__wait_event_common(wqh, cond, timeout, state, lock) ({	\
	DEFINE_WAIT(__wq);					\
	const int __timeout = ((int)(timeout)) < 1 ? 1 : (timeout);	\
	int __start = ticks;					\
	int __ret = 0;						\
								\
	for (;;) {						\
		linux_prepare_to_wait(&(wqh), &__wq, state);	\
		if (cond)					\
			break;					\
		__ret = linux_wait_event_common(&(wqh), &__wq,	\
		    __timeout, state, lock);			\
		if (__ret != 0)					\
			break;					\
	}							\
	linux_finish_wait(&(wqh), &__wq);			\
	if (__timeout != MAX_SCHEDULE_TIMEOUT) {		\
		if (__ret == -EWOULDBLOCK)			\
			__ret = !!(cond);			\
		else if (__ret != -ERESTARTSYS) {		\
			__ret = __timeout + __start - ticks;	\
			/* range check return value */		\
			if (__ret < 1)				\
				__ret = 1;			\
			else if (__ret > __timeout)		\
				__ret = __timeout;		\
		}						\
	}							\
	__ret;							\
})

#define	wait_event(wqh, cond) do {					\
	(void) __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_UNINTERRUPTIBLE, NULL);				\
} while (0)

#define	wait_event_timeout(wqh, cond, timeout) ({			\
	__wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE,	\
	    NULL);							\
})

#define	wait_event_killable(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

#define	wait_event_interruptible(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

#define	wait_event_interruptible_timeout(wqh, cond, timeout) ({		\
	__wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE,	\
	    NULL);							\
})
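
/*
 * Illustrative sketch (hypothetical names): decoding the return value of
 * the timeout variants, per the comment above __wait_event_common():
 *
 *	int ret;
 *
 *	ret = wait_event_interruptible_timeout(sc->wq, sc->done,
 *	    msecs_to_jiffies(100));
 *	if (ret == -ERESTARTSYS)
 *		return (ret);			(interrupted by a signal)
 *	if (ret == 0)
 *		return (-ETIMEDOUT);		(timed out, cond still false)
 *	(otherwise cond is true; ret is 1 or the remaining jiffies)
 */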

/*
 * Wait queue is already locked.
 */
#define	wait_event_interruptible_locked(wqh, cond) ({			\
	int __ret;							\
									\
	spin_unlock(&(wqh).lock);					\
	__ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_INTERRUPTIBLE, NULL);					\
	spin_lock(&(wqh).lock);						\
	__ret;								\
})

/*
 * The passed spinlock is held when testing the condition.
 */
#define	wait_event_interruptible_lock_irq(wqh, cond, lock) ({		\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, &(lock));				\
})

/*
 * The passed spinlock is held when testing the condition.
 */
#define	wait_event_lock_irq(wqh, cond, lock) ({			\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_UNINTERRUPTIBLE, &(lock));			\
})
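
/*
 * Illustrative sketch (hypothetical names): with wait_event_lock_irq() the
 * caller holds the spinlock around the condition test, and the macro drops
 * and reacquires it internally while sleeping:
 *
 *	spin_lock_irq(&sc->lock);
 *	wait_event_lock_irq(sc->wq, sc->count != 0, sc->lock);
 *	sc->count--;			(condition still true under sc->lock)
 *	spin_unlock_irq(&sc->lock);
 */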

static inline void
__add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add(&wq->task_list, &wqh->task_list);
}

static inline void
add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__add_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

static inline void
__add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add_tail(&wq->task_list, &wqh->task_list);
}

static inline void
__add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wq)
{
	list_add_tail(&wq->entry, &wqh->head);
}

static inline void
__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_del(&wq->task_list);
}

static inline void
remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__remove_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

bool linux_waitqueue_active(wait_queue_head_t *);

#define	waitqueue_active(wqh)		linux_waitqueue_active(wqh)

void linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int);
void linux_finish_wait(wait_queue_head_t *, wait_queue_t *);

#define	prepare_to_wait(wqh, wq, state)	linux_prepare_to_wait(wqh, wq, state)
#define	finish_wait(wqh, wq)		linux_finish_wait(wqh, wq)
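
/*
 * Illustrative sketch (hypothetical names): the classic open-coded wait
 * loop built from these primitives; schedule() comes from <linux/sched.h>:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&sc->wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (sc->done)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&sc->wq, &wait);
 */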

void linux_wake_up_bit(void *, int);
int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, int);
void linux_wake_up_atomic_t(atomic_t *);
int linux_wait_on_atomic_t(atomic_t *, unsigned int);

#define	wake_up_bit(word, bit)		linux_wake_up_bit(word, bit)
#define	wait_on_bit(word, bit, state)					\
	linux_wait_on_bit_timeout(word, bit, state, MAX_SCHEDULE_TIMEOUT)
#define	wait_on_bit_timeout(word, bit, state, timeout)			\
	linux_wait_on_bit_timeout(word, bit, state, timeout)
#define	wake_up_atomic_t(a)		linux_wake_up_atomic_t(a)
/*
 * All existing callers have a cb that just schedule()s. To avoid adding
 * complexity, just emulate that internally. The prototype is different so that
 * callers must be manually modified; a cb that does something other than call
 * schedule() will require special treatment.
 */
#define	wait_on_atomic_t(a, state)	linux_wait_on_atomic_t(a, state)
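
/*
 * Illustrative sketch (hypothetical names): waiting for a flag bit to clear
 * and signalling the waiters; "sc->flags" is an unsigned long and
 * clear_bit() comes from <linux/bitops.h>:
 *
 *	wait_on_bit(&sc->flags, FOO_BIT_BUSY, TASK_UNINTERRUPTIBLE);
 *	...
 *	clear_bit(FOO_BIT_BUSY, &sc->flags);
 *	wake_up_bit(&sc->flags, FOO_BIT_BUSY);
 */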

struct task_struct;
bool linux_wake_up_state(struct task_struct *, unsigned int);

#define	wake_up_process(task)		linux_wake_up_state(task, TASK_NORMAL)
#define	wake_up_state(task, state)	linux_wake_up_state(task, state)
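
/*
 * Illustrative sketch (hypothetical names): a worker parks itself and is
 * later woken directly by task pointer, without any wait queue:
 *
 *	(worker)
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	schedule();
 *
 *	(elsewhere)
 *	wake_up_process(worker_task);
 */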

#endif /* _LINUXKPI_LINUX_WAIT_H_ */