/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUXKPI_LINUX_WAIT_H_
#define	_LINUXKPI_LINUX_WAIT_H_

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

#include <asm/atomic.h>

#include <sys/param.h>
#include <sys/systm.h>

#define	SKIP_SLEEP()	(SCHEDULER_STOPPED() || kdb_active)

#define	might_sleep()						\
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()")

#define	might_sleep_if(cond) do {				\
	if (cond) { might_sleep(); }				\
} while (0)

struct wait_queue;
struct wait_queue_head;

#define	wait_queue_entry wait_queue

typedef struct wait_queue wait_queue_t;
typedef struct wait_queue_entry wait_queue_entry_t;
typedef struct wait_queue_head wait_queue_head_t;

typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *);

#define	WQ_FLAG_WOKEN	0x02

/*
 * Many API consumers directly reference these fields and those of
 * wait_queue_head.
 */
struct wait_queue {
	unsigned int flags;
	void *private;
	wait_queue_func_t *func;
	union {
		struct list_head task_list;	/* < v4.13 */
		struct list_head entry;		/* >= v4.13 */
	};
};

struct wait_queue_head {
	spinlock_t lock;
	union {
		struct list_head task_list;	/* < v4.13 */
		struct list_head head;		/* >= v4.13 */
	};
};
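
/*
 * Illustrative sketch (not part of this header): because the list members
 * are unioned, a consumer written against either kernel version can walk
 * the queue using the field names it expects.  Assuming the >= v4.13 names
 * and a hypothetical head "wqh":
 *
 *	wait_queue_entry_t *pos;
 *
 *	spin_lock(&wqh->lock);
 *	list_for_each_entry(pos, &wqh->head, entry) {
 *		// inspect pos->flags, pos->private, ...
 *	}
 *	spin_unlock(&wqh->lock);
 */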

/*
 * This function is referenced by at least one DRM driver, so it may not be
 * renamed and furthermore must be the default wait queue callback.
 */
wait_queue_func_t autoremove_wake_function;
wait_queue_func_t default_wake_function;
wait_queue_func_t woken_wake_function;

long linux_wait_woken(wait_queue_t *wq, unsigned state, long timeout);

#define	wait_woken(wq, state, timeout)				\
	linux_wait_woken((wq), (state), (timeout))

#define	DEFINE_WAIT_FUNC(name, function)			\
	wait_queue_t name = {					\
		.private = current,				\
		.func = function,				\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list) \
	}

#define	DEFINE_WAIT(name)					\
	DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define	DECLARE_WAITQUEUE(name, task)				\
	wait_queue_t name = {					\
		.private = task,				\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list) \
	}

#define	DECLARE_WAIT_QUEUE_HEAD(name)				\
	wait_queue_head_t name = {				\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list), \
	};							\
	MTX_SYSINIT(name, &(name).lock, spin_lock_name("wqhead"), MTX_DEF)

#define	init_waitqueue_head(wqh) do {				\
	mtx_init(&(wqh)->lock, spin_lock_name("wqhead"),	\
	    NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS);		\
	INIT_LIST_HEAD(&(wqh)->task_list);			\
} while (0)

#define	__init_waitqueue_head(wqh, name, lk)	init_waitqueue_head(wqh)

void linux_init_wait_entry(wait_queue_t *, int);
void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool);

#define	init_wait_entry(wq, flags)				\
	linux_init_wait_entry(wq, flags)
#define	wake_up(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 1, false)
#define	wake_up_all(wqh)					\
	linux_wake_up(wqh, TASK_NORMAL, 0, false)
#define	wake_up_locked(wqh)					\
	linux_wake_up(wqh, TASK_NORMAL, 1, true)
#define	wake_up_all_locked(wqh)					\
	linux_wake_up(wqh, TASK_NORMAL, 0, true)
#define	wake_up_interruptible(wqh)				\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false)
#define	wake_up_interruptible_all(wqh)				\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false)

int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, long,
    unsigned int, spinlock_t *);
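
/*
 * Illustrative sketch (not part of this header): the canonical Linux
 * wait_woken() consumer pattern as it would look against this emulation.
 * "my_wqh" and "cond" are hypothetical.
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *	long timeout = MAX_SCHEDULE_TIMEOUT;
 *
 *	add_wait_queue(&my_wqh, &wait);
 *	while (!cond)
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&my_wqh, &wait);
 */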

/*
 * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if
 * cond is true after timeout, remaining jiffies (> 0) if cond is true before
 * timeout.
 */
#define	__wait_event_common(wqh, cond, timeout, state, lock) ({	\
	DEFINE_WAIT(__wq);					\
	const long __timeout = ((long)(timeout)) < 1 ? 1 : (timeout); \
	long __start = jiffies;					\
	long __ret = 0;						\
								\
	for (;;) {						\
		linux_prepare_to_wait(&(wqh), &__wq, state);	\
		if (cond)					\
			break;					\
		__ret = linux_wait_event_common(&(wqh), &__wq,	\
		    __timeout, state, lock);			\
		if (__ret != 0)					\
			break;					\
	}							\
	linux_finish_wait(&(wqh), &__wq);			\
	if (__timeout != MAX_SCHEDULE_TIMEOUT) {		\
		if (__ret == -EWOULDBLOCK)			\
			__ret = !!(cond);			\
		else if (__ret != -ERESTARTSYS) {		\
			__ret = __timeout + __start - jiffies;	\
			/* range check return value */		\
			if (__ret < 1)				\
				__ret = 1;			\
			else if (__ret > __timeout)		\
				__ret = __timeout;		\
		}						\
	}							\
	__ret;							\
})

#define	wait_event(wqh, cond) do {					\
	(void) __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_UNINTERRUPTIBLE, NULL);				\
} while (0)

#define	wait_event_timeout(wqh, cond, timeout) ({			\
	__wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE,	\
	    NULL);							\
})

#define	wait_event_killable(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

#define	wait_event_interruptible(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

#define	wait_event_interruptible_timeout(wqh, cond, timeout) ({	\
	__wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE,	\
	    NULL);							\
})

/*
 * Wait queue is already locked.
 */
#define	wait_event_interruptible_locked(wqh, cond) ({			\
	int __ret;							\
									\
	spin_unlock(&(wqh).lock);					\
	__ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_INTERRUPTIBLE, NULL);					\
	spin_lock(&(wqh).lock);						\
	__ret;								\
})

/*
 * The passed spinlock is held when testing the condition.
 */
#define	wait_event_interruptible_lock_irq(wqh, cond, lock) ({		\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, &(lock));				\
})
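
/*
 * Illustrative sketch (not part of this header): interpreting the
 * wait_event_interruptible_timeout() result per the contract documented
 * above __wait_event_common().  "my_wqh" and "done" are hypothetical.
 *
 *	long ret = wait_event_interruptible_timeout(my_wqh, done != 0, 2 * HZ);
 *	if (ret == -ERESTARTSYS)
 *		return (-EINTR);	(interrupted by a signal)
 *	if (ret == 0)
 *		return (-ETIMEDOUT);	(timed out, "done" still false)
 *	("done" became true; ret holds the remaining jiffies, >= 1)
 */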

/*
 * The passed spinlock is held when testing the condition.
 */
#define	wait_event_lock_irq(wqh, cond, lock) ({			\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_UNINTERRUPTIBLE, &(lock));			\
})

static inline void
__add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add(&wq->task_list, &wqh->task_list);
}

static inline void
add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__add_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

static inline void
__add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add_tail(&wq->task_list, &wqh->task_list);
}

static inline void
__add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wq)
{
	list_add_tail(&wq->entry, &wqh->head);
}

static inline void
__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_del(&wq->task_list);
}

static inline void
remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__remove_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

bool linux_waitqueue_active(wait_queue_head_t *);

#define	waitqueue_active(wqh)		linux_waitqueue_active(wqh)

void linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int);
void linux_finish_wait(wait_queue_head_t *, wait_queue_t *);

#define	prepare_to_wait(wqh, wq, state)	linux_prepare_to_wait(wqh, wq, state)
#define	finish_wait(wqh, wq)		linux_finish_wait(wqh, wq)

void linux_wake_up_bit(void *, int);
int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, long);
void linux_wake_up_atomic_t(atomic_t *);
int linux_wait_on_atomic_t(atomic_t *, unsigned int);

#define	wake_up_bit(word, bit)		linux_wake_up_bit(word, bit)
#define	wait_on_bit(word, bit, state)					\
	linux_wait_on_bit_timeout(word, bit, state, MAX_SCHEDULE_TIMEOUT)
#define	wait_on_bit_timeout(word, bit, state, timeout)			\
	linux_wait_on_bit_timeout(word, bit, state, timeout)
#define	wake_up_atomic_t(a)		linux_wake_up_atomic_t(a)
/*
 * All existing callers have a cb that just schedule()s. To avoid adding
 * complexity, just emulate that internally. The prototype is different so that
 * callers must be manually modified; a cb that does something other than call
 * schedule() will require special treatment.
 */
#define	wait_on_atomic_t(a, state)	linux_wait_on_atomic_t(a, state)

struct task_struct;
bool linux_wake_up_state(struct task_struct *, unsigned int);

#define	wake_up_process(task)		linux_wake_up_state(task, TASK_NORMAL)
#define	wake_up_state(task, state)	linux_wake_up_state(task, state)

#endif /* _LINUXKPI_LINUX_WAIT_H_ */