/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _LINUX_WAIT_H_
#define _LINUX_WAIT_H_

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include <asm/atomic.h>

#include <sys/param.h>
#include <sys/systm.h>

#define	SKIP_SLEEP()	(SCHEDULER_STOPPED() || kdb_active)

#define	might_sleep()							\
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()")

struct wait_queue;
struct wait_queue_head;

typedef struct wait_queue wait_queue_t;
typedef struct wait_queue_head wait_queue_head_t;

typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *);

/*
 * Many API consumers directly reference these fields and those of
 * wait_queue_head.
 */
struct wait_queue {
	unsigned int flags;	/* always 0 */
	void *private;
	wait_queue_func_t *func;
	struct list_head task_list;
};

struct wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
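
/*
 * Illustrative sketch only (the "my_wake_cb" callback and "my_wqh" are
 * hypothetical, not part of this API): because the fields above are public,
 * a consumer can build a wait queue entry by hand and install its own wake
 * callback instead of using the DEFINE_WAIT()/DECLARE_WAITQUEUE() helpers
 * below:
 *
 *	static int
 *	my_wake_cb(wait_queue_t *wq, unsigned int state, int flags, void *key)
 *	{
 *		// Wake the sleeping thread stashed in the private field.
 *		return (wake_up_state((struct task_struct *)wq->private, state));
 *	}
 *
 *	wait_queue_t wq = {
 *		.flags = 0,
 *		.private = current,
 *		.func = my_wake_cb,
 *		.task_list = LINUX_LIST_HEAD_INIT(wq.task_list),
 *	};
 *	add_wait_queue(&my_wqh, &wq);
 *	...				// sleep, e.g. via schedule()
 *	remove_wait_queue(&my_wqh, &wq);
 */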

/*
 * This function is referenced by at least one DRM driver, so it may not be
 * renamed and furthermore must be the default wait queue callback.
 */
extern wait_queue_func_t autoremove_wake_function;

#define	DEFINE_WAIT(name)						\
	wait_queue_t name = {						\
		.private = current,					\
		.func = autoremove_wake_function,			\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
	}

#define	DECLARE_WAITQUEUE(name, task)					\
	wait_queue_t name = {						\
		.private = task,					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
	}

#define	DECLARE_WAIT_QUEUE_HEAD(name)					\
	wait_queue_head_t name = {					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list),	\
	};								\
	MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF)

#define	init_waitqueue_head(wqh) do {					\
	mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"),		\
	    NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS);			\
	INIT_LIST_HEAD(&(wqh)->task_list);				\
} while (0)

void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool);

#define	wake_up(wqh)							\
	linux_wake_up(wqh, TASK_NORMAL, 1, false)
#define	wake_up_all(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 0, false)
#define	wake_up_locked(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 1, true)
#define	wake_up_all_locked(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 0, true)
#define	wake_up_interruptible(wqh)					\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false)
#define	wake_up_interruptible_all(wqh)					\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false)

int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, int,
    unsigned int, spinlock_t *);

/*
 * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if
 * cond is true after timeout, remaining jiffies (> 0) if cond is true before
 * timeout.
 */
#define	__wait_event_common(wqh, cond, timeout, state, lock) ({	\
	DEFINE_WAIT(__wq);						\
	const int __timeout = (timeout) < 1 ? 1 : (timeout);		\
	int __start = ticks;						\
	int __ret = 0;							\
									\
	for (;;) {							\
		linux_prepare_to_wait(&(wqh), &__wq, state);		\
		if (cond) {						\
			__ret = 1;					\
			break;						\
		}							\
		__ret = linux_wait_event_common(&(wqh), &__wq,		\
		    __timeout, state, lock);				\
		if (__ret != 0)						\
			break;						\
	}								\
	linux_finish_wait(&(wqh), &__wq);				\
	if (__timeout != MAX_SCHEDULE_TIMEOUT) {			\
		if (__ret == -EWOULDBLOCK)				\
			__ret = !!(cond);				\
		else if (__ret != -ERESTARTSYS) {			\
			__ret = __timeout + __start - ticks;		\
			/* range check return value */			\
			if (__ret < 1)					\
				__ret = 1;				\
			else if (__ret > __timeout)			\
				__ret = __timeout;			\
		}							\
	}								\
	__ret;								\
})

#define	wait_event(wqh, cond) ({					\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_UNINTERRUPTIBLE, NULL);				\
})

#define	wait_event_timeout(wqh, cond, timeout) ({			\
	__wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE,	\
	    NULL);							\
})

#define	wait_event_interruptible(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

#define	wait_event_interruptible_timeout(wqh, cond, timeout) ({	\
	__wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE,	\
	    NULL);							\
})
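
/*
 * Illustrative sketch only ("sc", its "wqh" and "ready" members, and the
 * 2 * HZ timeout are hypothetical): a typical pairing of the macros above is
 * a consumer that sleeps until a flag is set and a producer that sets the
 * flag and wakes the queue.  Per the return contract documented at
 * __wait_event_common() above, a negative value means a signal arrived, zero
 * means the timeout expired with the condition still false, and a positive
 * value means the condition is true.
 *
 *	// consumer
 *	ret = wait_event_interruptible_timeout(sc->wqh, sc->ready, 2 * HZ);
 *	if (ret == -ERESTARTSYS)
 *		return (-ERESTARTSYS);	// interrupted by a signal
 *	if (ret == 0)
 *		return (-ETIMEDOUT);	// timed out, sc->ready still false
 *	// ret > 0: sc->ready is true; "ret" holds the remaining jiffies
 *
 *	// producer
 *	sc->ready = true;
 *	wake_up(&sc->wqh);
 */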

/*
 * Wait queue is already locked.
 */
#define	wait_event_interruptible_locked(wqh, cond) ({			\
	int __ret;							\
									\
	spin_unlock(&(wqh).lock);					\
	__ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_INTERRUPTIBLE, NULL);					\
	spin_lock(&(wqh).lock);						\
	__ret;								\
})

/*
 * Hold the (locked) spinlock when testing the cond.
 */
#define	wait_event_interruptible_lock_irq(wqh, cond, lock) ({		\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, &(lock));				\
})

static inline void
__add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add(&wq->task_list, &wqh->task_list);
}

static inline void
add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__add_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

static inline void
__add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add_tail(&wq->task_list, &wqh->task_list);
}

static inline void
__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_del(&wq->task_list);
}

static inline void
remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__remove_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

bool linux_waitqueue_active(wait_queue_head_t *);

#define	waitqueue_active(wqh)		linux_waitqueue_active(wqh)

void linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int);
void linux_finish_wait(wait_queue_head_t *, wait_queue_t *);

#define	prepare_to_wait(wqh, wq, state)	linux_prepare_to_wait(wqh, wq, state)
#define	finish_wait(wqh, wq)		linux_finish_wait(wqh, wq)

void linux_wake_up_bit(void *, int);
int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, int);
void linux_wake_up_atomic_t(atomic_t *);
int linux_wait_on_atomic_t(atomic_t *, unsigned int);

#define	wake_up_bit(word, bit)		linux_wake_up_bit(word, bit)
#define	wait_on_bit_timeout(word, bit, state, timeout)			\
	linux_wait_on_bit_timeout(word, bit, state, timeout)
#define	wake_up_atomic_t(a)		linux_wake_up_atomic_t(a)
/*
 * All existing callers have a cb that just schedule()s. To avoid adding
 * complexity, just emulate that internally. The prototype is different so that
 * callers must be manually modified; a cb that does something other than call
 * schedule() will require special treatment.
 */
#define	wait_on_atomic_t(a, state)	linux_wait_on_atomic_t(a, state)

struct task_struct;
bool linux_wake_up_state(struct task_struct *, unsigned int);

#define	wake_up_process(task)		linux_wake_up_state(task, TASK_NORMAL)
#define	wake_up_state(task, state)	linux_wake_up_state(task, state)

#endif /* _LINUX_WAIT_H_ */