/*-
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/*
 * Convert a Linux sleep state into sleepqueue(9) flags, put the current
 * thread to sleep on "wchan" and translate the wakeup reason into a
 * Linux-style error code (0, -EWOULDBLOCK or -ERESTARTSYS).
 */
static int
linux_add_to_sleepqueue(void *wchan, struct task_struct *task,
    const char *wmesg, int timeout, int state)
{
        int flags, ret;

        MPASS((state & ~(TASK_PARKED | TASK_NORMAL)) == 0);

        flags = SLEEPQ_SLEEP | ((state & TASK_INTERRUPTIBLE) != 0 ?
            SLEEPQ_INTERRUPTIBLE : 0);

        sleepq_add(wchan, NULL, wmesg, flags, 0);
        if (timeout != 0)
                sleepq_set_timeout(wchan, timeout);

        DROP_GIANT();
        if ((state & TASK_INTERRUPTIBLE) != 0) {
                if (timeout == 0)
                        ret = -sleepq_wait_sig(wchan, 0);
                else
                        ret = -sleepq_timedwait_sig(wchan, 0);
        } else {
                if (timeout == 0) {
                        sleepq_wait(wchan, 0);
                        ret = 0;
                } else
                        ret = -sleepq_timedwait(wchan, 0);
        }
        PICKUP_GIANT();

        /* filter return value */
        if (ret != 0 && ret != -EWOULDBLOCK) {
                linux_schedule_save_interrupt_value(task, ret);
                ret = -ERESTARTSYS;
        }
        return (ret);
}
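
/*
 * msleep_interruptible() counterpart: sleep for at least "ms" milliseconds,
 * returning 0 if the full period elapsed or "ms" if the sleep was cut short
 * by a signal.
 */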
unsigned int
linux_msleep_interruptible(unsigned int ms)
{
        int ret;

        /* guard against invalid values */
        if (ms == 0)
                ms = 1;
        ret = -pause_sbt("lnxsleep", mstosbt(ms), 0, C_HARDCLOCK | C_CATCH);

        switch (ret) {
        case -EWOULDBLOCK:
                return (0);
        default:
                linux_schedule_save_interrupt_value(current, ret);
                return (ms);
        }
}

static int
wake_up_task(struct task_struct *task, unsigned int state)
{
        int ret, wakeup_swapper;

        ret = wakeup_swapper = 0;
        sleepq_lock(task);
        if ((atomic_read(&task->state) & state) != 0) {
                set_task_state(task, TASK_WAKING);
                wakeup_swapper = sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
                ret = 1;
        }
        sleepq_release(task);
        if (wakeup_swapper)
                kick_proc0();
        return (ret);
}

bool
linux_signal_pending(struct task_struct *task)
{
        struct thread *td;
        sigset_t pending;

        td = task->task_thread;
        PROC_LOCK(td->td_proc);
        pending = td->td_siglist;
        SIGSETOR(pending, td->td_proc->p_siglist);
        SIGSETNAND(pending, td->td_sigmask);
        PROC_UNLOCK(td->td_proc);
        return (!SIGISEMPTY(pending));
}

bool
linux_fatal_signal_pending(struct task_struct *task)
{
        struct thread *td;
        bool ret;

        td = task->task_thread;
        PROC_LOCK(td->td_proc);
        ret = SIGISMEMBER(td->td_siglist, SIGKILL) ||
            SIGISMEMBER(td->td_proc->p_siglist, SIGKILL);
        PROC_UNLOCK(td->td_proc);
        return (ret);
}

bool
linux_signal_pending_state(long state, struct task_struct *task)
{

        MPASS((state & ~TASK_NORMAL) == 0);

        if ((state & TASK_INTERRUPTIBLE) == 0)
                return (false);
        return (linux_signal_pending(task));
}

void
linux_send_sig(int signo, struct task_struct *task)
{
        struct thread *td;

        td = task->task_thread;
        PROC_LOCK(td->td_proc);
        tdsignal(td, signo);
        PROC_UNLOCK(td->td_proc);
}

int
autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags,
    void *key __unused)
{
        struct task_struct *task;
        int ret;

        task = wq->private;
        if ((ret = wake_up_task(task, state)) != 0)
                list_del_init(&wq->task_list);
        return (ret);
}

int
default_wake_function(wait_queue_t *wq, unsigned int state, int flags,
    void *key __unused)
{
        return (wake_up_task(wq->private, state));
}

void
linux_init_wait_entry(wait_queue_t *wq, int flags)
{

        memset(wq, 0, sizeof(*wq));
        wq->flags = flags;
        wq->private = current;
        wq->func = autoremove_wake_function;
        INIT_LIST_HEAD(&wq->task_list);
}

void
linux_wake_up(wait_queue_head_t *wqh, unsigned int state, int nr, bool locked)
{
        wait_queue_t *pos, *next;

        if (!locked)
                spin_lock(&wqh->lock);
        list_for_each_entry_safe(pos, next, &wqh->task_list, task_list) {
                if (pos->func == NULL) {
                        if (wake_up_task(pos->private, state) != 0 && --nr == 0)
                                break;
                } else {
                        if (pos->func(pos, state, 0, NULL) != 0 && --nr == 0)
                                break;
                }
        }
        if (!locked)
                spin_unlock(&wqh->lock);
}
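
/*
 * prepare_to_wait()/finish_wait() backends: enqueue the caller's wait entry
 * on the wait queue head (unless it is already queued) and record the
 * requested sleep state, or dequeue the entry and mark the task runnable
 * again once the wait is over.
 */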
void
linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state)
{

        spin_lock(&wqh->lock);
        if (list_empty(&wq->task_list))
                __add_wait_queue(wqh, wq);
        set_task_state(current, state);
        spin_unlock(&wqh->lock);
}

void
linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq)
{

        spin_lock(&wqh->lock);
        set_task_state(current, TASK_RUNNING);
        if (!list_empty(&wq->task_list)) {
                __remove_wait_queue(wqh, wq);
                INIT_LIST_HEAD(&wq->task_list);
        }
        spin_unlock(&wqh->lock);
}

bool
linux_waitqueue_active(wait_queue_head_t *wqh)
{
        bool ret;

        spin_lock(&wqh->lock);
        ret = !list_empty(&wqh->task_list);
        spin_unlock(&wqh->lock);
        return (ret);
}

int
linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, int timeout,
    unsigned int state, spinlock_t *lock)
{
        struct task_struct *task;
        int ret;

        if (lock != NULL)
                spin_unlock_irq(lock);

        /* range check timeout */
        if (timeout < 1)
                timeout = 1;
        else if (timeout == MAX_SCHEDULE_TIMEOUT)
                timeout = 0;

        task = current;

        /*
         * Our wait queue entry is on the stack - make sure it doesn't
         * get swapped out while we sleep.
         */
        PHOLD(task->task_thread->td_proc);
        sleepq_lock(task);
        if (atomic_read(&task->state) != TASK_WAKING) {
                ret = linux_add_to_sleepqueue(task, task, "wevent", timeout,
                    state);
        } else {
                sleepq_release(task);
                ret = 0;
        }
        PRELE(task->task_thread->td_proc);

        if (lock != NULL)
                spin_lock_irq(lock);
        return (ret);
}

int
linux_schedule_timeout(int timeout)
{
        struct task_struct *task;
        int ret;
        int state;
        int remainder;

        task = current;

        /* range check timeout */
        if (timeout < 1)
                timeout = 1;
        else if (timeout == MAX_SCHEDULE_TIMEOUT)
                timeout = 0;

        remainder = ticks + timeout;

        sleepq_lock(task);
        state = atomic_read(&task->state);
        if (state != TASK_WAKING) {
                ret = linux_add_to_sleepqueue(task, task, "sched", timeout,
                    state);
        } else {
                sleepq_release(task);
                ret = 0;
        }
        set_task_state(task, TASK_RUNNING);

        if (timeout == 0)
                return (MAX_SCHEDULE_TIMEOUT);

        /* compute the number of ticks left on the timeout */
        remainder -= ticks;

        /* range check return value */
        if (ret == -ERESTARTSYS && remainder < 1)
                remainder = 1;
        else if (remainder < 0)
                remainder = 0;
        else if (remainder > timeout)
                remainder = timeout;
        return (remainder);
}

static void
wake_up_sleepers(void *wchan)
{
        int wakeup_swapper;

        sleepq_lock(wchan);
        wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
        sleepq_release(wchan);
        if (wakeup_swapper)
                kick_proc0();
}

/*
 * Derive a unique wait channel pointer from a word address and a bit index
 * by shifting the address and folding the bit number into the low bits.
 */
#define bit_to_wchan(word, bit) ((void *)(((uintptr_t)(word) << 6) | (bit)))

void
linux_wake_up_bit(void *word, int bit)
{

        wake_up_sleepers(bit_to_wchan(word, bit));
}
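
/*
 * Sleep on the wait channel derived from (word, bit) until the bit is
 * observed clear.  Returns 0 on success, -EWOULDBLOCK if the timeout
 * expired, or -ERESTARTSYS if the sleep was interrupted by a signal.
 */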
int
linux_wait_on_bit_timeout(unsigned long *word, int bit, unsigned int state,
    int timeout)
{
        struct task_struct *task;
        void *wchan;
        int ret;

        /* range check timeout */
        if (timeout < 1)
                timeout = 1;
        else if (timeout == MAX_SCHEDULE_TIMEOUT)
                timeout = 0;

        task = current;
        wchan = bit_to_wchan(word, bit);
        for (;;) {
                sleepq_lock(wchan);
                if ((*word & (1 << bit)) == 0) {
                        sleepq_release(wchan);
                        ret = 0;
                        break;
                }
                set_task_state(task, state);
                ret = linux_add_to_sleepqueue(wchan, task, "wbit", timeout,
                    state);
                if (ret != 0)
                        break;
        }
        set_task_state(task, TASK_RUNNING);

        return (ret);
}

void
linux_wake_up_atomic_t(atomic_t *a)
{

        wake_up_sleepers(a);
}

int
linux_wait_on_atomic_t(atomic_t *a, unsigned int state)
{
        struct task_struct *task;
        void *wchan;
        int ret;

        task = current;
        wchan = a;
        for (;;) {
                sleepq_lock(wchan);
                if (atomic_read(a) == 0) {
                        sleepq_release(wchan);
                        ret = 0;
                        break;
                }
                set_task_state(task, state);
                ret = linux_add_to_sleepqueue(wchan, task, "watomic", 0, state);
                if (ret != 0)
                        break;
        }
        set_task_state(task, TASK_RUNNING);

        return (ret);
}

bool
linux_wake_up_state(struct task_struct *task, unsigned int state)
{

        return (wake_up_task(task, state) != 0);
}