/*-
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/*
 * Put the current thread to sleep on the sleepqueue channel "wchan" and
 * block until woken, interrupted, or timed out.
 *
 * All callers in this file acquire sleepq_lock(wchan) before calling here;
 * the lock is consumed by the sleepq_add()/sleepq_*wait*() sequence below.
 * "state" selects Linux-style sleep semantics: TASK_INTERRUPTIBLE makes the
 * sleep abortable by signals.  "timeout" is in ticks; 0 means sleep without
 * a timeout.
 *
 * Returns 0 on a normal wakeup, -EWOULDBLOCK on timeout, or -ERESTARTSYS if
 * the sleep was interrupted; in the latter case the raw error from the
 * sleepqueue is stashed in "task" via linux_schedule_save_interrupt_value()
 * so callers can recover it.
 */
static int
linux_add_to_sleepqueue(void *wchan, struct task_struct *task,
    const char *wmesg, int timeout, int state)
{
	int flags, ret;

	MPASS((state & ~(TASK_PARKED | TASK_NORMAL)) == 0);

	/* Map TASK_INTERRUPTIBLE onto the sleepqueue's signal-abort flag. */
	flags = SLEEPQ_SLEEP | ((state & TASK_INTERRUPTIBLE) != 0 ?
	    SLEEPQ_INTERRUPTIBLE : 0);

	sleepq_add(wchan, NULL, wmesg, flags, 0);
	if (timeout != 0)
		sleepq_set_timeout(wchan, timeout);

	/*
	 * Drop Giant (if held) for the duration of the sleep and reacquire
	 * it afterwards; sleepq errnos are negated to Linux convention.
	 */
	DROP_GIANT();
	if ((state & TASK_INTERRUPTIBLE) != 0) {
		if (timeout == 0)
			ret = -sleepq_wait_sig(wchan, 0);
		else
			ret = -sleepq_timedwait_sig(wchan, 0);
	} else {
		if (timeout == 0) {
			sleepq_wait(wchan, 0);
			ret = 0;
		} else
			ret = -sleepq_timedwait(wchan, 0);
	}
	PICKUP_GIANT();

	/* filter return value */
	if (ret != 0 && ret != -EWOULDBLOCK) {
		/* Preserve the original error, report -ERESTARTSYS. */
		linux_schedule_save_interrupt_value(task, ret);
		ret = -ERESTARTSYS;
	}
	return (ret);
}

/*
 * Linux msleep_interruptible(): sleep for "ms" milliseconds, waking early
 * if a signal arrives.  Returns 0 if the full interval elapsed, or "ms"
 * (an upper bound on the time remaining) if interrupted; the raw error is
 * saved on the current task for later retrieval.
 */
unsigned int
linux_msleep_interruptible(unsigned int ms)
{
	int ret;

	/* guard against invalid values */
	if (ms == 0)
		ms = 1;
	ret = -pause_sbt("lnxsleep", mstosbt(ms), 0, C_HARDCLOCK | C_CATCH);

	switch (ret) {
	case -EWOULDBLOCK:
		/* Timer expired normally; nothing is left to sleep. */
		return (0);
	default:
		linux_schedule_save_interrupt_value(current, ret);
		return (ms);
	}
}

/*
 * Wake "task" if its current state matches any bit in "state".  The task's
 * state is switched to TASK_WAKING under the sleepqueue lock so a sleeper
 * racing towards linux_add_to_sleepqueue() will notice the pending wakeup
 * and skip the sleep (see linux_wait_event_common()).  Returns 1 if the
 * task was woken, 0 otherwise.
 */
static int
wake_up_task(struct task_struct *task, unsigned int state)
{
	int ret, wakeup_swapper;

	ret = wakeup_swapper = 0;
	sleepq_lock(task);
	if ((atomic_read(&task->state) & state) != 0) {
		set_task_state(task, TASK_WAKING);
		wakeup_swapper = sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
		ret = 1;
	}
	sleepq_release(task);
	if (wakeup_swapper)
		kick_proc0();
	return (ret);
}

/*
 * Return true if the thread backing "task" has any deliverable (unmasked)
 * signal pending, checking both the thread and process signal queues.
 */
bool
linux_signal_pending(struct task_struct *task)
{
	struct thread *td;
	sigset_t pending;

	td = task->task_thread;
	PROC_LOCK(td->td_proc);
	pending = td->td_siglist;
	SIGSETOR(pending, td->td_proc->p_siglist);
	/* Masked signals are not deliverable and do not count. */
	SIGSETNAND(pending, td->td_sigmask);
	PROC_UNLOCK(td->td_proc);
	return (!SIGISEMPTY(pending));
}

/*
 * Return true if SIGKILL is pending against the thread backing "task",
 * on either the thread or the process signal queue.
 */
bool
linux_fatal_signal_pending(struct task_struct *task)
{
	struct thread *td;
	bool ret;

	td = task->task_thread;
	PROC_LOCK(td->td_proc);
	ret = SIGISMEMBER(td->td_siglist, SIGKILL) ||
	    SIGISMEMBER(td->td_proc->p_siglist, SIGKILL);
	PROC_UNLOCK(td->td_proc);
	return (ret);
}

/*
 * Linux signal_pending_state(): only an interruptible sleep state can be
 * broken by a signal, so report pending signals only when "state" includes
 * TASK_INTERRUPTIBLE.
 */
bool
linux_signal_pending_state(long state, struct task_struct *task)
{

	MPASS((state & ~TASK_NORMAL) == 0);

	if ((state & TASK_INTERRUPTIBLE) == 0)
		return (false);
	return (linux_signal_pending(task));
}

/*
 * Deliver signal "signo" to the thread backing "task".
 */
void
linux_send_sig(int signo, struct task_struct *task)
{
	struct thread *td;

	td = task->task_thread;
	PROC_LOCK(td->td_proc);
	tdsignal(td, signo);
	PROC_UNLOCK(td->td_proc);
}

/*
 * Default wake callback installed by linux_init_wait_entry(): wake the
 * entry's owner and, if it was actually woken, unlink the entry from its
 * wait queue so it is not woken again.  Returns non-zero if woken.
 */
int
autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags,
    void *key __unused)
{
	struct task_struct *task;
	int ret;

	task = wq->private;
	if ((ret = wake_up_task(task, state)) != 0)
		list_del_init(&wq->task_list);
	return (ret);
}

/*
 * Wake callback that wakes the entry's owner without removing the entry
 * from the wait queue.  Returns non-zero if the task was woken.
 */
int
default_wake_function(wait_queue_t *wq, unsigned int state, int flags,
    void *key __unused)
{
	return (wake_up_task(wq->private, state));
}

/*
 * Initialize a wait queue entry to wake the current task via
 * autoremove_wake_function() when signalled.
 */
void
linux_init_wait_entry(wait_queue_t *wq, int flags)
{

	memset(wq, 0, sizeof(*wq));
	wq->flags = flags;
	wq->private = current;
	wq->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq->task_list);
}

/*
 * Wake up to "nr" waiters on "wqh" whose task state matches "state".
 * Entries with a NULL callback are woken directly; otherwise the entry's
 * func callback decides (e.g. autoremove_wake_function()).  "locked" is
 * true when the caller already holds wqh->lock.
 *
 * NOTE(review): when nr is 0 the first successful wakeup decrements it to
 * -1 and the loop continues until the list is exhausted, i.e. nr == 0
 * effectively means "wake all" — confirm callers rely on this.
 */
void
linux_wake_up(wait_queue_head_t *wqh, unsigned int state, int nr, bool locked)
{
	wait_queue_t *pos, *next;
	unsigned long flags;

	if (!locked)
		spin_lock_irqsave(&wqh->lock, flags);

	list_for_each_entry_safe(pos, next, &wqh->task_list, task_list) {
		if (pos->func == NULL) {
			if (wake_up_task(pos->private, state) != 0 && --nr == 0)
				break;
		} else {
			if (pos->func(pos, state, 0, NULL) != 0 && --nr == 0)
				break;
		}
	}
	if (!locked)
		spin_unlock_irqrestore(&wqh->lock, flags);
}

/*
 * Enqueue "wq" on "wqh" (if not already queued) and mark the current task
 * with "state" so a subsequent linux_wait_event_common() will sleep, or a
 * racing wakeup will be noticed.
 */
void
linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state)
{

	spin_lock(&wqh->lock);
	if (list_empty(&wq->task_list))
		__add_wait_queue(wqh, wq);
	set_task_state(current, state);
	spin_unlock(&wqh->lock);
}

/*
 * Undo linux_prepare_to_wait(): restore TASK_RUNNING and dequeue "wq" if
 * it is still linked (autoremove_wake_function() may already have removed
 * it).
 */
void
linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	set_task_state(current, TASK_RUNNING);
	if (!list_empty(&wq->task_list)) {
		__remove_wait_queue(wqh, wq);
		INIT_LIST_HEAD(&wq->task_list);
	}
	spin_unlock(&wqh->lock);
}

/*
 * Return true if any waiter is currently queued on "wqh".
 */
bool
linux_waitqueue_active(wait_queue_head_t *wqh)
{
	bool ret;

	spin_lock(&wqh->lock);
	ret = !list_empty(&wqh->task_list);
	spin_unlock(&wqh->lock);
	return (ret);
}

/*
 * Sleep portion of the wait_event*() macros.  The caller has already
 * queued its wait entry and set its task state via linux_prepare_to_wait().
 * "lock", if non-NULL, is dropped across the sleep and reacquired before
 * returning (wait_event_lock_irq semantics).  "timeout" is in ticks:
 * values below 1 are clamped to 1 and MAX_SCHEDULE_TIMEOUT means no
 * timeout.  Returns 0, -EWOULDBLOCK, or -ERESTARTSYS as for
 * linux_add_to_sleepqueue().
 */
int
linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, int timeout,
    unsigned int state, spinlock_t *lock)
{
	struct task_struct *task;
	int ret;

	if (lock != NULL)
		spin_unlock_irq(lock);

	/* range check timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;

	task = current;

	/*
	 * Our wait queue entry is on the stack - make sure it doesn't
	 * get swapped out while we sleep.
	 */
	PHOLD(task->task_thread->td_proc);
	sleepq_lock(task);
	if (atomic_read(&task->state) != TASK_WAKING) {
		ret = linux_add_to_sleepqueue(task, task, "wevent", timeout,
		    state);
	} else {
		/* A wakeup raced in before we could sleep; don't block. */
		sleepq_release(task);
		ret = 0;
	}
	PRELE(task->task_thread->td_proc);

	if (lock != NULL)
		spin_lock_irq(lock);
	return (ret);
}

/*
 * Linux schedule_timeout(): sleep for up to "timeout" ticks (after the
 * caller set its own task state), then report the number of ticks left.
 * MAX_SCHEDULE_TIMEOUT sleeps without a timeout and is returned unchanged.
 * An interrupted sleep (-ERESTARTSYS) reports at least 1 remaining tick so
 * callers can distinguish interruption from expiry.
 */
int
linux_schedule_timeout(int timeout)
{
	struct task_struct *task;
	int ret;
	int state;
	int remainder;

	task = current;

	/* range check timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;

	/* Record the intended wakeup time; "ticks" advances while asleep. */
	remainder = ticks + timeout;

	sleepq_lock(task);
	state = atomic_read(&task->state);
	if (state != TASK_WAKING) {
		ret = linux_add_to_sleepqueue(task, task, "sched", timeout,
		    state);
	} else {
		/* Wakeup already pending; skip the sleep. */
		sleepq_release(task);
		ret = 0;
	}
	set_task_state(task, TASK_RUNNING);

	if (timeout == 0)
		return (MAX_SCHEDULE_TIMEOUT);

	/* range check return value */
	remainder -= ticks;

	/* range check return value */
	if (ret == -ERESTARTSYS && remainder < 1)
		remainder = 1;
	else if (remainder < 0)
		remainder = 0;
	else if (remainder > timeout)
		remainder = timeout;
	return (remainder);
}

/*
 * Wake every thread sleeping on the plain wait channel "wchan".
 */
static void
wake_up_sleepers(void *wchan)
{
	int wakeup_swapper;

	sleepq_lock(wchan);
	wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Derive a unique wait channel from a word address and a bit index by
 * folding the bit number into the pointer's low bits.
 * NOTE(review): assumes "bit" fits in 6 bits and that shifting the pointer
 * left by 6 keeps channels distinct — confirm against callers.
 */
#define	bit_to_wchan(word, bit)	((void *)(((uintptr_t)(word) << 6) | (bit)))

/*
 * Wake all threads waiting (via linux_wait_on_bit_timeout()) on the given
 * bit of "word".
 */
void
linux_wake_up_bit(void *word, int bit)
{

	wake_up_sleepers(bit_to_wchan(word, bit));
}

/*
 * Sleep until bit "bit" of "word" is clear, or until "timeout" ticks
 * elapse.  Timeout handling matches linux_wait_event_common():
 * MAX_SCHEDULE_TIMEOUT means no timeout.  The bit is re-tested under the
 * sleepqueue lock on every wakeup to close the race with
 * linux_wake_up_bit().  Returns 0 when the bit is clear, otherwise
 * -EWOULDBLOCK or -ERESTARTSYS.
 */
int
linux_wait_on_bit_timeout(unsigned long *word, int bit, unsigned int state,
    int timeout)
{
	struct task_struct *task;
	void *wchan;
	int ret;

	/* range check timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;

	task = current;
	wchan = bit_to_wchan(word, bit);
	for (;;) {
		sleepq_lock(wchan);
		if ((*word & (1 << bit)) == 0) {
			sleepq_release(wchan);
			ret = 0;
			break;
		}
		set_task_state(task, state);
		ret = linux_add_to_sleepqueue(wchan, task, "wbit", timeout,
		    state);
		if (ret != 0)
			break;
	}
	set_task_state(task, TASK_RUNNING);

	return (ret);
}

/*
 * Wake all threads waiting (via linux_wait_on_atomic_t()) on "a".
 */
void
linux_wake_up_atomic_t(atomic_t *a)
{

	wake_up_sleepers(a);
}

/*
 * Sleep until the atomic counter "a" reaches zero, re-testing under the
 * sleepqueue lock after each wakeup.  There is no timeout.  Returns 0 on
 * success or -ERESTARTSYS if interrupted.
 */
int
linux_wait_on_atomic_t(atomic_t *a, unsigned int state)
{
	struct task_struct *task;
	void *wchan;
	int ret;

	task = current;
	wchan = a;
	for (;;) {
		sleepq_lock(wchan);
		if (atomic_read(a) == 0) {
			sleepq_release(wchan);
			ret = 0;
			break;
		}
		set_task_state(task, state);
		ret = linux_add_to_sleepqueue(wchan, task, "watomic", 0, state);
		if (ret != 0)
			break;
	}
	set_task_state(task, TASK_RUNNING);

	return (ret);
}

/*
 * Linux wake_up_state(): wake "task" if its state matches "state";
 * returns true if the task was woken.
 */
bool
linux_wake_up_state(struct task_struct *task, unsigned int state)
{

	return (wake_up_task(task, state) != 0);
}