/*-
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/*
 * Convert a relative time in jiffies to a tick count, suitable for use with
 * native FreeBSD interfaces (callouts, sleepqueues, etc.).
 */
static int
linux_jiffies_timeout_to_ticks(long timeout)
{
	if (timeout < 1)
		return (1);
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		return (0);
	else if (timeout > INT_MAX)
		return (INT_MAX);
	else
		return (timeout);
}

/*
 * Block the current thread on the given wait channel.  The timeout is in
 * jiffies; 0 is returned on a normal wakeup, -EWOULDBLOCK if the timeout
 * expired, and -ERESTARTSYS if the sleep was interrupted.
 */
static int
linux_add_to_sleepqueue(void *wchan, struct task_struct *task,
    const char *wmesg, long timeout, int state)
{
	int flags, ret, stimeout;

	MPASS((state & ~(TASK_PARKED | TASK_NORMAL)) == 0);

	flags = SLEEPQ_SLEEP | ((state & TASK_INTERRUPTIBLE) != 0 ?
	    SLEEPQ_INTERRUPTIBLE : 0);
	stimeout = linux_jiffies_timeout_to_ticks(timeout);

	sleepq_add(wchan, NULL, wmesg, flags, 0);
	if (stimeout != 0)
		sleepq_set_timeout(wchan, stimeout);

	DROP_GIANT();
	if ((state & TASK_INTERRUPTIBLE) != 0) {
		if (stimeout == 0)
			ret = -sleepq_wait_sig(wchan, 0);
		else
			ret = -sleepq_timedwait_sig(wchan, 0);
	} else {
		if (stimeout == 0) {
			sleepq_wait(wchan, 0);
			ret = 0;
		} else
			ret = -sleepq_timedwait(wchan, 0);
	}
	PICKUP_GIANT();

	/* filter return value */
	if (ret != 0 && ret != -EWOULDBLOCK) {
		linux_schedule_save_interrupt_value(task, ret);
		ret = -ERESTARTSYS;
	}
	return (ret);
}

unsigned int
linux_msleep_interruptible(unsigned int ms)
{
	int ret;

	/* guard against invalid values */
	if (ms == 0)
		ms = 1;
	ret = -pause_sbt("lnxsleep", mstosbt(ms), 0, C_HARDCLOCK | C_CATCH);

	switch (ret) {
	case -EWOULDBLOCK:
		return (0);
	default:
		linux_schedule_save_interrupt_value(current, ret);
		return (ms);
	}
}

static int
wake_up_task(struct task_struct *task, unsigned int state)
{
	int ret;

	ret = 0;
	sleepq_lock(task);
	if ((atomic_read(&task->state) & state) != 0) {
		set_task_state(task, TASK_WAKING);
		sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
		ret = 1;
	}
	sleepq_release(task);
	return (ret);
}

bool
linux_signal_pending(struct task_struct *task)
{
	struct thread *td;
	sigset_t pending;

	td = task->task_thread;
	PROC_LOCK(td->td_proc);
	pending = td->td_siglist;
	SIGSETOR(pending, td->td_proc->p_siglist);
	SIGSETNAND(pending, td->td_sigmask);
	PROC_UNLOCK(td->td_proc);
	return (!SIGISEMPTY(pending));
}

bool
linux_fatal_signal_pending(struct task_struct *task)
{
	struct thread *td;
	bool ret;

	td = task->task_thread;
	PROC_LOCK(td->td_proc);
	ret = SIGISMEMBER(td->td_siglist, SIGKILL) ||
	    SIGISMEMBER(td->td_proc->p_siglist, SIGKILL);
	PROC_UNLOCK(td->td_proc);
	return (ret);
}

bool
linux_signal_pending_state(long state, struct task_struct *task)
{

	MPASS((state & ~TASK_NORMAL) == 0);

	if ((state & TASK_INTERRUPTIBLE) == 0)
		return (false);
	return (linux_signal_pending(task));
}

void
linux_send_sig(int signo, struct task_struct *task)
{
	struct thread *td;

	td = task->task_thread;
	PROC_LOCK(td->td_proc);
	tdsignal(td, signo);
	PROC_UNLOCK(td->td_proc);
}

int
autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags,
    void *key __unused)
{
	struct task_struct *task;
	int ret;

	task = wq->private;
	if ((ret = wake_up_task(task, state)) != 0)
		list_del_init(&wq->task_list);
	return (ret);
}

int
default_wake_function(wait_queue_t *wq, unsigned int state, int flags,
    void *key __unused)
{
	return (wake_up_task(wq->private, state));
}

long
linux_wait_woken(wait_queue_t *wq, unsigned state, long timeout)
{
	void *wchan;
	struct task_struct *task;
	int ret;
	int remainder;

	task = current;
	wchan = wq->private;

	remainder = jiffies + timeout;

	set_task_state(task, state);

	sleepq_lock(wchan);
	if (!(wq->flags & WQ_FLAG_WOKEN)) {
		ret = linux_add_to_sleepqueue(wchan, task, "woken",
		    timeout, state);
	} else {
		sleepq_release(wchan);
		ret = 0;
	}

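	/* Done sleeping: mark the task runnable and clear the wakeup flag. */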
	set_task_state(task, TASK_RUNNING);
	wq->flags &= ~WQ_FLAG_WOKEN;

	if (timeout == MAX_SCHEDULE_TIMEOUT)
		return (MAX_SCHEDULE_TIMEOUT);

	/* compute how much of the timeout is left */
	remainder -= jiffies;

	/* range check return value */
	if (ret == -ERESTARTSYS && remainder < 1)
		remainder = 1;
	else if (remainder < 0)
		remainder = 0;
	else if (remainder > timeout)
		remainder = timeout;
	return (remainder);
}

int
woken_wake_function(wait_queue_t *wq, unsigned int state,
    int flags __unused, void *key __unused)
{
	void *wchan;

	wchan = wq->private;

	sleepq_lock(wchan);
	wq->flags |= WQ_FLAG_WOKEN;
	sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(wchan);

	return (1);
}

void
linux_init_wait_entry(wait_queue_t *wq, int flags)
{

	memset(wq, 0, sizeof(*wq));
	wq->flags = flags;
	wq->private = current;
	wq->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq->task_list);
}

void
linux_wake_up(wait_queue_head_t *wqh, unsigned int state, int nr, bool locked)
{
	wait_queue_t *pos, *next;

	if (!locked)
		spin_lock(&wqh->lock);
	list_for_each_entry_safe(pos, next, &wqh->task_list, task_list) {
		if (pos->func == NULL) {
			if (wake_up_task(pos->private, state) != 0 && --nr == 0)
				break;
		} else {
			if (pos->func(pos, state, 0, NULL) != 0 && --nr == 0)
				break;
		}
	}
	if (!locked)
		spin_unlock(&wqh->lock);
}

void
linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state)
{

	spin_lock(&wqh->lock);
	if (list_empty(&wq->task_list))
		__add_wait_queue(wqh, wq);
	set_task_state(current, state);
	spin_unlock(&wqh->lock);
}

void
linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	set_task_state(current, TASK_RUNNING);
	if (!list_empty(&wq->task_list)) {
		__remove_wait_queue(wqh, wq);
		INIT_LIST_HEAD(&wq->task_list);
	}
	spin_unlock(&wqh->lock);
}

bool
linux_waitqueue_active(wait_queue_head_t *wqh)
{
	bool ret;

	spin_lock(&wqh->lock);
	ret = !list_empty(&wqh->task_list);
	spin_unlock(&wqh->lock);
	return (ret);
}

int
linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, long timeout,
    unsigned int state, spinlock_t *lock)
{
	struct task_struct *task;
	int ret;

	if (lock != NULL)
		spin_unlock_irq(lock);

	task = current;

	sleepq_lock(task);
	if (atomic_read(&task->state) != TASK_WAKING) {
		ret = linux_add_to_sleepqueue(task, task, "wevent", timeout,
		    state);
	} else {
		sleepq_release(task);
		ret = 0;
	}

	if (lock != NULL)
		spin_lock_irq(lock);
	return (ret);
}

long
linux_schedule_timeout(long timeout)
{
	struct task_struct *task;
	long remainder;
	int ret, state;

	task = current;

	remainder = jiffies + timeout;

	sleepq_lock(task);
	state = atomic_read(&task->state);
	if (state != TASK_WAKING) {
		ret = linux_add_to_sleepqueue(task, task, "sched", timeout,
		    state);
	} else {
		sleepq_release(task);
		ret = 0;
	}
	set_task_state(task, TASK_RUNNING);

	if (timeout == MAX_SCHEDULE_TIMEOUT)
		return (MAX_SCHEDULE_TIMEOUT);

	/* compute how much of the timeout is left */
	remainder -= jiffies;

	/* range check return value */
	if (ret == -ERESTARTSYS && remainder < 1)
		remainder = 1;
	else if (remainder < 0)
		remainder = 0;
	else if (remainder > timeout)
		remainder = timeout;
	return (remainder);
}

static void
wake_up_sleepers(void *wchan)
{
	sleepq_lock(wchan);
	sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(wchan);
}

/* Map a (word, bit) pair to a wait channel address for the bit-wait API. */
#define	bit_to_wchan(word, bit)	((void *)(((uintptr_t)(word) << 6) | (bit)))

void
linux_wake_up_bit(void *word, int bit)
{

	wake_up_sleepers(bit_to_wchan(word, bit));
}

int
linux_wait_on_bit_timeout(unsigned long *word, int bit, unsigned int state,
    long timeout)
{
	struct task_struct *task;
	void *wchan;
	int ret;

	task = current;
	wchan = bit_to_wchan(word, bit);
	for (;;) {
		sleepq_lock(wchan);
		if ((*word & (1 << bit)) == 0) {
			sleepq_release(wchan);
			ret = 0;
			break;
		}
		set_task_state(task, state);
		ret = linux_add_to_sleepqueue(wchan, task, "wbit", timeout,
		    state);
		if (ret != 0)
			break;
	}
	set_task_state(task, TASK_RUNNING);

	return (ret);
}

void
linux_wake_up_atomic_t(atomic_t *a)
{

	wake_up_sleepers(a);
}

int
linux_wait_on_atomic_t(atomic_t *a, unsigned int state)
{
	struct task_struct *task;
	void *wchan;
	int ret;

	task = current;
	wchan = a;
	for (;;) {
		sleepq_lock(wchan);
		if (atomic_read(a) == 0) {
			sleepq_release(wchan);
			ret = 0;
			break;
		}
		set_task_state(task, state);
		/* Sleep with no timeout until the atomic_t drops to zero. */
		ret = linux_add_to_sleepqueue(wchan, task, "watomic",
		    MAX_SCHEDULE_TIMEOUT, state);
		if (ret != 0)
			break;
	}
	set_task_state(task, TASK_RUNNING);

	return (ret);
}

bool
linux_wake_up_state(struct task_struct *task, unsigned int state)
{

	return (wake_up_task(task, state) != 0);
}
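
/*
 * Illustrative sketch (an assumption about typical usage, not part of this
 * file's interfaces): consumers normally reach the routines above through
 * the LinuxKPI wait-queue macros in <linux/wait.h> rather than calling them
 * directly.  Roughly, with hypothetical "example_wqh" and "example_done":
 *
 *	static wait_queue_head_t example_wqh;
 *	static atomic_t example_done;
 *
 *	init_waitqueue_head(&example_wqh);
 *
 *	// Sleeper: blocks until the condition is true or a signal arrives.
 *	error = wait_event_interruptible(example_wqh,
 *	    atomic_read(&example_done) != 0);
 *
 *	// Waker: publish the condition, then wake any waiters.
 *	atomic_set(&example_done, 1);
 *	wake_up(&example_wqh);
 */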