/*-
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/*
 * Block the current thread on the given wait channel, honoring the Linux
 * task state and an optional timeout (in ticks).  The caller must hold the
 * sleepqueue channel lock for "wchan".  Returns 0 on a normal wakeup,
 * -EWOULDBLOCK if the timeout expired, and -ERESTARTSYS if the sleep was
 * interrupted; in the latter case the original error is saved via
 * linux_schedule_save_interrupt_value().
 */
static int
linux_add_to_sleepqueue(void *wchan, struct task_struct *task,
    const char *wmesg, int timeout, int state)
{
	int flags, ret;

	MPASS((state & ~TASK_NORMAL) == 0);

	flags = SLEEPQ_SLEEP | ((state & TASK_INTERRUPTIBLE) != 0 ?
	    SLEEPQ_INTERRUPTIBLE : 0);

	sleepq_add(wchan, NULL, wmesg, flags, 0);
	if (timeout != 0)
		sleepq_set_timeout(wchan, timeout);
	if ((state & TASK_INTERRUPTIBLE) != 0) {
		if (timeout == 0)
			ret = -sleepq_wait_sig(wchan, 0);
		else
			ret = -sleepq_timedwait_sig(wchan, 0);
	} else {
		if (timeout == 0) {
			sleepq_wait(wchan, 0);
			ret = 0;
		} else
			ret = -sleepq_timedwait(wchan, 0);
	}
	/* filter return value */
	if (ret != 0 && ret != -EWOULDBLOCK) {
		linux_schedule_save_interrupt_value(task, ret);
		ret = -ERESTARTSYS;
	}
	return (ret);
}

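/*
 * Illustrative caller sketch (hypothetical, assuming linux/delay.h maps
 * msleep_interruptible() onto the function below):
 *
 *	if (msleep_interruptible(50) != 0)
 *		return (-EINTR);
 *
 * A zero return means the full interval elapsed; a non-zero return means a
 * signal cut the sleep short.  Unlike Linux, which reports the time left to
 * sleep, this implementation returns the requested "ms" value in the
 * interrupted case.
 */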
unsigned int
linux_msleep_interruptible(unsigned int ms)
{
	int ret;

	/* guard against invalid values */
	if (ms == 0)
		ms = 1;
	ret = -pause_sbt("lnxsleep", mstosbt(ms), 0, C_HARDCLOCK | C_CATCH);

	switch (ret) {
	case -EWOULDBLOCK:
		return (0);
	default:
		linux_schedule_save_interrupt_value(current, ret);
		return (ms);
	}
}

/*
 * Wake up "task" if it is currently sleeping in one of the states in
 * "state".  Returns non-zero if the task was awakened.
 */
static int
wake_up_task(struct task_struct *task, unsigned int state)
{
	int ret, wakeup_swapper;

	ret = wakeup_swapper = 0;
	sleepq_lock(task);
	if ((atomic_read(&task->state) & state) != 0) {
		set_task_state(task, TASK_WAKING);
		wakeup_swapper = sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
		ret = 1;
	}
	sleepq_release(task);
	if (wakeup_swapper)
		kick_proc0();
	return (ret);
}

bool
linux_signal_pending(struct task_struct *task)
{
	struct thread *td;
	sigset_t pending;

	td = task->task_thread;
	PROC_LOCK(td->td_proc);
	pending = td->td_siglist;
	SIGSETOR(pending, td->td_proc->p_siglist);
	SIGSETNAND(pending, td->td_sigmask);
	PROC_UNLOCK(td->td_proc);
	return (!SIGISEMPTY(pending));
}

bool
linux_fatal_signal_pending(struct task_struct *task)
{
	struct thread *td;
	bool ret;

	td = task->task_thread;
	PROC_LOCK(td->td_proc);
	ret = SIGISMEMBER(td->td_siglist, SIGKILL) ||
	    SIGISMEMBER(td->td_proc->p_siglist, SIGKILL);
	PROC_UNLOCK(td->td_proc);
	return (ret);
}

bool
linux_signal_pending_state(long state, struct task_struct *task)
{

	MPASS((state & ~TASK_NORMAL) == 0);

	if ((state & TASK_INTERRUPTIBLE) == 0)
		return (false);
	return (linux_signal_pending(task));
}

void
linux_send_sig(int signo, struct task_struct *task)
{
	struct thread *td;

	td = task->task_thread;
	PROC_LOCK(td->td_proc);
	tdsignal(td, signo);
	PROC_UNLOCK(td->td_proc);
}

int
autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags,
    void *key __unused)
{
	struct task_struct *task;
	int ret;

	task = wq->private;
	if ((ret = wake_up_task(task, state)) != 0)
		list_del_init(&wq->task_list);
	return (ret);
}

int
default_wake_function(wait_queue_t *wq, unsigned int state, int flags,
    void *key __unused)
{
	return (wake_up_task(wq->private, state));
}

void
linux_wake_up(wait_queue_head_t *wqh, unsigned int state, int nr, bool locked)
{
	wait_queue_t *pos, *next;

	if (!locked)
		spin_lock(&wqh->lock);
	list_for_each_entry_safe(pos, next, &wqh->task_list, task_list) {
		if (pos->func == NULL) {
			if (wake_up_task(pos->private, state) != 0 && --nr == 0)
				break;
		} else {
			if (pos->func(pos, state, 0, NULL) != 0 && --nr == 0)
				break;
		}
	}
	if (!locked)
		spin_unlock(&wqh->lock);
}

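/*
 * The helpers below back the usual Linux wait-queue idiom.  A caller sketch
 * (hypothetical; it assumes the customary Linux macro names from
 * linux/wait.h and linux/sched.h are available and map onto these
 * functions):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wqh, &wait, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wqh, &wait);
 */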
void
linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state)
{

	spin_lock(&wqh->lock);
	if (list_empty(&wq->task_list))
		__add_wait_queue(wqh, wq);
	set_task_state(current, state);
	spin_unlock(&wqh->lock);
}

void
linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	set_task_state(current, TASK_RUNNING);
	if (!list_empty(&wq->task_list)) {
		__remove_wait_queue(wqh, wq);
		INIT_LIST_HEAD(&wq->task_list);
	}
	spin_unlock(&wqh->lock);
}

bool
linux_waitqueue_active(wait_queue_head_t *wqh)
{
	bool ret;

	spin_lock(&wqh->lock);
	ret = !list_empty(&wqh->task_list);
	spin_unlock(&wqh->lock);
	return (ret);
}

int
linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, int timeout,
    unsigned int state, spinlock_t *lock)
{
	struct task_struct *task;
	int ret;

	if (lock != NULL)
		spin_unlock_irq(lock);

	DROP_GIANT();

	/* range check timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;

	task = current;

	/*
	 * Our wait queue entry is on the stack - make sure it doesn't
	 * get swapped out while we sleep.
	 */
	PHOLD(task->task_thread->td_proc);
	sleepq_lock(task);
	if (atomic_read(&task->state) != TASK_WAKING) {
		ret = linux_add_to_sleepqueue(task, task, "wevent", timeout,
		    state);
	} else {
		sleepq_release(task);
		ret = 0;
	}
	PRELE(task->task_thread->td_proc);

	PICKUP_GIANT();

	if (lock != NULL)
		spin_lock_irq(lock);
	return (ret);
}

int
linux_schedule_timeout(int timeout)
{
	struct task_struct *task;
	int ret;
	int state;
	int remainder;

	task = current;

	/* range check timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;

	remainder = ticks + timeout;

	DROP_GIANT();

	sleepq_lock(task);
	state = atomic_read(&task->state);
	if (state != TASK_WAKING) {
		ret = linux_add_to_sleepqueue(task, task, "sched", timeout,
		    state);
	} else {
		sleepq_release(task);
		ret = 0;
	}
	set_task_state(task, TASK_RUNNING);

	PICKUP_GIANT();

	if (timeout == 0)
		return (MAX_SCHEDULE_TIMEOUT);

	/* compute how many ticks were left over */
	remainder -= ticks;

	/* range check return value */
	if (ret == -ERESTARTSYS && remainder < 1)
		remainder = 1;
	else if (remainder < 0)
		remainder = 0;
	else if (remainder > timeout)
		remainder = timeout;
	return (remainder);
}

static void
wake_up_sleepers(void *wchan)
{
	int wakeup_swapper;

	sleepq_lock(wchan);
	wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}

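/*
 * Derive a wait channel for a (word, bit) pair by shifting the word address
 * left by six bits and folding in the bit index, so that waiters on
 * different bits of the same word sleep on different channels.  A worked
 * example (hypothetical address, for illustration only): for a word at
 * 0x1000 and bit 3, the channel is (0x1000 << 6) | 3 == 0x40003.
 */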
#define	bit_to_wchan(word, bit)	((void *)(((uintptr_t)(word) << 6) | (bit)))

void
linux_wake_up_bit(void *word, int bit)
{

	wake_up_sleepers(bit_to_wchan(word, bit));
}

int
linux_wait_on_bit_timeout(unsigned long *word, int bit, unsigned int state,
    int timeout)
{
	struct task_struct *task;
	void *wchan;
	int ret;

	DROP_GIANT();

	/* range check timeout */
	if (timeout < 1)
		timeout = 1;
	else if (timeout == MAX_SCHEDULE_TIMEOUT)
		timeout = 0;

	task = current;
	wchan = bit_to_wchan(word, bit);
	for (;;) {
		sleepq_lock(wchan);
		if ((*word & (1 << bit)) == 0) {
			sleepq_release(wchan);
			ret = 0;
			break;
		}
		set_task_state(task, state);
		ret = linux_add_to_sleepqueue(wchan, task, "wbit", timeout,
		    state);
		if (ret != 0)
			break;
	}
	set_task_state(task, TASK_RUNNING);

	PICKUP_GIANT();

	return (ret);
}

void
linux_wake_up_atomic_t(atomic_t *a)
{

	wake_up_sleepers(a);
}

int
linux_wait_on_atomic_t(atomic_t *a, unsigned int state)
{
	struct task_struct *task;
	void *wchan;
	int ret;

	DROP_GIANT();

	task = current;
	wchan = a;
	for (;;) {
		sleepq_lock(wchan);
		if (atomic_read(a) == 0) {
			sleepq_release(wchan);
			ret = 0;
			break;
		}
		set_task_state(task, state);
		ret = linux_add_to_sleepqueue(wchan, task, "watomic", 0,
		    state);
		if (ret != 0)
			break;
	}
	set_task_state(task, TASK_RUNNING);

	PICKUP_GIANT();

	return (ret);
}

bool
linux_wake_up_state(struct task_struct *task, unsigned int state)
{

	return (wake_up_task(task, state) != 0);
}