/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/blockcount.h>
#include <sys/condvar.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <machine/cpu.h>

static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
    NULL);

int	hogticks;
static const char pause_wchan[MAXCPU];

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
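/*
 * The load average is an exponentially weighted moving average.  With a
 * 5-second sampling period and a time constant of T seconds, each sample
 * updates the average roughly as
 *
 *	avg = avg * exp(-5/T) + nrun * (1 - exp(-5/T))
 *
 * For the 1, 5 and 15 minute averages T is 60, 300 and 900 seconds, which
 * gives the decay factors exp(-1/12), exp(-1/60) and exp(-1/180) tabulated
 * below.  As a rough floating-point sketch of the same arithmetic
 * (illustrative only; not how the kernel computes it):
 *
 *	decay = exp(-5.0 / 60.0);
 *	avg_1min = avg_1min * decay + nrun * (1.0 - decay);
 */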
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, FSCALE,
    "Fixed-point scale factor used for calculating load average values");

static void	loadav(void *arg);

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE(sched, , , preempt);

static void
sleepinit(void *unused)
{

	hogticks = (hz / 10) * 2;	/* Default only. */
	init_sleepqueues();
}

/*
 * vmem tries to lock the sleepq mutexes when freeing KVA, so make sure
 * the sleep queues are set up before the allocator needs them.
 */
SYSINIT(sleepinit, SI_SUB_KMEM, SI_ORDER_ANY, sleepinit, NULL);
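/*
 * _sleep() below is normally reached through the msleep()/tsleep() wrappers
 * in <sys/systm.h>.  A minimal sketch of the usual caller pattern
 * (illustrative only; "foo" and its members are hypothetical):
 *
 *	mtx_lock(&foo->mtx);
 *	while (!foo->ready) {
 *		error = msleep(foo, &foo->mtx, PCATCH, "foowait", 0);
 *		if (error != 0)
 *			break;
 *	}
 *	mtx_unlock(&foo->mtx);
 *
 * where a nonzero error indicates interruption by a signal or a timeout.
 * The corresponding producer sets the condition while holding the same
 * mutex and then calls wakeup(foo) (or wakeup_one(foo)).
 */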
/*
 * General sleep call.  Suspends the current thread until a wakeup is
 * performed on the specified identifier.  The thread will then be made
 * runnable with the specified priority.  Sleeps at most sbt units of time
 * (0 means no timeout).  If priority includes the PCATCH flag, signals may
 * interrupt the sleep; otherwise they are ignored while sleeping.  Returns
 * 0 if awakened and EWOULDBLOCK if the timeout expires.  If PCATCH is set
 * and a signal becomes pending, ERESTART is returned if the current system
 * call should be restarted when possible, and EINTR if the system call
 * should be interrupted by the signal.
 *
 * The lock argument is unlocked before the caller is suspended, and
 * re-locked before _sleep() returns.  If priority includes the PDROP
 * flag the lock is not re-locked before returning.
 */
int
_sleep(const void *ident, struct lock_object *lock, int priority,
    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
	struct thread *td;
	struct lock_class *class;
	uintptr_t lock_state;
	int catch, pri, rval, sleepq_flags;
	WITNESS_SAVE_DECL(lock_witness);

	td = curthread;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0, wmesg);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
	    "Sleeping on \"%s\"", wmesg);
	KASSERT(sbt != 0 || mtx_owned(&Giant) || lock != NULL,
	    ("sleeping without a lock"));
	KASSERT(ident != NULL, ("_sleep: NULL ident"));
	KASSERT(TD_IS_RUNNING(td), ("_sleep: curthread not running"));
	if (priority & PDROP)
		KASSERT(lock != NULL && lock != &Giant.lock_object,
		    ("PDROP requires a non-Giant lock"));
	if (lock != NULL)
		class = LOCK_CLASS(lock);
	else
		class = NULL;

	if (SCHEDULER_STOPPED_TD(td)) {
		if (lock != NULL && priority & PDROP)
			class->lc_unlock(lock);
		return (0);
	}
	catch = priority & PCATCH;
	pri = priority & PRIMASK;

	KASSERT(!TD_ON_SLEEPQ(td), ("recursive sleep"));

	if ((uintptr_t)ident >= (uintptr_t)&pause_wchan[0] &&
	    (uintptr_t)ident <= (uintptr_t)&pause_wchan[MAXCPU - 1])
		sleepq_flags = SLEEPQ_PAUSE;
	else
		sleepq_flags = SLEEPQ_SLEEP;
	if (catch)
		sleepq_flags |= SLEEPQ_INTERRUPTIBLE;

	sleepq_lock(ident);
	CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, td->td_proc->p_pid, td->td_name, wmesg, ident);

	if (lock == &Giant.lock_object)
		mtx_assert(&Giant, MA_OWNED);
	DROP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object &&
	    !(class->lc_flags & LC_SLEEPABLE)) {
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
	} else
		/* GCC needs to follow the Yellow Brick Road */
		lock_state = -1;

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there,
	 * and a wakeup or a SIGCONT (or both) could occur while we were
	 * stopped without resuming us.  Thus, we must be ready for sleep
	 * when cursig() is called.  If the wakeup happens while we're
	 * stopped, then td will no longer be on a sleep queue upon
	 * return from cursig().
	 */
	sleepq_add(ident, lock, wmesg, sleepq_flags, 0);
	if (sbt != 0)
		sleepq_set_timeout_sbt(ident, sbt, pr, flags);
	if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
		sleepq_release(ident);
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
		sleepq_lock(ident);
	}
	if (sbt != 0 && catch)
		rval = sleepq_timedwait_sig(ident, pri);
	else if (sbt != 0)
		rval = sleepq_timedwait(ident, pri);
	else if (catch)
		rval = sleepq_wait_sig(ident, pri);
	else {
		sleepq_wait(ident, pri);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0, wmesg);
#endif
	PICKUP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) {
		class->lc_lock(lock, lock_state);
		WITNESS_RESTORE(lock, lock_witness);
	}
	return (rval);
}

int
msleep_spin_sbt(const void *ident, struct mtx *mtx, const char *wmesg,
    sbintime_t sbt, sbintime_t pr, int flags)
{
	struct thread *td;
	int rval;
	WITNESS_SAVE_DECL(mtx);

	td = curthread;
	KASSERT(mtx != NULL, ("sleeping without a mutex"));
	KASSERT(ident != NULL, ("msleep_spin_sbt: NULL ident"));
	KASSERT(TD_IS_RUNNING(td), ("msleep_spin_sbt: curthread not running"));

	if (SCHEDULER_STOPPED_TD(td))
		return (0);

	sleepq_lock(ident);
	CTR5(KTR_PROC, "msleep_spin: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, td->td_proc->p_pid, td->td_name, wmesg, ident);

	DROP_GIANT();
	mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
	WITNESS_SAVE(&mtx->lock_object, mtx);
	mtx_unlock_spin(mtx);

	/*
	 * We put ourselves on the sleep queue and start our timeout.
	 */
	sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
	if (sbt != 0)
		sleepq_set_timeout_sbt(ident, sbt, pr, flags);

	/*
	 * Can't call ktrace with any spin locks held, since it may need to
	 * acquire the ktrace_mtx lock, and WITNESS_WARN considers it an
	 * error to hold any spin lock.  Thus, we have to drop the sleepq
	 * spin lock while we handle those requests.  This is safe because
	 * we have already placed our thread on the sleep queue.
	 */
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW)) {
		sleepq_release(ident);
		ktrcsw(1, 0, wmesg);
		sleepq_lock(ident);
	}
#endif
#ifdef WITNESS
	sleepq_release(ident);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"",
	    wmesg);
	sleepq_lock(ident);
#endif
	if (sbt != 0)
		rval = sleepq_timedwait(ident, 0);
	else {
		sleepq_wait(ident, 0);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0, wmesg);
#endif
	PICKUP_GIANT();
	mtx_lock_spin(mtx);
	WITNESS_RESTORE(&mtx->lock_object, mtx);
	return (rval);
}
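/*
 * msleep_spin_sbt() is the spin-mutex counterpart of _sleep() and is
 * normally reached through the msleep_spin() wrapper.  A minimal sketch of
 * a caller (illustrative only; "foo" is hypothetical):
 *
 *	mtx_lock_spin(&foo->spin_mtx);
 *	while (!foo->done)
 *		msleep_spin(foo, &foo->spin_mtx, "foospin", hz);
 *	mtx_unlock_spin(&foo->spin_mtx);
 *
 * Unlike _sleep(), the sleep priority is not changed and the sleep cannot
 * be interrupted by signals, as the code above passes 0 for the priority
 * and never sets SLEEPQ_INTERRUPTIBLE.
 */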
/*
 * pause_sbt() delays the calling thread by the given signed binary
 * time.  During cold bootup, pause_sbt() uses the DELAY() function
 * instead of the _sleep() function to do the waiting.  The "sbt"
 * argument must be greater than or equal to zero.  A "sbt" value of
 * zero is equivalent to a "sbt" value of one tick.
 */
int
pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
	KASSERT(sbt >= 0, ("pause_sbt: timeout must be >= 0"));

	/* silently convert invalid timeouts */
	if (sbt == 0)
		sbt = tick_sbt;

	if ((cold && curthread == &thread0) || kdb_active ||
	    SCHEDULER_STOPPED()) {
		/*
		 * We delay one second at a time to avoid overflowing the
		 * system specific DELAY() function(s):
		 */
		while (sbt >= SBT_1S) {
			DELAY(1000000);
			sbt -= SBT_1S;
		}
		/* Do the delay remainder, if any */
		sbt = howmany(sbt, SBT_1US);
		if (sbt > 0)
			DELAY(sbt);
		return (EWOULDBLOCK);
	}
	return (_sleep(&pause_wchan[curcpu], NULL,
	    (flags & C_CATCH) ? PCATCH : 0, wmesg, sbt, pr, flags));
}

/*
 * Make all threads sleeping on the specified identifier runnable.
 */
void
wakeup(const void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper) {
		KASSERT(ident != &proc0,
		    ("wakeup and wakeup_swapper and proc0"));
		kick_proc0();
	}
}

/*
 * Make a thread sleeping on the specified identifier runnable.
 * May wake more than one thread if a target thread is currently
 * swapped out.
 */
void
wakeup_one(const void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();
}

void
wakeup_any(const void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_UNFAIR,
	    0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Signal sleeping waiters after the counter has reached zero.
 */
void
_blockcount_wakeup(blockcount_t *bc, u_int old)
{

	KASSERT(_BLOCKCOUNT_WAITERS(old),
	    ("%s: no waiters on %p", __func__, bc));

	if (atomic_cmpset_int(&bc->__count, _BLOCKCOUNT_WAITERS_FLAG, 0))
		wakeup(bc);
}
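/*
 * The blockcount interface (see <sys/blockcount.h>) is typically used to
 * wait for a count of in-flight operations to drain to zero.  A rough
 * sketch of the usual pattern (illustrative only; "foo" is hypothetical
 * and the exact wrapper signatures live in the header):
 *
 *	blockcount_acquire(&foo->bc, 1);
 *	... do work ...
 *	blockcount_release(&foo->bc, 1);
 *
 *	blockcount_wait(&foo->bc, NULL, "foodrain", PVM);
 *
 * blockcount_wait() retries until the count is observed to be zero,
 * whereas a bare _blockcount_sleep() may return earlier, as described in
 * the comment below.
 */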
/*
 * Wait for a wakeup.  This does not guarantee that the count is still zero on
 * return and may be subject to transient wakeups.  Callers wanting a precise
 * answer should use blockcount_wait() with an interlock.
 *
 * Return 0 if there is no work to wait for, and 1 if we slept waiting for work
 * to complete.  In the latter case the counter value must be re-read.
 */
int
_blockcount_sleep(blockcount_t *bc, struct lock_object *lock, const char *wmesg,
    int prio)
{
	void *wchan;
	uintptr_t lock_state;
	u_int old;
	int ret;

	KASSERT(lock != &Giant.lock_object,
	    ("%s: cannot use Giant as the interlock", __func__));

	/*
	 * Synchronize with the fence in blockcount_release().  If we end up
	 * waiting, the sleepqueue lock acquisition will provide the required
	 * side effects.
	 *
	 * If there is no work to wait for, but waiters are present, try to put
	 * ourselves to sleep to avoid jumping ahead.
	 */
	if (atomic_load_acq_int(&bc->__count) == 0) {
		if (lock != NULL && (prio & PDROP) != 0)
			LOCK_CLASS(lock)->lc_unlock(lock);
		return (0);
	}
	lock_state = 0;
	wchan = bc;
	sleepq_lock(wchan);
	DROP_GIANT();
	if (lock != NULL)
		lock_state = LOCK_CLASS(lock)->lc_unlock(lock);
	old = blockcount_read(bc);
	do {
		if (_BLOCKCOUNT_COUNT(old) == 0) {
			sleepq_release(wchan);
			ret = 0;
			goto out;
		}
		if (_BLOCKCOUNT_WAITERS(old))
			break;
	} while (!atomic_fcmpset_int(&bc->__count, &old,
	    old | _BLOCKCOUNT_WAITERS_FLAG));
	sleepq_add(wchan, NULL, wmesg, 0, 0);
	sleepq_wait(wchan, prio);
	ret = 1;

out:
	PICKUP_GIANT();
	if (lock != NULL && (prio & PDROP) == 0)
		LOCK_CLASS(lock)->lc_lock(lock, lock_state);

	return (ret);
}

static void
kdb_switch(void)
{
	thread_unlock(curthread);
	kdb_backtrace();
	kdb_reenter();
	panic("%s: did not reenter debugger", __func__);
}

/*
 * The machine independent parts of context switching.
 *
 * The thread lock is required on entry and is no longer held on return.
 */
void
mi_switch(int flags)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;

	td = curthread;			/* XXX */
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1 || KERNEL_PANICKED(),
	    ("mi_switch: switch in a critical section"));
	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
	    ("mi_switch: switch must be voluntary or involuntary"));

	/*
	 * Don't perform context switches from the debugger.
	 */
	if (kdb_active)
		kdb_switch();
	if (SCHEDULER_STOPPED_TD(td))
		return;
	if (flags & SW_VOL) {
		td->td_ru.ru_nvcsw++;
		td->td_swvoltick = ticks;
	} else {
		td->td_ru.ru_nivcsw++;
		td->td_swinvoltick = ticks;
	}
#ifdef SCHED_STATS
	SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]);
#endif
	/*
	 * Compute the amount of time during which the current
	 * thread was running, and add that to its total so far.
	 */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	td->td_generation++;	/* bump preempt-detect counter */
	VM_CNT_INC(v_swtch);
	PCPU_SET(switchticks, ticks);
	CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);
#ifdef KDTRACE_HOOKS
	if (SDT_PROBES_ENABLED() &&
	    ((flags & SW_PREEMPT) != 0 || ((flags & SW_INVOL) != 0 &&
	    (flags & SW_TYPE_MASK) == SWT_NEEDRESCHED)))
		SDT_PROBE0(sched, , , preempt);
#endif
	sched_switch(td, flags);
	CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
	spinlock_exit();
}
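/*
 * mi_switch() is entered with the caller's thread lock held and the lock is
 * released as part of the switch.  A minimal sketch of a voluntary switch,
 * following the same pattern as kern_yield() and sys_yield() below:
 *
 *	thread_lock(curthread);
 *	mi_switch(SW_VOL | SWT_RELINQUISH);
 *
 * By the time the thread runs again its thread lock has been dropped.
 */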
/*
 * Change thread state to be runnable, placing it on the run queue if
 * it is in memory.  If it is swapped out, return true so our caller
 * will know to awaken the swapper.
 *
 * Requires the thread lock on entry, drops on exit.
 */
int
setrunnable(struct thread *td, int srqflags)
{
	int swapin;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(td->td_proc->p_state != PRS_ZOMBIE,
	    ("setrunnable: pid %d is a zombie", td->td_proc->p_pid));

	swapin = 0;
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		break;
	case TDS_CAN_RUN:
		KASSERT((td->td_flags & TDF_INMEM) != 0,
		    ("setrunnable: td %p not in mem, flags 0x%X inhibit 0x%X",
		    td, td->td_flags, td->td_inhibitors));
		/* unlocks thread lock according to flags */
		sched_wakeup(td, srqflags);
		return (0);
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * arrange to swap in this process.
		 */
		if (td->td_inhibitors == TDI_SWAPPED &&
		    (td->td_flags & TDF_SWAPINREQ) == 0) {
			td->td_flags |= TDF_SWAPINREQ;
			swapin = 1;
		}
		break;
	default:
		panic("setrunnable: state 0x%x", td->td_state);
	}
	if ((srqflags & (SRQ_HOLD | SRQ_HOLDTD)) == 0)
		thread_unlock(td);

	return (swapin);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;

	nrun = sched_load();
	avg = &averunnable;

	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset_sbt(&loadav_callout,
	    SBT_1US * (4000000 + (int)(random() % 2000001)), SBT_1US,
	    loadav, NULL, C_DIRECT_EXEC | C_PREL(32));
}

/* ARGSUSED */
static void
synch_setup(void *dummy)
{
	callout_init(&loadav_callout, 1);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
}

int
should_yield(void)
{

	return ((u_int)ticks - (u_int)curthread->td_swvoltick >= hogticks);
}

void
maybe_yield(void)
{

	if (should_yield())
		kern_yield(PRI_USER);
}

void
kern_yield(int prio)
{
	struct thread *td;

	td = curthread;
	DROP_GIANT();
	thread_lock(td);
	if (prio == PRI_USER)
		prio = td->td_user_pri;
	if (prio >= 0)
		sched_prio(td, prio);
	mi_switch(SW_VOL | SWT_RELINQUISH);
	PICKUP_GIANT();
}

/*
 * General purpose yield system call.
 */
int
sys_yield(struct thread *td, struct yield_args *uap)
{

	thread_lock(td);
	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL | SWT_RELINQUISH);
	td->td_retval[0] = 0;
	return (0);
}
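/*
 * should_yield()/maybe_yield() are intended for long-running kernel loops
 * that would otherwise hog a CPU.  A minimal sketch of a typical caller
 * (illustrative only; the loop body is hypothetical):
 *
 *	for (i = 0; i < huge_count; i++) {
 *		process_item(i);
 *		maybe_yield();
 *	}
 *
 * maybe_yield() switches out voluntarily once the thread has run for at
 * least hogticks ticks since its last voluntary switch.
 */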