/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <machine/cpu.h>

static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
    NULL);

int	hogticks;
static uint8_t pause_wchan[MAXCPU];

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
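
/*
 * Illustrative note (not part of the original file): each 5-second sample
 * applies an exponential moving average in FSHIFT fixed point,
 *
 *	avg' = avg * exp(-t/T) + n * (1 - exp(-t/T)),
 *
 * where t = 5 seconds, T is 60, 300 or 900 seconds, and n is the
 * instantaneous load.  For example, starting from avg = 0 with n = 1,
 * the 1-minute figure after one sample is 1 - 0.9200 ~= 0.08, and it
 * converges toward n as samples accumulate.
 */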
/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, FSCALE, "");

static void	loadav(void *arg);

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE(sched, , , preempt);

static void
sleepinit(void *unused)
{

	hogticks = (hz / 10) * 2;	/* Default only. */
	init_sleepqueues();
}

/*
 * vmem tries to lock the sleepq mutexes when freeing kva, so make sure
 * it is available.
 */
SYSINIT(sleepinit, SI_SUB_KMEM, SI_ORDER_ANY, sleepinit, NULL);

/*
 * General sleep call.  Suspends the current thread until a wakeup is
 * performed on the specified identifier.  The thread will then be made
 * runnable with the specified priority.  Sleeps at most sbt units of time
 * (0 means no timeout).  If pri includes the PCATCH flag, let signals
 * interrupt the sleep, otherwise ignore them while sleeping.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal becomes pending, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the
 * system call should be interrupted by the signal.
 *
 * The lock argument is unlocked before the caller is suspended, and
 * re-locked before _sleep() returns.  If priority includes the PDROP
 * flag the lock is not re-locked before returning.
 */
int
_sleep(void *ident, struct lock_object *lock, int priority,
    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
	struct thread *td;
	struct lock_class *class;
	uintptr_t lock_state;
	int catch, pri, rval, sleepq_flags;
	WITNESS_SAVE_DECL(lock_witness);

	td = curthread;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0, wmesg);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
	    "Sleeping on \"%s\"", wmesg);
	KASSERT(sbt != 0 || mtx_owned(&Giant) || lock != NULL,
	    ("sleeping without a lock"));
	KASSERT(ident != NULL, ("_sleep: NULL ident"));
	KASSERT(TD_IS_RUNNING(td), ("_sleep: curthread not running"));
	if (priority & PDROP)
		KASSERT(lock != NULL && lock != &Giant.lock_object,
		    ("PDROP requires a non-Giant lock"));
	if (lock != NULL)
		class = LOCK_CLASS(lock);
	else
		class = NULL;

	if (SCHEDULER_STOPPED_TD(td)) {
		if (lock != NULL && priority & PDROP)
			class->lc_unlock(lock);
		return (0);
	}
	catch = priority & PCATCH;
	pri = priority & PRIMASK;

	KASSERT(!TD_ON_SLEEPQ(td), ("recursive sleep"));

	if ((uint8_t *)ident >= &pause_wchan[0] &&
	    (uint8_t *)ident <= &pause_wchan[MAXCPU - 1])
		sleepq_flags = SLEEPQ_PAUSE;
	else
		sleepq_flags = SLEEPQ_SLEEP;
	if (catch)
		sleepq_flags |= SLEEPQ_INTERRUPTIBLE;

	sleepq_lock(ident);
	CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, td->td_proc->p_pid, td->td_name, wmesg, ident);

	if (lock == &Giant.lock_object)
		mtx_assert(&Giant, MA_OWNED);
	DROP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object &&
	    !(class->lc_flags & LC_SLEEPABLE)) {
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
	} else
		/* GCC needs to follow the Yellow Brick Road */
		lock_state = -1;

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there,
	 * and a wakeup or a SIGCONT (or both) could occur while we were
	 * stopped without resuming us.  Thus, we must be ready for sleep
	 * when cursig() is called.  If the wakeup happens while we're
	 * stopped, then td will no longer be on a sleep queue upon
	 * return from cursig().
	 */
	sleepq_add(ident, lock, wmesg, sleepq_flags, 0);
	if (sbt != 0)
		sleepq_set_timeout_sbt(ident, sbt, pr, flags);
	if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
		sleepq_release(ident);
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
		sleepq_lock(ident);
	}
	if (sbt != 0 && catch)
		rval = sleepq_timedwait_sig(ident, pri);
	else if (sbt != 0)
		rval = sleepq_timedwait(ident, pri);
	else if (catch)
		rval = sleepq_wait_sig(ident, pri);
	else {
		sleepq_wait(ident, pri);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0, wmesg);
#endif
	PICKUP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) {
		class->lc_lock(lock, lock_state);
		WITNESS_RESTORE(lock, lock_witness);
	}
	return (rval);
}
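
/*
 * Illustrative example (hypothetical driver code, not from this file):
 * callers normally reach _sleep() through wrappers such as msleep(9),
 * re-checking their condition in a loop to tolerate spurious wakeups:
 *
 *	int error;
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (sc->sc_ready == 0) {
 *		error = msleep(&sc->sc_ready, &sc->sc_mtx,
 *		    PRIBIO | PCATCH, "scrdy", 0);
 *		if (error != 0)
 *			break;
 *	}
 *	mtx_unlock(&sc->sc_mtx);
 *
 * Here "sc" and "sc_ready" are hypothetical; the waking side would set
 * sc_ready and call wakeup(&sc->sc_ready).
 */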
int
msleep_spin_sbt(void *ident, struct mtx *mtx, const char *wmesg,
    sbintime_t sbt, sbintime_t pr, int flags)
{
	struct thread *td;
	int rval;
	WITNESS_SAVE_DECL(mtx);

	td = curthread;
	KASSERT(mtx != NULL, ("sleeping without a mutex"));
	KASSERT(ident != NULL, ("msleep_spin_sbt: NULL ident"));
	KASSERT(TD_IS_RUNNING(td), ("msleep_spin_sbt: curthread not running"));

	if (SCHEDULER_STOPPED_TD(td))
		return (0);

	sleepq_lock(ident);
	CTR5(KTR_PROC, "msleep_spin: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, td->td_proc->p_pid, td->td_name, wmesg, ident);

	DROP_GIANT();
	mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
	WITNESS_SAVE(&mtx->lock_object, mtx);
	mtx_unlock_spin(mtx);

	/*
	 * We put ourselves on the sleep queue and start our timeout.
	 */
	sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
	if (sbt != 0)
		sleepq_set_timeout_sbt(ident, sbt, pr, flags);

	/*
	 * Can't call ktrace with any spin locks held, since it may need
	 * to acquire the ktrace_mtx lock, and WITNESS_WARN considers it
	 * an error to hold any spin lock.  Thus, we have to drop the
	 * sleepq spin lock while we handle those requests.  This is safe
	 * since we have placed our thread on the sleep queue already.
	 */
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW)) {
		sleepq_release(ident);
		ktrcsw(1, 0, wmesg);
		sleepq_lock(ident);
	}
#endif
#ifdef WITNESS
	sleepq_release(ident);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"",
	    wmesg);
	sleepq_lock(ident);
#endif
	if (sbt != 0)
		rval = sleepq_timedwait(ident, 0);
	else {
		sleepq_wait(ident, 0);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0, wmesg);
#endif
	PICKUP_GIANT();
	mtx_lock_spin(mtx);
	WITNESS_RESTORE(&mtx->lock_object, mtx);
	return (rval);
}

/*
 * pause_sbt() delays the calling thread by the given signed binary
 * time.  During cold bootup, pause_sbt() uses the DELAY() function
 * instead of the _sleep() function to do the waiting.  The "sbt"
 * argument must be greater than or equal to zero.  An "sbt" value of
 * zero is equivalent to an "sbt" value of one tick.
 */
int
pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
	KASSERT(sbt >= 0, ("pause_sbt: timeout must be >= 0"));

	/* silently convert invalid timeouts */
	if (sbt == 0)
		sbt = tick_sbt;

	if ((cold && curthread == &thread0) || kdb_active ||
	    SCHEDULER_STOPPED()) {
		/*
		 * We delay one second at a time to avoid overflowing the
		 * system specific DELAY() function(s):
		 */
		while (sbt >= SBT_1S) {
			DELAY(1000000);
			sbt -= SBT_1S;
		}
		/* Do the delay remainder, if any */
		sbt = howmany(sbt, SBT_1US);
		if (sbt > 0)
			DELAY(sbt);
		return (EWOULDBLOCK);
	}
	return (_sleep(&pause_wchan[curcpu], NULL,
	    (flags & C_CATCH) ? PCATCH : 0, wmesg, sbt, pr, flags));
}
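
/*
 * Illustrative example (not part of this file): a caller that merely
 * needs to back off for roughly 100ms could use the tick-based pause(9)
 * wrapper or pause_sbt() directly; both sleep on the per-CPU pause_wchan
 * above:
 *
 *	pause("backoff", hz / 10);
 *	pause_sbt("backoff", 100 * SBT_1MS, 0, 0);
 */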
A "sbt" value of 308 * zero is equivalent to a "sbt" value of one tick. 309 */ 310 int 311 pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags) 312 { 313 KASSERT(sbt >= 0, ("pause_sbt: timeout must be >= 0")); 314 315 /* silently convert invalid timeouts */ 316 if (sbt == 0) 317 sbt = tick_sbt; 318 319 if ((cold && curthread == &thread0) || kdb_active || 320 SCHEDULER_STOPPED()) { 321 /* 322 * We delay one second at a time to avoid overflowing the 323 * system specific DELAY() function(s): 324 */ 325 while (sbt >= SBT_1S) { 326 DELAY(1000000); 327 sbt -= SBT_1S; 328 } 329 /* Do the delay remainder, if any */ 330 sbt = howmany(sbt, SBT_1US); 331 if (sbt > 0) 332 DELAY(sbt); 333 return (EWOULDBLOCK); 334 } 335 return (_sleep(&pause_wchan[curcpu], NULL, 336 (flags & C_CATCH) ? PCATCH : 0, wmesg, sbt, pr, flags)); 337 } 338 339 /* 340 * Potentially release the last reference for refcount. Check for 341 * unlikely conditions and signal the caller as to whether it was 342 * the final ref. 343 */ 344 bool 345 refcount_release_last(volatile u_int *count, u_int n, u_int old) 346 { 347 u_int waiter; 348 349 waiter = old & REFCOUNT_WAITER; 350 old = REFCOUNT_COUNT(old); 351 if (__predict_false(n > old || REFCOUNT_SATURATED(old))) { 352 /* 353 * Avoid multiple destructor invocations if underflow occurred. 354 * This is not perfect since the memory backing the containing 355 * object may already have been reallocated. 356 */ 357 _refcount_update_saturated(count); 358 return (false); 359 } 360 361 /* 362 * Attempt to atomically clear the waiter bit. Wakeup waiters 363 * if we are successful. 364 */ 365 if (waiter != 0 && atomic_cmpset_int(count, REFCOUNT_WAITER, 0)) 366 wakeup(__DEVOLATILE(u_int *, count)); 367 368 /* 369 * Last reference. Signal the user to call the destructor. 370 * 371 * Ensure that the destructor sees all updates. The fence_rel 372 * at the start of refcount_releasen synchronizes with this fence. 373 */ 374 atomic_thread_fence_acq(); 375 return (true); 376 } 377 378 /* 379 * Wait for a refcount wakeup. This does not guarantee that the ref is still 380 * zero on return and may be subject to transient wakeups. Callers wanting 381 * a precise answer should use refcount_wait(). 382 */ 383 void 384 refcount_sleep(volatile u_int *count, const char *wmesg, int pri) 385 { 386 void *wchan; 387 u_int old; 388 389 if (REFCOUNT_COUNT(*count) == 0) 390 return; 391 wchan = __DEVOLATILE(void *, count); 392 sleepq_lock(wchan); 393 old = *count; 394 for (;;) { 395 if (REFCOUNT_COUNT(old) == 0) { 396 sleepq_release(wchan); 397 return; 398 } 399 if (old & REFCOUNT_WAITER) 400 break; 401 if (atomic_fcmpset_int(count, &old, old | REFCOUNT_WAITER)) 402 break; 403 } 404 sleepq_add(wchan, NULL, wmesg, 0, 0); 405 sleepq_wait(wchan, pri); 406 } 407 408 /* 409 * Make all threads sleeping on the specified identifier runnable. 410 */ 411 void 412 wakeup(void *ident) 413 { 414 int wakeup_swapper; 415 416 sleepq_lock(ident); 417 wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0); 418 sleepq_release(ident); 419 if (wakeup_swapper) { 420 KASSERT(ident != &proc0, 421 ("wakeup and wakeup_swapper and proc0")); 422 kick_proc0(); 423 } 424 } 425 426 /* 427 * Make a thread sleeping on the specified identifier runnable. 428 * May wake more than one thread if a target thread is currently 429 * swapped out. 
/*
 * Make all threads sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper) {
		KASSERT(ident != &proc0,
		    ("wakeup and wakeup_swapper and proc0"));
		kick_proc0();
	}
}

/*
 * Make a thread sleeping on the specified identifier runnable.
 * May wake more than one thread if a target thread is currently
 * swapped out.
 */
void
wakeup_one(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Like wakeup_one(), except that no guarantee is made about which of
 * the sleeping threads is chosen (SLEEPQ_UNFAIR).
 */
void
wakeup_any(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_UNFAIR,
	    0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();
}
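
/*
 * Illustrative note (not part of this file): the producer side of the
 * msleep() pattern shown earlier chooses among these variants: wakeup()
 * when every waiter must re-check the condition, wakeup_one() when a
 * single waiter can consume the event (avoiding a thundering herd), and
 * wakeup_any() when, additionally, no fairness among waiters is needed.
 * With the hypothetical "sc" from the earlier example:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_ready = 1;
 *	wakeup_one(&sc->sc_ready);
 *	mtx_unlock(&sc->sc_mtx);
 */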
static void
kdb_switch(void)
{
	thread_unlock(curthread);
	kdb_backtrace();
	kdb_reenter();
	panic("%s: did not reenter debugger", __func__);
}

/*
 * The machine independent parts of context switching.
 */
void
mi_switch(int flags, struct thread *newtd)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;

	td = curthread;			/* XXX */
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1 || panicstr,
	    ("mi_switch: switch in a critical section"));
	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
	    ("mi_switch: switch must be voluntary or involuntary"));
	KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));

	/*
	 * Don't perform context switches from the debugger.
	 */
	if (kdb_active)
		kdb_switch();
	if (SCHEDULER_STOPPED_TD(td))
		return;
	if (flags & SW_VOL) {
		td->td_ru.ru_nvcsw++;
		td->td_swvoltick = ticks;
	} else {
		td->td_ru.ru_nivcsw++;
		td->td_swinvoltick = ticks;
	}
#ifdef SCHED_STATS
	SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]);
#endif
	/*
	 * Compute the amount of time during which the current
	 * thread was running, and add that to its total so far.
	 */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	td->td_generation++;	/* bump preempt-detect counter */
	VM_CNT_INC(v_swtch);
	PCPU_SET(switchticks, ticks);
	CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);
#ifdef KDTRACE_HOOKS
	if (SDT_PROBES_ENABLED() &&
	    ((flags & SW_PREEMPT) != 0 || ((flags & SW_INVOL) != 0 &&
	    (flags & SW_TYPE_MASK) == SWT_NEEDRESCHED)))
		SDT_PROBE0(sched, , , preempt);
#endif
	sched_switch(td, newtd, flags);
	CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
 * Change thread state to be runnable, placing it on the run queue if
 * it is in memory.  If it is swapped out, return true so our caller
 * will know to awaken the swapper.
 */
int
setrunnable(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(td->td_proc->p_state != PRS_ZOMBIE,
	    ("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return (0);
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * then arrange to swap in this process.  Otherwise just
		 * return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return (0);
		/* FALLTHROUGH */
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((td->td_flags & TDF_INMEM) == 0) {
		if ((td->td_flags & TDF_SWAPINREQ) == 0) {
			td->td_flags |= TDF_SWAPINREQ;
			return (1);
		}
	} else
		sched_wakeup(td);
	return (0);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;

	nrun = sched_load();
	avg = &averunnable;

	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset_sbt(&loadav_callout,
	    SBT_1US * (4000000 + (int)(random() % 2000001)), SBT_1US,
	    loadav, NULL, C_DIRECT_EXEC | C_PREL(32));
}

/* ARGSUSED */
static void
synch_setup(void *dummy)
{
	callout_init(&loadav_callout, 1);

	/* Kick off timeout driven events by calling loadav() the first time. */
	loadav(NULL);
}

int
should_yield(void)
{

	return ((u_int)ticks - (u_int)curthread->td_swvoltick >= hogticks);
}

void
maybe_yield(void)
{

	if (should_yield())
		kern_yield(PRI_USER);
}

void
kern_yield(int prio)
{
	struct thread *td;

	td = curthread;
	DROP_GIANT();
	thread_lock(td);
	if (prio == PRI_USER)
		prio = td->td_user_pri;
	if (prio >= 0)
		sched_prio(td, prio);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
}

/*
 * General purpose yield system call.
 */
int
sys_yield(struct thread *td, struct yield_args *uap)
{

	thread_lock(td);
	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	td->td_retval[0] = 0;
	return (0);
}
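
/*
 * Illustrative example (hypothetical, not part of this file): long-running
 * kernel loops sprinkle maybe_yield() so that a thread which has run for
 * hogticks without a voluntary switch gives up the CPU:
 *
 *	for (i = 0; i < npages; i++) {
 *		process_page(obj, i);	(hypothetical helper)
 *		maybe_yield();
 *	}
 */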