/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sobject.h>
#include <sys/sleepq.h>
#include <sys/cpuvar.h>
#include <sys/condvar.h>
#include <sys/condvar_impl.h>
#include <sys/schedctl.h>
#include <sys/procfs.h>
#include <sys/sdt.h>
#include <sys/callo.h>

/*
 * CV_MAX_WAITERS is the maximum number of waiters we track; once
 * the number becomes higher than that, we look at the sleepq to
 * see whether there are *really* any waiters.
 */
#define	CV_MAX_WAITERS		1024		/* must be power of 2 */
#define	CV_WAITERS_MASK		(CV_MAX_WAITERS - 1)

/*
 * Threads don't "own" condition variables.
 */
/* ARGSUSED */
static kthread_t *
cv_owner(void *cvp)
{
	return (NULL);
}

/*
 * Unsleep a thread that's blocked on a condition variable.
 */
static void
cv_unsleep(kthread_t *t)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_head_t *sqh = SQHASH(cvp);

	ASSERT(THREAD_LOCK_HELD(t));

	if (cvp == NULL)
		panic("cv_unsleep: thread %p not on sleepq %p",
		    (void *)t, (void *)sqh);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	sleepq_unsleep(t);
	if (cvp->cv_waiters != CV_MAX_WAITERS)
		cvp->cv_waiters--;
	disp_lock_exit_high(&sqh->sq_lock);
	CL_SETRUN(t);
}

/*
 * Change the priority of a thread that's blocked on a condition variable.
 */
static void
cv_change_pri(kthread_t *t, pri_t pri, pri_t *t_prip)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_t *sqp = t->t_sleepq;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(&SQHASH(cvp)->sq_queue == sqp);

	if (cvp == NULL)
		panic("cv_change_pri: %p not on sleep queue", (void *)t);
	sleepq_dequeue(t);
	*t_prip = pri;
	sleepq_insert(sqp, t);
}

/*
 * The sobj_ops vector exports a set of functions needed when a thread
 * is asleep on a synchronization object of this type.
 */
static sobj_ops_t cv_sobj_ops = {
	SOBJ_CV, cv_owner, cv_unsleep, cv_change_pri
};

/* ARGSUSED */
void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	((condvar_impl_t *)cvp)->cv_waiters = 0;
}
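/*
 * Illustrative sketch of the usual cv lifecycle (the 'softc' structure
 * and its fields are hypothetical, not from this file): the cv is
 * embedded in the object it guards, initialized when the object is
 * created and destroyed at teardown, after the last waiter is gone.
 *
 *	struct softc {
 *		kmutex_t	sc_lock;
 *		kcondvar_t	sc_cv;
 *		int		sc_ready;
 *	};
 *
 *	mutex_init(&sc->sc_lock, NULL, MUTEX_DEFAULT, NULL);
 *	cv_init(&sc->sc_cv, NULL, CV_DEFAULT, NULL);
 *	...
 *	cv_destroy(&sc->sc_cv);
 *	mutex_destroy(&sc->sc_lock);
 */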
/*
 * cv_destroy is not currently needed, but is part of the DDI.
 * This is in case cv_init ever needs to allocate something for a cv.
 */
/* ARGSUSED */
void
cv_destroy(kcondvar_t *cvp)
{
	ASSERT((((condvar_impl_t *)cvp)->cv_waiters & CV_WAITERS_MASK) == 0);
}

/*
 * The cv_block() function blocks a thread on a condition variable
 * by putting it in a hashed sleep queue associated with the
 * synchronization object.
 *
 * Threads are taken off the hashed sleep queues via calls to
 * cv_signal(), cv_broadcast(), or cv_unsleep().
 */
static void
cv_block(condvar_impl_t *cvp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	sleepq_head_t *sqh;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	t->t_schedflag &= ~TS_SIGNALLED;
	CL_SLEEP(t);			/* assign kernel priority */
	t->t_wchan = (caddr_t)cvp;
	t->t_sobj_ops = &cv_sobj_ops;
	DTRACE_SCHED(sleep);

	/*
	 * The check for t_intr is to avoid doing the accounting
	 * for an interrupt thread on the still-pinned lwp's
	 * statistics.
	 */
	if (lwp != NULL && t->t_intr == NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
	}

	sqh = SQHASH(cvp);
	disp_lock_enter_high(&sqh->sq_lock);
	if (cvp->cv_waiters < CV_MAX_WAITERS)
		cvp->cv_waiters++;
	ASSERT(cvp->cv_waiters <= CV_MAX_WAITERS);
	THREAD_SLEEP(t, &sqh->sq_lock);
	sleepq_insert(&sqh->sq_queue, t);
	/*
	 * THREAD_SLEEP() moves curthread->t_lockp to point to the
	 * lock sqh->sq_lock.  This lock is later released by the caller
	 * when it calls thread_unlock() on curthread.
	 */
}

#define	cv_block_sig(t, cvp)	\
	{ (t)->t_flag |= T_WAKEABLE; cv_block(cvp); }

/*
 * Block on the indicated condition variable and release the
 * associated kmutex while blocked.
 */
void
cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	if (panicstr)
		return;

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	thread_lock(curthread);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(curthread);	/* unlock the waiters field */
	mutex_exit(mp);
	swtch();
	mutex_enter(mp);
}
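/*
 * A minimal caller sketch (hypothetical 'sc' names as above): because
 * a wakeup only means the condition *may* now hold, callers must
 * re-test their predicate in a loop with the mutex held.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready)
 *		cv_wait(&sc->sc_cv, &sc->sc_lock);
 *	... act on the now-true condition ...
 *	mutex_exit(&sc->sc_lock);
 */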
static void
cv_wakeup(void *arg)
{
	kthread_t *t = arg;

	/*
	 * This mutex is acquired and released in order to make sure that
	 * the wakeup does not happen before the block itself happens.
	 */
	mutex_enter(t->t_wait_mp);
	mutex_exit(t->t_wait_mp);
	setrun(t);
	t->t_wait_mp = NULL;
}

/*
 * Same as cv_wait except the thread will unblock at 'tim'
 * (an absolute time) if it hasn't already unblocked.
 *
 * Returns the amount of time left from the original 'tim' value
 * when it was unblocked.
 */
clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	callout_id_t id;
	clock_t timeleft;
	int signalled;

	if (panicstr)
		return (-1);

	timeleft = tim - lbolt;
	if (timeleft <= 0)
		return (-1);
	t->t_wait_mp = mp;
	id = realtime_timeout_default((void (*)(void *))cv_wakeup, t, timeleft);
	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	/*
	 * Get the time left.  untimeout() returns -1 if the timeout has
	 * occurred, or the time remaining otherwise.  If the time remaining
	 * is zero, the timeout has occurred between when we were awakened
	 * and when we called untimeout.  We will treat this as if the
	 * timeout has occurred and set timeleft to -1.
	 */
	timeleft = (t->t_wait_mp == NULL) ? -1 : untimeout_default(id, 0);
	mutex_enter(mp);
	if (timeleft <= 0) {
		timeleft = -1;
		if (signalled)	/* avoid consuming the cv_signal() */
			cv_signal(cvp);
	}
	return (timeleft);
}

int
cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
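/*
 * Sketch of a typical interruptible wait (hypothetical 'sc' names):
 * a zero return means a signal is pending, and the caller usually
 * backs out with EINTR.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready) {
 *		if (cv_wait_sig(&sc->sc_cv, &sc->sc_lock) == 0) {
 *			mutex_exit(&sc->sc_lock);
 *			return (EINTR);
 *		}
 *	}
 *	mutex_exit(&sc->sc_lock);
 */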
static clock_t
cv_timedwait_sig_internal(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int flag)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending = 0;
	callout_id_t id;
	clock_t rval = 1;
	clock_t timeleft;
	int signalled = 0;

	/*
	 * If the flag is 0, then the timeout_generic() call below creates
	 * a regular realtime timeout.  If the flag is CALLOUT_FLAG_HRESTIME,
	 * it creates a special realtime timeout which is affected by
	 * changes to hrestime.  See callo.h for details.
	 */
	ASSERT((flag == 0) || (flag == CALLOUT_FLAG_HRESTIME));
	if (panicstr)
		return (rval);

	/*
	 * If there is no lwp, then we don't need to wait for a signal.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr)
		return (cv_timedwait(cvp, mp, tim));

	/*
	 * If tim is less than or equal to lbolt, then the timeout
	 * has already occurred.  So just check to see if there is a signal
	 * pending.  If so, return 0 indicating that there is a signal
	 * pending.  Else return -1 indicating that the timeout occurred.
	 * No need to wait on anything.
	 */
	timeleft = tim - lbolt;
	if (timeleft <= 0) {
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		rval = -1;
		goto out;
	}

	/*
	 * Set the timeout and wait.
	 */
	cancel_pending = schedctl_cancel_pending();
	t->t_wait_mp = mp;
	id = timeout_generic(CALLOUT_REALTIME, (void (*)(void *))cv_wakeup, t,
	    TICK_TO_NSEC(timeleft), nsec_per_tick, flag);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;

	/*
	 * Untimeout the thread.  untimeout() returns -1 if the timeout has
	 * occurred, or the time remaining otherwise.  If the time remaining
	 * is zero, the timeout has occurred between when we were awakened
	 * and when we called untimeout.  We will treat this as if the
	 * timeout has occurred and set rval to -1.
	 */
	rval = (t->t_wait_mp == NULL) ? -1 : untimeout_default(id, 0);
	mutex_enter(mp);
	if (rval <= 0)
		rval = -1;

	/*
	 * Check to see if a signal is pending.  If so, regardless of whether
	 * or not we were awakened due to the signal, the signal is now
	 * pending and a return of 0 has the highest priority.
	 */
out:
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval <= 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}

/*
 * Returns:
 *	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast().
 *		   (returns time remaining)
 *
 * cv_timedwait_sig() is now part of the DDI.
 *
 * This function is now just a wrapper for cv_timedwait_sig_internal().
 */
clock_t
cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	return (cv_timedwait_sig_internal(cvp, mp, tim, 0));
}
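/*
 * Sketch of a bounded, interruptible wait (hypothetical 'sc' names):
 * 'tim' is an absolute lbolt value, so callers typically compute the
 * deadline once and reuse it across spurious wakeups.
 *
 *	clock_t deadline = lbolt + drv_usectohz(timeout_usec);
 *	clock_t r;
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready) {
 *		r = cv_timedwait_sig(&sc->sc_cv, &sc->sc_lock, deadline);
 *		if (r <= 0) {
 *			mutex_exit(&sc->sc_lock);
 *			return (r == 0 ? EINTR : ETIME);
 *		}
 *	}
 *	mutex_exit(&sc->sc_lock);
 */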
/*
 * Like cv_wait_sig_swap but allows the caller to indicate (with a
 * non-NULL sigret) that they will take care of signalling the cv
 * after wakeup, if necessary.  This is a vile hack that should only
 * be used when no other option is available; almost all callers
 * should just use cv_wait_sig_swap (which takes care of the cv_signal
 * stuff automatically) instead.
 */
int
cv_wait_sig_swap_core(kcondvar_t *cvp, kmutex_t *mp, int *sigret)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;	/* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	/* I can be swapped now */
	curthread->t_schedflag &= ~TS_DONT_SWAP;
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	/* TS_DONT_SWAP set by disp() */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */
	}
	return (rval);
}

/*
 * Same as cv_wait_sig but the thread can be swapped out while waiting.
 * This should only be used when we know we aren't holding any locks.
 */
int
cv_wait_sig_swap(kcondvar_t *cvp, kmutex_t *mp)
{
	return (cv_wait_sig_swap_core(cvp, mp, NULL));
}

void
cv_signal(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		if (cp->cv_waiters & CV_WAITERS_MASK) {
			kthread_t *t;
			cp->cv_waiters--;
			t = sleepq_wakeone_chan(&sqh->sq_queue, cp);
			/*
			 * If cv_waiters is non-zero (and less than
			 * CV_MAX_WAITERS) there should be a thread
			 * in the queue.
			 */
			ASSERT(t != NULL);
		} else if (sleepq_wakeone_chan(&sqh->sq_queue, cp) == NULL) {
			cp->cv_waiters = 0;
		}
		disp_lock_exit(&sqh->sq_lock);
	}
}

void
cv_broadcast(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		sleepq_wakeall_chan(&sqh->sq_queue, cp);
		cp->cv_waiters = 0;
		disp_lock_exit(&sqh->sq_lock);
	}
}
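/*
 * Sketch of the producer side (hypothetical 'sc' names): the state
 * change and the wakeup are both made under the same mutex the waiters
 * use, so the predicate cannot change between a waiter's test and its
 * sleep.  cv_signal() wakes one waiter; cv_broadcast() wakes them all
 * and is the safe default when waiters wait for different predicates.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_ready = 1;
 *	cv_broadcast(&sc->sc_cv);
 *	mutex_exit(&sc->sc_lock);
 */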
/*
 * Same as cv_wait(), but wakes up (after wakeup_time milliseconds) to check
 * for requests to stop, like cv_wait_sig() but without dealing with signals.
 * This is a horrible kludge.  It is evil.  It is vile.  It is swill.
 * If your code has to call this function then your code is the same.
 */
void
cv_wait_stop(kcondvar_t *cvp, kmutex_t *mp, int wakeup_time)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	callout_id_t id;
	clock_t tim;

	if (panicstr)
		return;

	/*
	 * If there is no lwp, then we don't need to eventually stop it.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return;
	}

	/*
	 * Wakeup in wakeup_time milliseconds, i.e., human time.
	 */
	tim = lbolt + MSEC_TO_TICK(wakeup_time);
	t->t_wait_mp = mp;
	id = realtime_timeout_default((void (*)(void *))cv_wakeup, t,
	    tim - lbolt);
	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	/* ASSERT(no locks are held); */
	swtch();
	if (t->t_wait_mp != NULL)
		(void) untimeout_default(id, 0);

	/*
	 * Check for reasons to stop, if lwp_nostop is not true.
	 * See issig_forreal() for explanations of the various stops.
	 */
	mutex_enter(&p->p_lock);
	while (lwp->lwp_nostop == 0 && !(p->p_flag & SEXITLWPS)) {
		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if (t->t_proc_flag & TP_PAUSE) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}
		/*
		 * System checkpoint.
		 */
		if (t->t_proc_flag & TP_CHKPT) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}
		/*
		 * Honor fork1(), watchpoint activity (remapping a page),
		 * and lwp_suspend() requests.
		 */
		if ((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}
		/*
		 * Honor /proc requested stop.
		 */
		if (t->t_proc_flag & TP_PRSTOP) {
			stop(PR_REQUESTED, 0);
		}
		/*
		 * If some lwp in the process has already stopped
		 * showing PR_JOBCONTROL, stop in sympathy with it.
		 */
		if (p->p_stopsig && t != p->p_agenttp) {
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}
		break;
	}
	mutex_exit(&p->p_lock);
	mutex_enter(mp);
}
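/*
 * Sketch of a cv_wait_stop() caller (hypothetical 'sc' names): the
 * function returns after at most wakeup_time milliseconds whether or
 * not the condition changed, so it is always wrapped in a predicate
 * loop.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready)
 *		cv_wait_stop(&sc->sc_cv, &sc->sc_lock, 100);
 *	mutex_exit(&sc->sc_lock);
 */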
/*
 * Like cv_timedwait_sig(), but takes an absolute hires future time
 * rather than a future time in clock ticks.  Will not return showing
 * that a timeout occurred until the future time is passed.
 * If 'when' is a NULL pointer, no timeout will occur.
 * Returns:
 *	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast()
 *		   or by a spurious wakeup.
 *		   (might return time remaining)
 * As a special test, if someone abruptly resets the system time
 * (but not through adjtime(2); drifting of the clock is allowed and
 * expected [see timespectohz_adj()]), then we force a return of -1
 * so the caller can return a premature timeout to the calling process
 * so it can reevaluate the situation in light of the new system time.
 * (The system clock has been reset if timecheck != timechanged.)
 */
int
cv_waituntil_sig(kcondvar_t *cvp, kmutex_t *mp,
    timestruc_t *when, int timecheck)
{
	timestruc_t now;
	timestruc_t delta;
	int rval;

	if (when == NULL)
		return (cv_wait_sig_swap(cvp, mp));

	gethrestime(&now);
	delta = *when;
	timespecsub(&delta, &now);
	if (delta.tv_sec < 0 || (delta.tv_sec == 0 && delta.tv_nsec == 0)) {
		/*
		 * We have already reached the absolute future time.
		 * Call cv_timedwait_sig() just to check for signals.
		 * We will return immediately with either 0 or -1.
		 */
		rval = cv_timedwait_sig(cvp, mp, lbolt);
	} else {
		gethrestime_lasttick(&now);
		if (timecheck == timechanged) {
			rval = cv_timedwait_sig_internal(cvp, mp,
			    lbolt + timespectohz(when, now),
			    CALLOUT_FLAG_HRESTIME);
		} else {
			/*
			 * Someone reset the system time;
			 * just force an immediate timeout.
			 */
			rval = -1;
		}
		if (rval == -1 && timecheck == timechanged) {
			/*
			 * Even though cv_timedwait_sig() returned showing a
			 * timeout, the future time may not have passed yet.
			 * If not, change rval to indicate a normal wakeup.
			 */
			gethrestime(&now);
			delta = *when;
			timespecsub(&delta, &now);
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_nsec > 0))
				rval = 1;
		}
	}
	return (rval);
}
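/*
 * Sketch of a cv_waituntil_sig() caller (hypothetical 'sc' names): the
 * caller snapshots 'timechanged' before computing its absolute hrestime
 * deadline, so an abrupt system-time reset in between is detected and
 * reported as a premature timeout (rval of -1).
 *
 *	int timecheck = timechanged;
 *	timestruc_t when;
 *
 *	gethrestime(&when);
 *	when.tv_sec += 5;
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready) {
 *		int r = cv_waituntil_sig(&sc->sc_cv, &sc->sc_lock,
 *		    &when, timecheck);
 *		if (r <= 0)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 */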