/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sobject.h>
#include <sys/sleepq.h>
#include <sys/cpuvar.h>
#include <sys/condvar.h>
#include <sys/condvar_impl.h>
#include <sys/schedctl.h>
#include <sys/procfs.h>
#include <sys/sdt.h>
#include <sys/callo.h>

/*
 * CV_MAX_WAITERS is the maximum number of waiters we track; once
 * the number becomes higher than that, we look at the sleepq to
 * see whether there are *really* any waiters.
 */
#define	CV_MAX_WAITERS		1024		/* must be power of 2 */
#define	CV_WAITERS_MASK		(CV_MAX_WAITERS - 1)

/*
 * Threads don't "own" condition variables.
 */
/* ARGSUSED */
static kthread_t *
cv_owner(void *cvp)
{
	return (NULL);
}

/*
 * Unsleep a thread that's blocked on a condition variable.
 */
static void
cv_unsleep(kthread_t *t)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_head_t *sqh = SQHASH(cvp);

	ASSERT(THREAD_LOCK_HELD(t));

	if (cvp == NULL)
		panic("cv_unsleep: thread %p not on sleepq %p",
		    (void *)t, (void *)sqh);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	sleepq_unsleep(t);
	if (cvp->cv_waiters != CV_MAX_WAITERS)
		cvp->cv_waiters--;
	disp_lock_exit_high(&sqh->sq_lock);
	CL_SETRUN(t);
}

/*
 * Change the priority of a thread that's blocked on a condition variable.
 */
static void
cv_change_pri(kthread_t *t, pri_t pri, pri_t *t_prip)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_t *sqp = t->t_sleepq;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(&SQHASH(cvp)->sq_queue == sqp);

	if (cvp == NULL)
		panic("cv_change_pri: %p not on sleep queue", (void *)t);
	sleepq_dequeue(t);
	*t_prip = pri;
	sleepq_insert(sqp, t);
}

/*
 * The sobj_ops vector exports a set of functions needed when a thread
 * is asleep on a synchronization object of this type.
 */
static sobj_ops_t cv_sobj_ops = {
	SOBJ_CV, cv_owner, cv_unsleep, cv_change_pri
};

/* ARGSUSED */
void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	((condvar_impl_t *)cvp)->cv_waiters = 0;
}
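/*
 * Example (illustrative sketch only, not part of this file's logic):
 * typical DDI setup and teardown of a condition variable together with
 * the mutex that protects its predicate. The xx_* structure and names
 * are hypothetical.
 *
 *	typedef struct xx_state {
 *		kmutex_t	xx_lock;
 *		kcondvar_t	xx_cv;
 *		boolean_t	xx_ready;
 *	} xx_state_t;
 *
 *	mutex_init(&xsp->xx_lock, NULL, MUTEX_DRIVER, NULL);
 *	cv_init(&xsp->xx_cv, NULL, CV_DRIVER, NULL);
 *	...
 *	cv_destroy(&xsp->xx_cv);
 *	mutex_destroy(&xsp->xx_lock);
 */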
/*
 * cv_destroy is not currently needed, but is part of the DDI.
 * This is in case cv_init ever needs to allocate something for a cv.
 */
/* ARGSUSED */
void
cv_destroy(kcondvar_t *cvp)
{
	ASSERT((((condvar_impl_t *)cvp)->cv_waiters & CV_WAITERS_MASK) == 0);
}

/*
 * The cv_block() function blocks a thread on a condition variable
 * by putting it in a hashed sleep queue associated with the
 * synchronization object.
 *
 * Threads are taken off the hashed sleep queues via calls to
 * cv_signal(), cv_broadcast(), or cv_unsleep().
 */
static void
cv_block(condvar_impl_t *cvp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	sleepq_head_t *sqh;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	t->t_schedflag &= ~TS_SIGNALLED;
	CL_SLEEP(t);			/* assign kernel priority */
	t->t_wchan = (caddr_t)cvp;
	t->t_sobj_ops = &cv_sobj_ops;
	DTRACE_SCHED(sleep);

	/*
	 * The check for t_intr is to avoid doing the
	 * accounting for an interrupt thread on the still-pinned
	 * lwp's statistics.
	 */
	if (lwp != NULL && t->t_intr == NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
	}

	sqh = SQHASH(cvp);
	disp_lock_enter_high(&sqh->sq_lock);
	if (cvp->cv_waiters < CV_MAX_WAITERS)
		cvp->cv_waiters++;
	ASSERT(cvp->cv_waiters <= CV_MAX_WAITERS);
	THREAD_SLEEP(t, &sqh->sq_lock);
	sleepq_insert(&sqh->sq_queue, t);
	/*
	 * THREAD_SLEEP() moves curthread->t_lockp to point to the
	 * lock sqh->sq_lock. This lock is later released by the caller
	 * when it calls thread_unlock() on curthread.
	 */
}

#define	cv_block_sig(t, cvp)	\
	{ (t)->t_flag |= T_WAKEABLE; cv_block(cvp); }

/*
 * Block on the indicated condition variable and release the
 * associated kmutex while blocked.
 */
void
cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	if (panicstr)
		return;

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	thread_lock(curthread);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(curthread);	/* unlock the waiters field */
	mutex_exit(mp);
	swtch();
	mutex_enter(mp);
}
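/*
 * Example (illustrative sketch only): the canonical cv_wait() pattern.
 * The predicate is re-checked in a loop because a waiter may be awakened
 * by cv_broadcast() or a spurious wakeup before its condition is true.
 * The xx_* names are hypothetical.
 *
 *	mutex_enter(&xsp->xx_lock);
 *	while (!xsp->xx_ready)
 *		cv_wait(&xsp->xx_cv, &xsp->xx_lock);
 *	... the condition now holds, still under xx_lock ...
 *	mutex_exit(&xsp->xx_lock);
 */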
static void
cv_wakeup(void *arg)
{
	kthread_t *t = arg;

	/*
	 * This mutex is acquired and released in order to make sure that
	 * the wakeup does not happen before the block itself happens.
	 */
	mutex_enter(&t->t_wait_mutex);
	mutex_exit(&t->t_wait_mutex);
	setrun(t);
}

/*
 * Same as cv_wait except the thread will unblock at 'tim'
 * (an absolute time) if it hasn't already unblocked.
 *
 * Returns the amount of time left from the original 'tim' value
 * when it was unblocked.
 */
clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	callout_id_t id;
	clock_t timeleft;
	int signalled;

	if (panicstr)
		return (-1);

	timeleft = tim - lbolt;
	if (timeleft <= 0)
		return (-1);
	mutex_enter(&t->t_wait_mutex);
	id = realtime_timeout_default((void (*)(void *))cv_wakeup, t, timeleft);
	thread_lock(t);		/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(&t->t_wait_mutex);
	mutex_exit(mp);
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	/*
	 * Get the time left. untimeout() returns -1 if the timeout has
	 * already occurred, or else the time remaining. If the time
	 * remaining is zero, the timeout has occurred between when we
	 * were awoken and when we called untimeout. We treat this as if
	 * the timeout has occurred and set timeleft to -1.
	 */
	timeleft = untimeout_default(id, 0);
	mutex_enter(mp);
	if (timeleft <= 0) {
		timeleft = -1;
		if (signalled)	/* avoid consuming the cv_signal() */
			cv_signal(cvp);
	}
	return (timeleft);
}

int
cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
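/*
 * Example (illustrative sketch only): an interruptible wait using
 * cv_wait_sig(). A return of 0 means the wait was interrupted, and the
 * caller typically backs out and returns EINTR. The xx_* names are
 * hypothetical.
 *
 *	mutex_enter(&xsp->xx_lock);
 *	while (!xsp->xx_ready) {
 *		if (cv_wait_sig(&xsp->xx_cv, &xsp->xx_lock) == 0) {
 *			mutex_exit(&xsp->xx_lock);
 *			return (EINTR);
 *		}
 *	}
 *	mutex_exit(&xsp->xx_lock);
 */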
static clock_t
cv_timedwait_sig_internal(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int flag)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending = 0;
	callout_id_t id;
	clock_t rval = 1;
	clock_t timeleft;
	int signalled = 0;

	/*
	 * If the flag is 0, then realtime_timeout() below creates a
	 * regular realtime timeout. If the flag is CALLOUT_FLAG_HRESTIME,
	 * then it creates a special realtime timeout which is affected by
	 * changes to hrestime. See callo.h for details.
	 */
	ASSERT((flag == 0) || (flag == CALLOUT_FLAG_HRESTIME));
	if (panicstr)
		return (rval);

	/*
	 * If there is no lwp, then we don't need to wait for a signal.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr)
		return (cv_timedwait(cvp, mp, tim));

	/*
	 * If tim is less than or equal to lbolt, then the timeout
	 * has already occurred. So just check to see if there is a signal
	 * pending. If so, return 0 indicating that there is a signal pending.
	 * Else return -1 indicating that the timeout occurred. No need to
	 * wait on anything.
	 */
	timeleft = tim - lbolt;
	if (timeleft <= 0) {
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		rval = -1;
		goto out;
	}

	/*
	 * Set the timeout and wait.
	 */
	cancel_pending = schedctl_cancel_pending();
	mutex_enter(&t->t_wait_mutex);
	id = timeout_generic(CALLOUT_REALTIME, (void (*)(void *))cv_wakeup, t,
	    TICK_TO_NSEC(timeleft), nsec_per_tick, flag);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(&t->t_wait_mutex);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;

	/*
	 * Untimeout the thread. untimeout() returns -1 if the timeout has
	 * already occurred, or else the time remaining. If the time
	 * remaining is zero, the timeout has occurred between when we were
	 * awoken and when we called untimeout. We treat this as if the
	 * timeout has occurred and set rval to -1.
	 */
	rval = untimeout_default(id, 0);
	mutex_enter(mp);
	if (rval <= 0)
		rval = -1;

	/*
	 * Check to see if a signal is pending. If so, regardless of whether
	 * or not we were awoken due to the signal, the signal is now pending
	 * and a return of 0 has the highest priority.
	 */
out:
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval <= 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}

/*
 * Returns:
 *	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast()
 *		   (returns time remaining)
 *
 * cv_timedwait_sig() is now part of the DDI.
 *
 * This function is now just a wrapper for cv_timedwait_sig_internal().
 */
clock_t
cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	return (cv_timedwait_sig_internal(cvp, mp, tim, 0));
}
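/*
 * Example (illustrative sketch only): waiting with both a timeout and
 * signal handling via cv_timedwait_sig(). Because 'tim' is an absolute
 * tick value, the deadline is computed once, outside the loop, and does
 * not slide on spurious wakeups. The xx_* names and 30-second interval
 * are hypothetical.
 *
 *	clock_t deadline = lbolt + SEC_TO_TICK(30);
 *	clock_t rval = 1;
 *
 *	mutex_enter(&xsp->xx_lock);
 *	while (!xsp->xx_ready && rval > 0)
 *		rval = cv_timedwait_sig(&xsp->xx_cv, &xsp->xx_lock, deadline);
 *	mutex_exit(&xsp->xx_lock);
 *
 * On exit, rval == 0 means a signal arrived (return EINTR), rval == -1
 * means the deadline passed (return ETIME), and rval > 0 means the
 * condition was satisfied in time.
 */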
/*
 * Like cv_wait_sig_swap but allows the caller to indicate (with a
 * non-NULL sigret) that they will take care of signalling the cv
 * after wakeup, if necessary. This is a vile hack that should only
 * be used when no other option is available; almost all callers
 * should just use cv_wait_sig_swap (which takes care of the cv_signal
 * stuff automatically) instead.
 */
int
cv_wait_sig_swap_core(kcondvar_t *cvp, kmutex_t *mp, int *sigret)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;	/* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	/* I can be swapped now */
	curthread->t_schedflag &= ~TS_DONT_SWAP;
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	/* TS_DONT_SWAP set by disp() */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */
	}
	return (rval);
}

/*
 * Same as cv_wait_sig but the thread can be swapped out while waiting.
 * This should only be used when we know we aren't holding any locks.
 */
int
cv_wait_sig_swap(kcondvar_t *cvp, kmutex_t *mp)
{
	return (cv_wait_sig_swap_core(cvp, mp, NULL));
}

void
cv_signal(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		if (cp->cv_waiters & CV_WAITERS_MASK) {
			kthread_t *t;
			cp->cv_waiters--;
			t = sleepq_wakeone_chan(&sqh->sq_queue, cp);
			/*
			 * If cv_waiters is non-zero (and less than
			 * CV_MAX_WAITERS) there should be a thread
			 * in the queue.
			 */
			ASSERT(t != NULL);
		} else if (sleepq_wakeone_chan(&sqh->sq_queue, cp) == NULL) {
			cp->cv_waiters = 0;
		}
		disp_lock_exit(&sqh->sq_lock);
	}
}

void
cv_broadcast(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		sleepq_wakeall_chan(&sqh->sq_queue, cp);
		cp->cv_waiters = 0;
		disp_lock_exit(&sqh->sq_lock);
	}
}
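/*
 * Example (illustrative sketch only): the producer side. The state
 * change and the wakeup are both performed under the same mutex the
 * waiters use, so a waiter cannot miss the transition. The xx_* names
 * are hypothetical.
 *
 *	mutex_enter(&xsp->xx_lock);
 *	xsp->xx_ready = B_TRUE;
 *	cv_broadcast(&xsp->xx_cv);	(or cv_signal() to wake one waiter)
 *	mutex_exit(&xsp->xx_lock);
 */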
/*
 * Same as cv_wait(), but wakes up (after wakeup_time milliseconds) to check
 * for requests to stop, like cv_wait_sig() but without dealing with signals.
 * This is a horrible kludge. It is evil. It is vile. It is swill.
 * If your code has to call this function then your code is the same.
 */
void
cv_wait_stop(kcondvar_t *cvp, kmutex_t *mp, int wakeup_time)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	callout_id_t id;
	clock_t tim;

	if (panicstr)
		return;

	/*
	 * If there is no lwp, then we don't need to eventually stop it.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return;
	}

	/*
	 * Wakeup in wakeup_time milliseconds, i.e., human time.
	 */
	tim = lbolt + MSEC_TO_TICK(wakeup_time);
	mutex_enter(&t->t_wait_mutex);
	id = realtime_timeout_default((void (*)(void *))cv_wakeup, t,
	    tim - lbolt);
	thread_lock(t);		/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(&t->t_wait_mutex);
	mutex_exit(mp);
	/* ASSERT(no locks are held); */
	swtch();
	(void) untimeout_default(id, 0);

	/*
	 * Check for reasons to stop, if lwp_nostop is not true.
	 * See issig_forreal() for explanations of the various stops.
	 */
	mutex_enter(&p->p_lock);
	while (lwp->lwp_nostop == 0 && !(p->p_flag & SEXITLWPS)) {
		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if (t->t_proc_flag & TP_PAUSE) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}
		/*
		 * System checkpoint.
		 */
		if (t->t_proc_flag & TP_CHKPT) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}
		/*
		 * Honor fork1(), watchpoint activity (remapping a page),
		 * and lwp_suspend() requests.
		 */
		if ((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}
		/*
		 * Honor /proc requested stop.
		 */
		if (t->t_proc_flag & TP_PRSTOP) {
			stop(PR_REQUESTED, 0);
		}
		/*
		 * If some lwp in the process has already stopped
		 * showing PR_JOBCONTROL, stop in sympathy with it.
		 */
		if (p->p_stopsig && t != p->p_agenttp) {
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}
		break;
	}
	mutex_exit(&p->p_lock);
	mutex_enter(mp);
}
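/*
 * Example (illustrative sketch only, and discouraged per the comment
 * above): a wait loop that uses cv_wait_stop() so the thread surfaces
 * every 100 milliseconds (an arbitrary interval) to honor /proc and
 * suspend requests. The xx_* names are hypothetical.
 *
 *	mutex_enter(&xsp->xx_lock);
 *	while (!xsp->xx_ready)
 *		cv_wait_stop(&xsp->xx_cv, &xsp->xx_lock, 100);
 *	mutex_exit(&xsp->xx_lock);
 */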
/*
 * Like cv_timedwait_sig(), but takes an absolute hires future time
 * rather than a future time in clock ticks. Will not return showing
 * that a timeout occurred until the future time is passed.
 * If 'when' is a NULL pointer, no timeout will occur.
 * Returns:
 *	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast()
 *		   or by a spurious wakeup.
 *		   (might return time remaining)
 * As a special test, if someone abruptly resets the system time
 * (but not through adjtime(2); drifting of the clock is allowed and
 * expected [see timespectohz_adj()]), then we force a return of -1
 * so the caller can return a premature timeout to the calling process
 * so it can reevaluate the situation in light of the new system time.
 * (The system clock has been reset if timecheck != timechanged.)
 */
int
cv_waituntil_sig(kcondvar_t *cvp, kmutex_t *mp,
	timestruc_t *when, int timecheck)
{
	timestruc_t now;
	timestruc_t delta;
	int rval;

	if (when == NULL)
		return (cv_wait_sig_swap(cvp, mp));

	gethrestime(&now);
	delta = *when;
	timespecsub(&delta, &now);
	if (delta.tv_sec < 0 || (delta.tv_sec == 0 && delta.tv_nsec == 0)) {
		/*
		 * We have already reached the absolute future time.
		 * Call cv_timedwait_sig() just to check for signals.
		 * We will return immediately with either 0 or -1.
		 */
		rval = cv_timedwait_sig(cvp, mp, lbolt);
	} else {
		gethrestime_lasttick(&now);
		if (timecheck == timechanged) {
			rval = cv_timedwait_sig_internal(cvp, mp,
			    lbolt + timespectohz(when, now),
			    CALLOUT_FLAG_HRESTIME);
		} else {
			/*
			 * Someone reset the system time;
			 * just force an immediate timeout.
			 */
			rval = -1;
		}
		if (rval == -1 && timecheck == timechanged) {
			/*
			 * Even though cv_timedwait_sig() returned showing a
			 * timeout, the future time may not have passed yet.
			 * If not, change rval to indicate a normal wakeup.
			 */
			gethrestime(&now);
			delta = *when;
			timespecsub(&delta, &now);
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_nsec > 0))
				rval = 1;
		}
	}
	return (rval);
}
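/*
 * Example (illustrative sketch only): waiting until an absolute hires
 * deadline. The caller samples 'timechanged' before computing 'when' so
 * that cv_waituntil_sig() can detect an abrupt reset of the system
 * clock. The xx_* names and 5-second interval are hypothetical.
 *
 *	timestruc_t when;
 *	int timecheck;
 *	int rval = 1;
 *
 *	timecheck = timechanged;
 *	gethrestime(&when);
 *	when.tv_sec += 5;
 *
 *	mutex_enter(&xsp->xx_lock);
 *	while (!xsp->xx_ready && rval > 0)
 *		rval = cv_waituntil_sig(&xsp->xx_cv, &xsp->xx_lock,
 *		    &when, timecheck);
 *	mutex_exit(&xsp->xx_lock);
 */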