/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sobject.h>
#include <sys/sleepq.h>
#include <sys/cpuvar.h>
#include <sys/condvar.h>
#include <sys/condvar_impl.h>
#include <sys/schedctl.h>
#include <sys/procfs.h>
#include <sys/sdt.h>

/*
 * CV_MAX_WAITERS is the maximum number of waiters we track; once
 * the number becomes higher than that, we look at the sleepq to
 * see whether there are *really* any waiters.
 */
#define	CV_MAX_WAITERS		1024		/* must be power of 2 */
#define	CV_WAITERS_MASK		(CV_MAX_WAITERS - 1)

/*
 * Threads don't "own" condition variables.
 */
/* ARGSUSED */
static kthread_t *
cv_owner(void *cvp)
{
	return (NULL);
}

/*
 * Unsleep a thread that's blocked on a condition variable.
 */
static void
cv_unsleep(kthread_t *t)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_head_t *sqh = SQHASH(cvp);

	ASSERT(THREAD_LOCK_HELD(t));

	if (cvp == NULL)
		panic("cv_unsleep: thread %p not on sleepq %p", t, sqh);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	sleepq_unsleep(t);
	if (cvp->cv_waiters != CV_MAX_WAITERS)
		cvp->cv_waiters--;
	disp_lock_exit_high(&sqh->sq_lock);
	CL_SETRUN(t);
}

/*
 * Change the priority of a thread that's blocked on a condition variable.
 */
static void
cv_change_pri(kthread_t *t, pri_t pri, pri_t *t_prip)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_t *sqp = t->t_sleepq;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(&SQHASH(cvp)->sq_queue == sqp);

	if (cvp == NULL)
		panic("cv_change_pri: %p not on sleep queue", t);
	sleepq_dequeue(t);
	*t_prip = pri;
	sleepq_insert(sqp, t);
}

/*
 * The sobj_ops vector exports a set of functions needed when a thread
 * is asleep on a synchronization object of this type.
 */
static sobj_ops_t cv_sobj_ops = {
	SOBJ_CV, cv_owner, cv_unsleep, cv_change_pri
};

/* ARGSUSED */
void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	((condvar_impl_t *)cvp)->cv_waiters = 0;
}

/*
 * cv_destroy is not currently needed, but is part of the DDI.
 * This is in case cv_init ever needs to allocate something for a cv.
 */
/* ARGSUSED */
void
cv_destroy(kcondvar_t *cvp)
{
	ASSERT((((condvar_impl_t *)cvp)->cv_waiters & CV_WAITERS_MASK) == 0);
}
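/*
 * A minimal usage sketch (compiled out) of the condvar(9F) lifecycle
 * built from the primitives in this file.  The xx_lock, xx_cv and
 * xx_ready names are hypothetical.  Note that waiters always re-check
 * the predicate in a loop: a thread can be awakened without the
 * condition being true.
 */
#if 0
static kmutex_t xx_lock;
static kcondvar_t xx_cv;
static int xx_ready;

static void
xx_setup(void)
{
	mutex_init(&xx_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&xx_cv, NULL, CV_DEFAULT, NULL);
}

static void
xx_consume(void)
{
	mutex_enter(&xx_lock);
	while (!xx_ready)		/* predicate re-checked on wakeup */
		cv_wait(&xx_cv, &xx_lock);
	xx_ready = 0;
	mutex_exit(&xx_lock);
}

static void
xx_produce(void)
{
	mutex_enter(&xx_lock);
	xx_ready = 1;
	cv_signal(&xx_cv);		/* wake at most one waiter */
	mutex_exit(&xx_lock);
}

static void
xx_teardown(void)
{
	cv_destroy(&xx_cv);		/* asserts no waiters remain */
	mutex_destroy(&xx_lock);
}
#endif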
/*
 * The cv_block() function blocks a thread on a condition variable
 * by putting it in a hashed sleep queue associated with the
 * synchronization object.
 *
 * Threads are taken off the hashed sleep queues via calls to
 * cv_signal(), cv_broadcast(), or cv_unsleep().
 */
static void
cv_block(condvar_impl_t *cvp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	sleepq_head_t *sqh;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	t->t_schedflag &= ~TS_SIGNALLED;
	CL_SLEEP(t);			/* assign kernel priority */
	t->t_wchan = (caddr_t)cvp;
	t->t_sobj_ops = &cv_sobj_ops;
	DTRACE_SCHED(sleep);

	/*
	 * The check for t_intr is to avoid doing the
	 * accounting for an interrupt thread on the still-pinned
	 * lwp's statistics.
	 */
	if (lwp != NULL && t->t_intr == NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
	}

	sqh = SQHASH(cvp);
	disp_lock_enter_high(&sqh->sq_lock);
	if (cvp->cv_waiters < CV_MAX_WAITERS)
		cvp->cv_waiters++;
	ASSERT(cvp->cv_waiters <= CV_MAX_WAITERS);
	THREAD_SLEEP(t, &sqh->sq_lock);
	sleepq_insert(&sqh->sq_queue, t);
	/*
	 * THREAD_SLEEP() moves curthread->t_lockp to point to the
	 * lock sqh->sq_lock. This lock is later released by the caller
	 * when it calls thread_unlock() on curthread.
	 */
}

#define	cv_block_sig(t, cvp)	\
	{ (t)->t_flag |= T_WAKEABLE; cv_block(cvp); }

/*
 * Block on the indicated condition variable and release the
 * associated kmutex while blocked.
 */
void
cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	if (panicstr)
		return;

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	thread_lock(curthread);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(curthread);	/* unlock the waiters field */
	mutex_exit(mp);
	swtch();
	mutex_enter(mp);
}

/*
 * Same as cv_wait except the thread will unblock at 'tim'
 * (an absolute time) if it hasn't already unblocked.
 *
 * Returns the amount of time left from the original 'tim' value
 * when it was unblocked.
 */
clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	timeout_id_t id;
	clock_t timeleft;
	int signalled;

	if (panicstr)
		return (-1);

	timeleft = tim - lbolt;
	if (timeleft <= 0)
		return (-1);
	id = realtime_timeout((void (*)(void *))setrun, t, timeleft);
	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if ((tim - lbolt) <= 0)		/* allow for wrap */
		setrun(t);
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	/*
	 * Get the time left. untimeout() returns -1 if the timeout has
	 * occurred, or the time remaining if it has not. If the time
	 * remaining is zero, the timeout occurred between when we were
	 * awoken and when we called untimeout. We will treat this as if
	 * the timeout has occurred and set timeleft to -1.
	 */
	timeleft = untimeout(id);
	mutex_enter(mp);
	if (timeleft <= 0) {
		timeleft = -1;
		if (signalled)	/* avoid consuming the cv_signal() */
			cv_signal(cvp);
	}
	return (timeleft);
}
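/*
 * A minimal usage sketch (compiled out) for cv_timedwait().  The
 * deadline argument is absolute, in ticks, so callers add a relative
 * interval to lbolt; MSEC_TO_TICK() converts from milliseconds.
 * Assumes the hypothetical xx_* statics from the sketch above, and
 * maps a timeout to ETIME as many callers conventionally do.
 */
#if 0
static int
xx_wait_ready(int max_ms)
{
	clock_t deadline = lbolt + MSEC_TO_TICK(max_ms);

	mutex_enter(&xx_lock);
	while (!xx_ready) {
		/* -1 means the deadline passed; otherwise re-check */
		if (cv_timedwait(&xx_cv, &xx_lock, deadline) == -1) {
			mutex_exit(&xx_lock);
			return (ETIME);
		}
	}
	mutex_exit(&xx_lock);
	return (0);
}
#endif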
int
cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
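/*
 * A minimal usage sketch (compiled out) for cv_wait_sig().  A zero
 * return means the wait was interrupted by a signal (or a pending
 * stop or cancellation), which syscall paths conventionally map to
 * EINTR.  Assumes the hypothetical xx_* statics from the first sketch.
 */
#if 0
static int
xx_wait_ready_sig(void)
{
	mutex_enter(&xx_lock);
	while (!xx_ready) {
		if (cv_wait_sig(&xx_cv, &xx_lock) == 0) {
			mutex_exit(&xx_lock);
			return (EINTR);		/* interrupted */
		}
	}
	mutex_exit(&xx_lock);
	return (0);
}
#endif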
/*
 * Returns:
 * 	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast().
 *		   (returns time remaining)
 *
 * cv_timedwait_sig() is now part of the DDI.
 */
clock_t
cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending = 0;
	timeout_id_t id;
	clock_t rval = 1;
	clock_t timeleft;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * If there is no lwp, then we don't need to wait for a signal.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr)
		return (cv_timedwait(cvp, mp, tim));

	/*
	 * If tim is less than or equal to lbolt, then the timeout
	 * has already occurred. So just check to see if there is a signal
	 * pending. If so return 0 indicating that there is a signal pending.
	 * Else return -1 indicating that the timeout occurred. No need to
	 * wait on anything.
	 */
	timeleft = tim - lbolt;
	if (timeleft <= 0) {
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		rval = -1;
		goto out;
	}

	/*
	 * Set the timeout and wait.
	 */
	cancel_pending = schedctl_cancel_pending();
	id = realtime_timeout((void (*)(void *))setrun, t, timeleft);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending ||
	    (tim - lbolt <= 0))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);

	/*
	 * Untimeout the thread. untimeout() returns -1 if the timeout has
	 * occurred, or the time remaining if it has not. If the time
	 * remaining is zero, the timeout occurred between when we were
	 * awoken and when we called untimeout. We will treat this as if
	 * the timeout has occurred and set rval to -1.
	 */
	rval = untimeout(id);
	if (rval <= 0)
		rval = -1;

	/*
	 * Check to see if a signal is pending. If so, regardless of whether
	 * or not we were awoken due to the signal, the signal is now pending
	 * and a return of 0 has the highest priority.
	 */
out:
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval <= 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
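/*
 * A minimal usage sketch (compiled out) of the three-way return of
 * cv_timedwait_sig(): 0 (signal) takes precedence over -1 (timeout),
 * and any positive value is just a wakeup after which the predicate
 * must be re-checked.  Assumes the hypothetical xx_* statics from the
 * first sketch.
 */
#if 0
static int
xx_wait_ready_sig_tmo(clock_t deadline)
{
	clock_t rval;

	mutex_enter(&xx_lock);
	while (!xx_ready) {
		rval = cv_timedwait_sig(&xx_cv, &xx_lock, deadline);
		if (rval == 0) {		/* signal pending */
			mutex_exit(&xx_lock);
			return (EINTR);
		}
		if (rval == -1) {		/* deadline passed */
			mutex_exit(&xx_lock);
			return (ETIME);
		}
		/* rval > 0: awakened; loop and re-check xx_ready */
	}
	mutex_exit(&xx_lock);
	return (0);
}
#endif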
/*
 * Like cv_wait_sig_swap but allows the caller to indicate (with a
 * non-NULL sigret) that they will take care of signalling the cv
 * after wakeup, if necessary. This is a vile hack that should only
 * be used when no other option is available; almost all callers
 * should just use cv_wait_sig_swap (which takes care of the cv_signal
 * stuff automatically) instead.
 */
int
cv_wait_sig_swap_core(kcondvar_t *cvp, kmutex_t *mp, int *sigret)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;	/* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	/* I can be swapped now */
	curthread->t_schedflag &= ~TS_DONT_SWAP;
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	/* TS_DONT_SWAP set by disp() */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */
	}
	return (rval);
}

/*
 * Same as cv_wait_sig but the thread can be swapped out while waiting.
 * This should only be used when we know we aren't holding any locks.
 */
int
cv_wait_sig_swap(kcondvar_t *cvp, kmutex_t *mp)
{
	return (cv_wait_sig_swap_core(cvp, mp, NULL));
}

void
cv_signal(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		if (cp->cv_waiters & CV_WAITERS_MASK) {
			kthread_t *t;
			cp->cv_waiters--;
			t = sleepq_wakeone_chan(&sqh->sq_queue, cp);
			/*
			 * If cv_waiters is non-zero (and less than
			 * CV_MAX_WAITERS) there should be a thread
			 * in the queue.
			 */
			ASSERT(t != NULL);
		} else if (sleepq_wakeone_chan(&sqh->sq_queue, cp) == NULL) {
			cp->cv_waiters = 0;
		}
		disp_lock_exit(&sqh->sq_lock);
	}
}

void
cv_broadcast(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		sleepq_wakeall_chan(&sqh->sq_queue, cp);
		cp->cv_waiters = 0;
		disp_lock_exit(&sqh->sq_lock);
	}
}
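/*
 * A sketch (compiled out) of choosing between the two wakeup
 * primitives above: cv_signal() when one unit of work became
 * available for one waiter, cv_broadcast() when every waiter must
 * observe a state change, such as a shutdown flag.  The xx_* names
 * are hypothetical, reusing the statics from the first sketch.
 */
#if 0
static int xx_shutting_down;

static void
xx_shutdown(void)
{
	mutex_enter(&xx_lock);
	xx_shutting_down = 1;
	cv_broadcast(&xx_cv);	/* every waiter must see the flag */
	mutex_exit(&xx_lock);
}
#endif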
/*
 * Same as cv_wait(), but wakes up (after wakeup_time milliseconds) to check
 * for requests to stop, like cv_wait_sig() but without dealing with signals.
 * This is a horrible kludge. It is evil. It is vile. It is swill.
 * If your code has to call this function then your code is the same.
 */
void
cv_wait_stop(kcondvar_t *cvp, kmutex_t *mp, int wakeup_time)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	timeout_id_t id;
	clock_t tim;

	if (panicstr)
		return;

	/*
	 * If there is no lwp, then we don't need to eventually stop it.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return;
	}

	/*
	 * Wakeup in wakeup_time milliseconds, i.e., human time.
	 */
	tim = lbolt + MSEC_TO_TICK(wakeup_time);
	id = realtime_timeout((void (*)(void *))setrun, t, tim - lbolt);
	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	/* ASSERT(no locks are held); */
	if ((tim - lbolt) <= 0)		/* allow for wrap */
		setrun(t);
	swtch();
	(void) untimeout(id);

	/*
	 * Check for reasons to stop, if lwp_nostop is not true.
	 * See issig_forreal() for explanations of the various stops.
	 */
	mutex_enter(&p->p_lock);
	while (lwp->lwp_nostop == 0 && !(p->p_flag & SEXITLWPS)) {
		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if (t->t_proc_flag & TP_PAUSE) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}
		/*
		 * System checkpoint.
		 */
		if (t->t_proc_flag & TP_CHKPT) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}
		/*
		 * Honor fork1(), watchpoint activity (remapping a page),
		 * and lwp_suspend() requests.
		 */
		if ((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}
		/*
		 * Honor /proc requested stop.
		 */
		if (t->t_proc_flag & TP_PRSTOP) {
			stop(PR_REQUESTED, 0);
		}
		/*
		 * If some lwp in the process has already stopped
		 * showing PR_JOBCONTROL, stop in sympathy with it.
		 */
		if (p->p_stopsig && t != p->p_agenttp) {
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}
		break;
	}
	mutex_exit(&p->p_lock);
	mutex_enter(mp);
}
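/*
 * A minimal usage sketch (compiled out) for cv_wait_stop(): the
 * caller polls at wakeup_time-millisecond intervals so the thread
 * stays responsive to /proc, checkpoint and hold requests while it
 * waits.  Per the warning above, prefer the other wait primitives
 * whenever possible.  Assumes the hypothetical xx_* statics from the
 * first sketch.
 */
#if 0
static void
xx_wait_ready_stoppable(void)
{
	mutex_enter(&xx_lock);
	while (!xx_ready)
		cv_wait_stop(&xx_cv, &xx_lock, 100);	/* 100ms polls */
	mutex_exit(&xx_lock);
}
#endif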
/*
 * Like cv_timedwait_sig(), but takes an absolute hires future time
 * rather than a future time in clock ticks. Will not return showing
 * that a timeout occurred until the future time is passed.
 * If 'when' is a NULL pointer, no timeout will occur.
 * Returns:
 * 	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast()
 *		   or by a spurious wakeup.
 *		   (might return time remaining)
 * As a special test, if someone abruptly resets the system time
 * (but not through adjtime(2); drifting of the clock is allowed and
 * expected [see timespectohz_adj()]), then we force a return of -1
 * so the caller can return a premature timeout to the calling process
 * so it can reevaluate the situation in light of the new system time.
 * (The system clock has been reset if timecheck != timechanged.)
 */
int
cv_waituntil_sig(kcondvar_t *cvp, kmutex_t *mp,
	timestruc_t *when, int timecheck)
{
	timestruc_t now;
	timestruc_t delta;
	int rval;

	if (when == NULL)
		return (cv_wait_sig_swap(cvp, mp));

	gethrestime(&now);
	delta = *when;
	timespecsub(&delta, &now);
	if (delta.tv_sec < 0 || (delta.tv_sec == 0 && delta.tv_nsec == 0)) {
		/*
		 * We have already reached the absolute future time.
		 * Call cv_timedwait_sig() just to check for signals.
		 * We will return immediately with either 0 or -1.
		 */
		rval = cv_timedwait_sig(cvp, mp, lbolt);
	} else {
		if (timecheck == timechanged) {
			rval = cv_timedwait_sig(cvp, mp,
			    lbolt + timespectohz_adj(when, now));
		} else {
			/*
			 * Someone reset the system time;
			 * just force an immediate timeout.
			 */
			rval = -1;
		}
		if (rval == -1 && timecheck == timechanged) {
			/*
			 * Even though cv_timedwait_sig() returned showing a
			 * timeout, the future time may not have passed yet.
			 * If not, change rval to indicate a normal wakeup.
			 */
			gethrestime(&now);
			delta = *when;
			timespecsub(&delta, &now);
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_nsec > 0))
				rval = 1;
		}
	}
	return (rval);
}
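/*
 * A minimal usage sketch (compiled out) for cv_waituntil_sig().  The
 * caller snapshots timechanged before computing the absolute deadline
 * so that an abrupt system-time reset is detected as described above.
 * Assumes the hypothetical xx_* statics from the first sketch.
 */
#if 0
static int
xx_wait_ready_until(timestruc_t *deadline)
{
	int timecheck = timechanged;	/* snapshot before waiting */
	int rval;

	mutex_enter(&xx_lock);
	while (!xx_ready) {
		rval = cv_waituntil_sig(&xx_cv, &xx_lock,
		    deadline, timecheck);
		if (rval == 0) {		/* signal: map to EINTR */
			mutex_exit(&xx_lock);
			return (EINTR);
		}
		if (rval == -1) {		/* timeout or clock reset */
			mutex_exit(&xx_lock);
			return (ETIME);
		}
	}
	mutex_exit(&xx_lock);
	return (0);
}
#endif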