/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sobject.h>
#include <sys/sleepq.h>
#include <sys/cpuvar.h>
#include <sys/condvar.h>
#include <sys/condvar_impl.h>
#include <sys/schedctl.h>
#include <sys/procfs.h>
#include <sys/sdt.h>

/*
 * CV_MAX_WAITERS is the maximum number of waiters we track; once
 * the number becomes higher than that, we look at the sleepq to
 * see whether there are *really* any waiters.
 */
#define	CV_MAX_WAITERS		1024		/* must be power of 2 */
#define	CV_WAITERS_MASK		(CV_MAX_WAITERS - 1)

/*
 * Threads don't "own" condition variables.
 */
/* ARGSUSED */
static kthread_t *
cv_owner(void *cvp)
{
	return (NULL);
}

/*
 * Unsleep a thread that's blocked on a condition variable.
 */
static void
cv_unsleep(kthread_t *t)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_head_t *sqh = SQHASH(cvp);

	ASSERT(THREAD_LOCK_HELD(t));

	if (cvp == NULL)
		panic("cv_unsleep: thread %p not on sleepq %p",
		    (void *)t, (void *)sqh);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	sleepq_unsleep(t);
	if (cvp->cv_waiters != CV_MAX_WAITERS)
		cvp->cv_waiters--;
	disp_lock_exit_high(&sqh->sq_lock);
	CL_SETRUN(t);
}

/*
 * Change the priority of a thread that's blocked on a condition variable.
 */
static void
cv_change_pri(kthread_t *t, pri_t pri, pri_t *t_prip)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_t *sqp = t->t_sleepq;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(&SQHASH(cvp)->sq_queue == sqp);

	if (cvp == NULL)
		panic("cv_change_pri: %p not on sleep queue", (void *)t);
	sleepq_dequeue(t);
	*t_prip = pri;
	sleepq_insert(sqp, t);
}

/*
 * The sobj_ops vector exports a set of functions needed when a thread
 * is asleep on a synchronization object of this type.
 */
static sobj_ops_t cv_sobj_ops = {
	SOBJ_CV, cv_owner, cv_unsleep, cv_change_pri
};

/* ARGSUSED */
void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	((condvar_impl_t *)cvp)->cv_waiters = 0;
}

/*
 * cv_destroy is not currently needed, but is part of the DDI.
 * This is in case cv_init ever needs to allocate something for a cv.
 */
/* ARGSUSED */
void
cv_destroy(kcondvar_t *cvp)
{
	ASSERT((((condvar_impl_t *)cvp)->cv_waiters & CV_WAITERS_MASK) == 0);
}
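
/*
 * A minimal initialization sketch (hypothetical caller code, not part
 * of this file).  Kernel cv's carry no storage beyond cv_waiters, so
 * the name and type-specific argument are ignored and CV_DEFAULT is
 * the usual type:
 *
 *	kcondvar_t xs_cv;
 *	kmutex_t xs_lock;
 *
 *	cv_init(&xs_cv, NULL, CV_DEFAULT, NULL);
 *	...
 *	cv_destroy(&xs_cv);
 */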

/*
 * The cv_block() function blocks a thread on a condition variable
 * by putting it in a hashed sleep queue associated with the
 * synchronization object.
 *
 * Threads are taken off the hashed sleep queues via calls to
 * cv_signal(), cv_broadcast(), or cv_unsleep().
 */
static void
cv_block(condvar_impl_t *cvp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	sleepq_head_t *sqh;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	t->t_schedflag &= ~TS_SIGNALLED;
	CL_SLEEP(t);			/* assign kernel priority */
	t->t_wchan = (caddr_t)cvp;
	t->t_sobj_ops = &cv_sobj_ops;
	DTRACE_SCHED(sleep);

	/*
	 * The check for t_intr is to avoid charging the
	 * accounting for an interrupt thread to the still-pinned
	 * lwp's statistics.
	 */
	if (lwp != NULL && t->t_intr == NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
	}

	sqh = SQHASH(cvp);
	disp_lock_enter_high(&sqh->sq_lock);
	if (cvp->cv_waiters < CV_MAX_WAITERS)
		cvp->cv_waiters++;
	ASSERT(cvp->cv_waiters <= CV_MAX_WAITERS);
	THREAD_SLEEP(t, &sqh->sq_lock);
	sleepq_insert(&sqh->sq_queue, t);
	/*
	 * THREAD_SLEEP() moves curthread->t_lockp to point to the
	 * lock sqh->sq_lock. This lock is later released by the caller
	 * when it calls thread_unlock() on curthread.
	 */
}

#define	cv_block_sig(t, cvp)	\
	{ (t)->t_flag |= T_WAKEABLE; cv_block(cvp); }

/*
 * Block on the indicated condition variable and release the
 * associated kmutex while blocked.
 */
void
cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	if (panicstr)
		return;

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	thread_lock(curthread);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(curthread);	/* unlock the waiters field */
	mutex_exit(mp);
	swtch();
	mutex_enter(mp);
}
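
/*
 * Usage sketch (hypothetical caller; the xs_* names are illustrative
 * and not part of this file).  A wakeup only means the condition may
 * have changed, so the predicate must be re-tested in a loop with the
 * mutex held:
 *
 *	mutex_enter(&xs->xs_lock);
 *	while (!xs->xs_ready)
 *		cv_wait(&xs->xs_cv, &xs->xs_lock);
 *	... xs_ready is true and xs_lock is held here ...
 *	mutex_exit(&xs->xs_lock);
 */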

/*
 * Same as cv_wait except the thread will unblock at 'tim'
 * (an absolute time) if it hasn't already unblocked.
 *
 * Returns the amount of time left from the original 'tim' value
 * when it was unblocked.
 */
clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	timeout_id_t id;
	clock_t timeleft;
	int signalled;

	if (panicstr)
		return (-1);

	timeleft = tim - lbolt;
	if (timeleft <= 0)
		return (-1);
	id = realtime_timeout((void (*)(void *))setrun, t, timeleft);
	thread_lock(t);		/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if ((tim - lbolt) <= 0)		/* allow for wrap */
		setrun(t);
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	/*
	 * Get the time left.  untimeout() returns -1 if the timeout has
	 * occurred, or the time remaining otherwise.  If the time remaining
	 * is zero, the timeout occurred between when we were awoken and
	 * when we called untimeout(), so treat it as if the timeout has
	 * occurred and set timeleft to -1.
	 */
	timeleft = untimeout(id);
	mutex_enter(mp);
	if (timeleft <= 0) {
		timeleft = -1;
		if (signalled)	/* avoid consuming the cv_signal() */
			cv_signal(cvp);
	}
	return (timeleft);
}
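
/*
 * Usage sketch (hypothetical caller).  Since 'tim' is absolute, the
 * deadline is normally computed by adding a tick delta to lbolt, and
 * the predicate is still re-tested on every wakeup:
 *
 *	clock_t deadline = lbolt + MSEC_TO_TICK(500);
 *
 *	mutex_enter(&xs->xs_lock);
 *	while (!xs->xs_ready) {
 *		if (cv_timedwait(&xs->xs_cv, &xs->xs_lock, deadline) == -1)
 *			break;			(timed out)
 *	}
 *	mutex_exit(&xs->xs_lock);
 */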

int
cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
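
/*
 * Usage sketch (hypothetical caller).  A return of 0 means the wait
 * was interrupted by a signal or lwp-level abort, which system calls
 * conventionally map to EINTR:
 *
 *	mutex_enter(&xs->xs_lock);
 *	while (!xs->xs_ready) {
 *		if (cv_wait_sig(&xs->xs_cv, &xs->xs_lock) == 0) {
 *			mutex_exit(&xs->xs_lock);
 *			return (EINTR);
 *		}
 *	}
 *	mutex_exit(&xs->xs_lock);
 */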

/*
 * Returns:
 *	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast().
 *		   (returns time remaining)
 *
 * cv_timedwait_sig() is now part of the DDI.
 */
clock_t
cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending = 0;
	timeout_id_t id;
	clock_t rval = 1;
	clock_t timeleft;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * If there is no lwp, then we don't need to wait for a signal.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr)
		return (cv_timedwait(cvp, mp, tim));

	/*
	 * If tim is less than or equal to lbolt, then the timeout
	 * has already occurred, so just check whether a signal is
	 * pending.  If so, return 0 to indicate that a signal is
	 * pending; otherwise return -1 to indicate that the timeout
	 * occurred.  No need to wait on anything.
	 */
	timeleft = tim - lbolt;
	if (timeleft <= 0) {
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		rval = -1;
		goto out;
	}

	/*
	 * Set the timeout and wait.
	 */
	cancel_pending = schedctl_cancel_pending();
	id = realtime_timeout((void (*)(void *))setrun, t, timeleft);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending ||
	    (tim - lbolt <= 0))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);

	/*
	 * Untimeout the thread.  untimeout() returns -1 if the timeout has
	 * occurred, or the time remaining otherwise.  If the time remaining
	 * is zero, the timeout occurred between when we were awoken and
	 * when we called untimeout(), so treat it as if the timeout has
	 * occurred and set rval to -1.
	 */
	rval = untimeout(id);
	if (rval <= 0)
		rval = -1;

	/*
	 * Check to see if a signal is pending.  If so, regardless of whether
	 * or not we were awoken due to the signal, the signal is now pending
	 * and a return of 0 has the highest priority.
	 */
out:
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval <= 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
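
/*
 * Usage sketch (hypothetical caller) dispatching on the three return
 * classes documented above:
 *
 *	clock_t deadline = lbolt + MSEC_TO_TICK(500);
 *	clock_t r;
 *
 *	mutex_enter(&xs->xs_lock);
 *	while (!xs->xs_ready) {
 *		r = cv_timedwait_sig(&xs->xs_cv, &xs->xs_lock, deadline);
 *		if (r == 0) {			(signal pending)
 *			mutex_exit(&xs->xs_lock);
 *			return (EINTR);
 *		}
 *		if (r == -1) {			(timed out)
 *			mutex_exit(&xs->xs_lock);
 *			return (ETIME);
 *		}
 *	}
 *	mutex_exit(&xs->xs_lock);
 */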

/*
 * Like cv_wait_sig_swap but allows the caller to indicate (with a
 * non-NULL sigret) that they will take care of signalling the cv
 * after wakeup, if necessary.  This is a vile hack that should only
 * be used when no other option is available; almost all callers
 * should just use cv_wait_sig_swap (which takes care of the cv_signal
 * stuff automatically) instead.
 */
int
cv_wait_sig_swap_core(kcondvar_t *cvp, kmutex_t *mp, int *sigret)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;	/* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	/* I can be swapped now */
	curthread->t_schedflag &= ~TS_DONT_SWAP;
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	/* TS_DONT_SWAP set by disp() */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */
	}
	return (rval);
}

/*
 * Same as cv_wait_sig but the thread can be swapped out while waiting.
 * This should only be used when we know we aren't holding any locks.
 */
int
cv_wait_sig_swap(kcondvar_t *cvp, kmutex_t *mp)
{
	return (cv_wait_sig_swap_core(cvp, mp, NULL));
}

void
cv_signal(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		if (cp->cv_waiters & CV_WAITERS_MASK) {
			kthread_t *t;
			cp->cv_waiters--;
			t = sleepq_wakeone_chan(&sqh->sq_queue, cp);
			/*
			 * If cv_waiters is non-zero (and less than
			 * CV_MAX_WAITERS) there should be a thread
			 * in the queue.
			 */
			ASSERT(t != NULL);
		} else if (sleepq_wakeone_chan(&sqh->sq_queue, cp) == NULL) {
			cp->cv_waiters = 0;
		}
		disp_lock_exit(&sqh->sq_lock);
	}
}
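
/*
 * Waker-side sketch (hypothetical caller).  The condition must be
 * changed under the same mutex the waiters use; otherwise the wakeup
 * can be lost between a waiter's predicate test and its cv_block():
 *
 *	mutex_enter(&xs->xs_lock);
 *	xs->xs_ready = 1;
 *	cv_signal(&xs->xs_cv);
 *	mutex_exit(&xs->xs_lock);
 */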

void
cv_broadcast(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		sleepq_wakeall_chan(&sqh->sq_queue, cp);
		cp->cv_waiters = 0;
		disp_lock_exit(&sqh->sq_lock);
	}
}
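
/*
 * cv_broadcast() is the waker-side analogue for the case where every
 * waiter must re-test, e.g. on teardown (hypothetical sketch):
 *
 *	mutex_enter(&xs->xs_lock);
 *	xs->xs_dying = 1;
 *	cv_broadcast(&xs->xs_cv);
 *	mutex_exit(&xs->xs_lock);
 */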

/*
 * Same as cv_wait(), but wakes up (after wakeup_time milliseconds) to check
 * for requests to stop, like cv_wait_sig() but without dealing with signals.
 * This is a horrible kludge.  It is evil.  It is vile.  It is swill.
 * If your code has to call this function then your code is the same.
 */
void
cv_wait_stop(kcondvar_t *cvp, kmutex_t *mp, int wakeup_time)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	timeout_id_t id;
	clock_t tim;

	if (panicstr)
		return;

	/*
	 * If there is no lwp, then we don't need to eventually stop it.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return;
	}

	/*
	 * Wakeup in wakeup_time milliseconds, i.e., human time.
	 */
	tim = lbolt + MSEC_TO_TICK(wakeup_time);
	id = realtime_timeout((void (*)(void *))setrun, t, tim - lbolt);
	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	/* ASSERT(no locks are held); */
	if ((tim - lbolt) <= 0)		/* allow for wrap */
		setrun(t);
	swtch();
	(void) untimeout(id);

	/*
	 * Check for reasons to stop, if lwp_nostop is not true.
	 * See issig_forreal() for explanations of the various stops.
	 */
	mutex_enter(&p->p_lock);
	while (lwp->lwp_nostop == 0 && !(p->p_flag & SEXITLWPS)) {
		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if (t->t_proc_flag & TP_PAUSE) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}
		/*
		 * System checkpoint.
		 */
		if (t->t_proc_flag & TP_CHKPT) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}
		/*
		 * Honor fork1(), watchpoint activity (remapping a page),
		 * and lwp_suspend() requests.
		 */
		if ((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}
		/*
		 * Honor /proc requested stop.
		 */
		if (t->t_proc_flag & TP_PRSTOP) {
			stop(PR_REQUESTED, 0);
		}
		/*
		 * If some lwp in the process has already stopped
		 * showing PR_JOBCONTROL, stop in sympathy with it.
		 */
		if (p->p_stopsig && t != p->p_agenttp) {
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}
		break;
	}
	mutex_exit(&p->p_lock);
	mutex_enter(mp);
}

/*
 * Like cv_timedwait_sig(), but takes an absolute hires future time
 * rather than a future time in clock ticks.  Will not return showing
 * that a timeout occurred until the future time is passed.
 * If 'when' is a NULL pointer, no timeout will occur.
 * Returns:
 *	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast()
 *		   or by a spurious wakeup.
 *		   (might return time remaining)
 * As a special test, if someone abruptly resets the system time
 * (but not through adjtime(2); drifting of the clock is allowed and
 * expected [see timespectohz_adj()]), then we force a return of -1
 * so the caller can return a premature timeout to the calling process
 * so it can reevaluate the situation in light of the new system time.
 * (The system clock has been reset if timecheck != timechanged.)
 */
int
cv_waituntil_sig(kcondvar_t *cvp, kmutex_t *mp,
	timestruc_t *when, int timecheck)
{
	timestruc_t now;
	timestruc_t delta;
	int rval;

	if (when == NULL)
		return (cv_wait_sig_swap(cvp, mp));

	gethrestime_lasttick(&now);
	delta = *when;
	timespecsub(&delta, &now);
	if (delta.tv_sec < 0 || (delta.tv_sec == 0 && delta.tv_nsec == 0)) {
		/*
		 * We have already reached the absolute future time.
		 * Call cv_timedwait_sig() just to check for signals.
		 * We will return immediately with either 0 or -1.
		 */
		rval = cv_timedwait_sig(cvp, mp, lbolt);
	} else {
		if (timecheck == timechanged) {
			rval = cv_timedwait_sig(cvp, mp,
			    lbolt + timespectohz(when, now));
		} else {
			/*
			 * Someone reset the system time;
			 * just force an immediate timeout.
			 */
			rval = -1;
		}
		if (rval == -1 && timecheck == timechanged) {
			/*
			 * Even though cv_timedwait_sig() returned showing a
			 * timeout, the future time may not have passed yet.
			 * If not, change rval to indicate a normal wakeup.
			 */
			gethrestime(&now);
			delta = *when;
			timespecsub(&delta, &now);
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_nsec > 0))
				rval = 1;
		}
	}
	return (rval);
}
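
/*
 * Usage sketch (hypothetical caller) for an absolute hires deadline,
 * e.g. one derived from a user-supplied timespec.  The timechanged
 * snapshot is taken before the deadline is computed, as the comment
 * above describes:
 *
 *	int timecheck = timechanged;
 *	timestruc_t when;		(absolute wakeup time, filled
 *					in by the caller)
 *	int r;
 *
 *	mutex_enter(&xs->xs_lock);
 *	while (!xs->xs_ready) {
 *		r = cv_waituntil_sig(&xs->xs_cv, &xs->xs_lock,
 *		    &when, timecheck);
 *		if (r <= 0)
 *			break;		(0: signal, -1: timeout)
 *	}
 *	mutex_exit(&xs->xs_lock);
 */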
695