xref: /illumos-gate/usr/src/uts/common/os/condvar.c (revision 350effc1e940138efb65a89b633f586280437495)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sobject.h>
#include <sys/sleepq.h>
#include <sys/cpuvar.h>
#include <sys/condvar.h>
#include <sys/condvar_impl.h>
#include <sys/schedctl.h>
#include <sys/procfs.h>
#include <sys/sdt.h>
#include <sys/callo.h>

clock_t cv_timedwait_hires(kcondvar_t *, kmutex_t *, hrtime_t, hrtime_t, int);

/*
 * CV_MAX_WAITERS is the maximum number of waiters we track; once
 * the number becomes higher than that, we look at the sleepq to
 * see whether there are *really* any waiters.
 */
#define	CV_MAX_WAITERS		1024		/* must be power of 2 */
#define	CV_WAITERS_MASK		(CV_MAX_WAITERS - 1)
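
/*
 * Note on saturation (derived from the code below): cv_block() stops
 * incrementing cv_waiters at CV_MAX_WAITERS and cv_unsleep() stops
 * decrementing it there, so once the count saturates it stays pinned
 * at CV_MAX_WAITERS.  cv_signal() then probes the sleep queue directly
 * and resets the count to zero only once the queue is observed empty.
 */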

/*
 * Threads don't "own" condition variables.
 */
/* ARGSUSED */
static kthread_t *
cv_owner(void *cvp)
{
	return (NULL);
}

/*
 * Unsleep a thread that's blocked on a condition variable.
 */
static void
cv_unsleep(kthread_t *t)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_head_t *sqh = SQHASH(cvp);

	ASSERT(THREAD_LOCK_HELD(t));

	if (cvp == NULL)
		panic("cv_unsleep: thread %p not on sleepq %p",
		    (void *)t, (void *)sqh);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	sleepq_unsleep(t);
	if (cvp->cv_waiters != CV_MAX_WAITERS)
		cvp->cv_waiters--;
	disp_lock_exit_high(&sqh->sq_lock);
	CL_SETRUN(t);
}

/*
 * Change the priority of a thread that's blocked on a condition variable.
 */
static void
cv_change_pri(kthread_t *t, pri_t pri, pri_t *t_prip)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_t *sqp = t->t_sleepq;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(&SQHASH(cvp)->sq_queue == sqp);

	if (cvp == NULL)
		panic("cv_change_pri: %p not on sleep queue", (void *)t);
	sleepq_dequeue(t);
	*t_prip = pri;
	sleepq_insert(sqp, t);
}

/*
 * The sobj_ops vector exports a set of functions needed when a thread
 * is asleep on a synchronization object of this type.
 */
static sobj_ops_t cv_sobj_ops = {
	SOBJ_CV, cv_owner, cv_unsleep, cv_change_pri
};

/* ARGSUSED */
void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	((condvar_impl_t *)cvp)->cv_waiters = 0;
}

/*
 * cv_destroy is not currently needed, but is part of the DDI.
 * This is in case cv_init ever needs to allocate something for a cv.
 */
/* ARGSUSED */
void
cv_destroy(kcondvar_t *cvp)
{
	ASSERT((((condvar_impl_t *)cvp)->cv_waiters & CV_WAITERS_MASK) == 0);
}

/*
 * The cv_block() function blocks a thread on a condition variable
 * by putting it in a hashed sleep queue associated with the
 * synchronization object.
 *
 * Threads are taken off the hashed sleep queues via calls to
 * cv_signal(), cv_broadcast(), or cv_unsleep().
 */
static void
cv_block(condvar_impl_t *cvp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	sleepq_head_t *sqh;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	t->t_schedflag &= ~TS_SIGNALLED;
	CL_SLEEP(t);			/* assign kernel priority */
	t->t_wchan = (caddr_t)cvp;
	t->t_sobj_ops = &cv_sobj_ops;
	DTRACE_SCHED(sleep);

	/*
	 * The check for t_intr is to avoid doing the accounting
	 * for an interrupt thread on the still-pinned
	 * lwp's statistics.
	 */
	if (lwp != NULL && t->t_intr == NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
	}

	sqh = SQHASH(cvp);
	disp_lock_enter_high(&sqh->sq_lock);
	if (cvp->cv_waiters < CV_MAX_WAITERS)
		cvp->cv_waiters++;
	ASSERT(cvp->cv_waiters <= CV_MAX_WAITERS);
	THREAD_SLEEP(t, &sqh->sq_lock);
	sleepq_insert(&sqh->sq_queue, t);
	/*
	 * THREAD_SLEEP() moves curthread->t_lockp to point to the
	 * lock sqh->sq_lock. This lock is later released by the caller
	 * when it calls thread_unlock() on curthread.
	 */
}

#define	cv_block_sig(t, cvp)	\
	{ (t)->t_flag |= T_WAKEABLE; cv_block(cvp); }

/*
 * Block on the indicated condition variable and release the
 * associated kmutex while blocked.
 */
void
cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	if (panicstr)
		return;

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	thread_lock(curthread);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(curthread);	/* unlock the waiters field */
	mutex_exit(mp);
	swtch();
	mutex_enter(mp);
}
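
/*
 * Usage sketch (not part of this file; the xx_* names are hypothetical):
 * the canonical cv_wait() pattern, as documented in condvar(9F).  The
 * condition is always rechecked in a loop, since a wakeup does not
 * guarantee that the condition still holds:
 *
 *	mutex_enter(&xx_lock);
 *	while (!xx_ready)
 *		cv_wait(&xx_cv, &xx_lock);
 *	...			(consume the protected state)
 *	mutex_exit(&xx_lock);
 */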

static void
cv_wakeup(void *arg)
{
	kthread_t *t = arg;

	/*
	 * This mutex is acquired and released in order to make sure that
	 * the wakeup does not happen before the block itself happens.
	 */
	mutex_enter(&t->t_wait_mutex);
	mutex_exit(&t->t_wait_mutex);
	setrun(t);
}

/*
 * Same as cv_wait except the thread will unblock at 'tim'
 * (an absolute time) if it hasn't already unblocked.
 *
 * Returns the amount of time left from the original 'tim' value
 * when it was unblocked.
 */
clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	hrtime_t hrtim;
	clock_t now = ddi_get_lbolt();

	if (tim <= now)
		return (-1);

	hrtim = TICK_TO_NSEC(tim - now);
	return (cv_timedwait_hires(cvp, mp, hrtim, nsec_per_tick, 0));
}
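
/*
 * Usage sketch (xx_* names hypothetical): 'tim' is an absolute lbolt
 * value, so callers add a delta to the current tick count, e.g. to
 * wait at most one second for a condition:
 *
 *	clock_t deadline = ddi_get_lbolt() + drv_usectohz(1000000);
 *
 *	mutex_enter(&xx_lock);
 *	while (!xx_ready) {
 *		if (cv_timedwait(&xx_cv, &xx_lock, deadline) == -1)
 *			break;	(timed out; xx_ready is still false)
 *	}
 *	mutex_exit(&xx_lock);
 */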

/*
 * Same as cv_timedwait() except that the third argument is a relative
 * timeout value, as opposed to an absolute one. There is also a fourth
 * argument that specifies how accurately the timeout must be implemented.
 */
clock_t
cv_reltimedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t delta, time_res_t res)
{
	hrtime_t exp;

	ASSERT(TIME_RES_VALID(res));

	if (delta <= 0)
		return (-1);

	if ((exp = TICK_TO_NSEC(delta)) < 0)
		exp = CY_INFINITY;

	return (cv_timedwait_hires(cvp, mp, exp, time_res[res], 0));
}
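
/*
 * Usage sketch (xx_* names hypothetical): the relative form avoids the
 * ddi_get_lbolt() arithmetic; here we wait up to one second and only
 * require clock-tick accuracy:
 *
 *	(void) cv_reltimedwait(&xx_cv, &xx_lock, drv_usectohz(1000000),
 *	    TR_CLOCK_TICK);
 */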

clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
	kthread_t *t = curthread;
	callout_id_t id;
	clock_t timeleft;
	hrtime_t limit;
	int signalled;

	if (panicstr)
		return (-1);

	limit = (flag & CALLOUT_FLAG_ABSOLUTE) ? gethrtime() : 0;
	if (tim <= limit)
		return (-1);
	mutex_enter(&t->t_wait_mutex);
	id = timeout_generic(CALLOUT_REALTIME, (void (*)(void *))cv_wakeup, t,
	    tim, res, flag);
	thread_lock(t);		/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(&t->t_wait_mutex);
	mutex_exit(mp);
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	/*
	 * Get the time left.  untimeout() returns -1 if the timeout has
	 * occurred, or else the time remaining.  If the time remaining is
	 * zero, the timeout occurred between when we were awoken and when
	 * we called untimeout(), so we treat it as a timeout and set
	 * timeleft to -1.
	 */
	timeleft = untimeout_default(id, 0);
	mutex_enter(mp);
	if (timeleft <= 0) {
		timeleft = -1;
		if (signalled)	/* avoid consuming the cv_signal() */
			cv_signal(cvp);
	}
	return (timeleft);
}

int
cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
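
/*
 * Usage sketch (xx_* names hypothetical): an interruptible wait.
 * Syscall code typically maps the 0 return (signal received or
 * cancellation pending) to EINTR:
 *
 *	mutex_enter(&xx_lock);
 *	while (!xx_ready) {
 *		if (cv_wait_sig(&xx_cv, &xx_lock) == 0) {
 *			mutex_exit(&xx_lock);
 *			return (EINTR);
 *		}
 *	}
 *	mutex_exit(&xx_lock);
 */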

static clock_t
cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending = 0;
	callout_id_t id;
	clock_t rval = 1;
	hrtime_t limit;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * If there is no lwp, then we don't need to wait for a signal.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr)
		return (cv_timedwait_hires(cvp, mp, tim, res, flag));

	/*
	 * If tim is less than or equal to the current hrtime, the timeout
	 * has already occurred, so there is no need to wait on anything.
	 * Just check for a pending signal: if one is pending, return 0;
	 * otherwise return -1 to indicate that the timeout occurred.
	 */
	limit = (flag & CALLOUT_FLAG_ABSOLUTE) ? gethrtime() : 0;
	if (tim <= limit) {
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		rval = -1;
		goto out;
	}

	/*
	 * Set the timeout and wait.
	 */
	cancel_pending = schedctl_cancel_pending();
	mutex_enter(&t->t_wait_mutex);
	id = timeout_generic(CALLOUT_REALTIME, (void (*)(void *))cv_wakeup, t,
	    tim, res, flag);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(&t->t_wait_mutex);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;

	/*
	 * Untimeout the thread.  untimeout() returns -1 if the timeout has
	 * occurred, or else the time remaining.  If the time remaining is
	 * zero, the timeout occurred between when we were awoken and when
	 * we called untimeout(), so we treat it as a timeout and set rval
	 * to -1.
	 */
	rval = untimeout_default(id, 0);
	mutex_enter(mp);
	if (rval <= 0)
		rval = -1;

	/*
	 * Check to see if a signal is pending.  If so, regardless of whether
	 * or not we were awoken due to the signal, the signal is now pending
	 * and a return of 0 has the highest priority.
	 */
out:
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval <= 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}

/*
 * Returns:
 *	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast().
 *		   (returns time remaining)
 *
 * cv_timedwait_sig() is now part of the DDI.
 *
 * This function is now just a wrapper for cv_timedwait_sig_hires().
 */
clock_t
cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	hrtime_t hrtim;

	hrtim = TICK_TO_NSEC(tim - ddi_get_lbolt());
	return (cv_timedwait_sig_hires(cvp, mp, hrtim, nsec_per_tick, 0));
}
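
/*
 * Usage sketch (xx_* names hypothetical): distinguishing the three
 * outcomes listed above:
 *
 *	clock_t ret = cv_timedwait_sig(&xx_cv, &xx_lock,
 *	    ddi_get_lbolt() + drv_usectohz(1000000));
 *	if (ret == 0)
 *		...	(interrupted by a signal)
 *	else if (ret == -1)
 *		...	(the timeout expired)
 *	else
 *		...	(awakened; recheck the condition)
 */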

/*
 * Same as cv_timedwait_sig() except that the third argument is a relative
 * timeout value, as opposed to an absolute one. There is also a fourth
 * argument that specifies how accurately the timeout must be implemented.
 */
clock_t
cv_reltimedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t delta,
    time_res_t res)
{
	hrtime_t exp;

	ASSERT(TIME_RES_VALID(res));

	if ((exp = TICK_TO_NSEC(delta)) < 0)
		exp = CY_INFINITY;

	return (cv_timedwait_sig_hires(cvp, mp, exp, time_res[res], 0));
}

/*
 * Like cv_wait_sig_swap but allows the caller to indicate (with a
 * non-NULL sigret) that they will take care of signalling the cv
 * after wakeup, if necessary.  This is a vile hack that should only
 * be used when no other option is available; almost all callers
 * should just use cv_wait_sig_swap (which takes care of the cv_signal
 * stuff automatically) instead.
 */
int
cv_wait_sig_swap_core(kcondvar_t *cvp, kmutex_t *mp, int *sigret)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;	/* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	/* I can be swapped now */
	curthread->t_schedflag &= ~TS_DONT_SWAP;
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	/* TS_DONT_SWAP set by disp() */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */
	}
	return (rval);
}

/*
 * Same as cv_wait_sig but the thread can be swapped out while waiting.
 * This should only be used when we know we aren't holding any locks.
 */
int
cv_wait_sig_swap(kcondvar_t *cvp, kmutex_t *mp)
{
	return (cv_wait_sig_swap_core(cvp, mp, NULL));
}

void
cv_signal(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		if (cp->cv_waiters & CV_WAITERS_MASK) {
			kthread_t *t;
			cp->cv_waiters--;
			t = sleepq_wakeone_chan(&sqh->sq_queue, cp);
			/*
			 * If cv_waiters is non-zero (and less than
			 * CV_MAX_WAITERS) there should be a thread
			 * in the queue.
			 */
			ASSERT(t != NULL);
		} else if (sleepq_wakeone_chan(&sqh->sq_queue, cp) == NULL) {
			cp->cv_waiters = 0;
		}
		disp_lock_exit(&sqh->sq_lock);
	}
}
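
/*
 * Usage sketch (xx_* names hypothetical): the producer side of the
 * cv_wait() pattern shown earlier.  The state change and the wakeup
 * are both performed under the mutex the waiters use:
 *
 *	mutex_enter(&xx_lock);
 *	xx_ready = 1;
 *	cv_signal(&xx_cv);
 *	mutex_exit(&xx_lock);
 */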

void
cv_broadcast(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		sleepq_wakeall_chan(&sqh->sq_queue, cp);
		cp->cv_waiters = 0;
		disp_lock_exit(&sqh->sq_lock);
	}
}
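
/*
 * Usage sketch (xx_* names hypothetical): cv_broadcast() is the right
 * call when every waiter must reevaluate, e.g. at teardown:
 *
 *	mutex_enter(&xx_lock);
 *	xx_shutdown = 1;
 *	cv_broadcast(&xx_cv);
 *	mutex_exit(&xx_lock);
 */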

/*
 * Same as cv_wait(), but wakes up (after wakeup_time milliseconds) to check
 * for requests to stop, like cv_wait_sig() but without dealing with signals.
 * This is a horrible kludge.  It is evil.  It is vile.  It is swill.
 * If your code has to call this function then your code is the same.
 */
void
cv_wait_stop(kcondvar_t *cvp, kmutex_t *mp, int wakeup_time)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	callout_id_t id;
	clock_t tim;

	if (panicstr)
		return;

	/*
	 * If there is no lwp, then there is nothing to eventually stop.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return;
	}

	/*
	 * Wakeup in wakeup_time milliseconds, i.e., human time.
	 */
	tim = ddi_get_lbolt() + MSEC_TO_TICK(wakeup_time);
	mutex_enter(&t->t_wait_mutex);
	id = realtime_timeout_default((void (*)(void *))cv_wakeup, t,
	    tim - ddi_get_lbolt());
	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(&t->t_wait_mutex);
	mutex_exit(mp);
	/* ASSERT(no locks are held); */
	swtch();
	(void) untimeout_default(id, 0);

	/*
	 * Check for reasons to stop, if lwp_nostop is not true.
	 * See issig_forreal() for explanations of the various stops.
	 */
	mutex_enter(&p->p_lock);
	while (lwp->lwp_nostop == 0 && !(p->p_flag & SEXITLWPS)) {
		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if (t->t_proc_flag & TP_PAUSE) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}
		/*
		 * System checkpoint.
		 */
		if (t->t_proc_flag & TP_CHKPT) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}
		/*
		 * Honor fork1(), watchpoint activity (remapping a page),
		 * and lwp_suspend() requests.
		 */
		if ((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}
		/*
		 * Honor /proc requested stop.
		 */
		if (t->t_proc_flag & TP_PRSTOP) {
			stop(PR_REQUESTED, 0);
		}
		/*
		 * If some lwp in the process has already stopped
		 * showing PR_JOBCONTROL, stop in sympathy with it.
		 */
		if (p->p_stopsig && t != p->p_agenttp) {
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}
		break;
	}
	mutex_exit(&p->p_lock);
	mutex_enter(mp);
}

/*
 * Like cv_timedwait_sig(), but takes an absolute hires future time
 * rather than a future time in clock ticks.  Will not return showing
 * that a timeout occurred until the future time is passed.
 * If 'when' is a NULL pointer, no timeout will occur.
 * Returns:
 *	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast()
 *		   or by a spurious wakeup.
 *		   (might return time remaining)
 * As a special test, if someone abruptly resets the system time
 * (but not through adjtime(2); drifting of the clock is allowed and
 * expected [see timespectohz_adj()]), then we force a return of -1
 * so the caller can return a premature timeout to the calling process
 * so it can reevaluate the situation in light of the new system time.
 * (The system clock has been reset if timecheck != timechanged.)
 */
int
cv_waituntil_sig(kcondvar_t *cvp, kmutex_t *mp,
	timestruc_t *when, int timecheck)
{
	timestruc_t now;
	timestruc_t delta;
	hrtime_t interval;
	int rval;

	if (when == NULL)
		return (cv_wait_sig_swap(cvp, mp));

	gethrestime(&now);
	delta = *when;
	timespecsub(&delta, &now);
	if (delta.tv_sec < 0 || (delta.tv_sec == 0 && delta.tv_nsec == 0)) {
		/*
		 * We have already reached the absolute future time.
		 * Call cv_timedwait_sig() just to check for signals.
		 * We will return immediately with either 0 or -1.
		 */
		rval = cv_timedwait_sig_hires(cvp, mp, 0, 1, 0);
	} else {
		if (timecheck == timechanged) {
			/*
			 * Make sure that the interval is at least one tick.
			 * This is to prevent a user from flooding the system
			 * with very small, high-resolution timers.
			 */
			interval = ts2hrt(&delta);
			if (interval < nsec_per_tick)
				interval = nsec_per_tick;
			rval = cv_timedwait_sig_hires(cvp, mp, interval, 1,
			    CALLOUT_FLAG_HRESTIME);
		} else {
			/*
			 * Someone reset the system time;
			 * just force an immediate timeout.
			 */
			rval = -1;
		}
		if (rval == -1 && timecheck == timechanged) {
			/*
			 * Even though cv_timedwait_sig() returned showing a
			 * timeout, the future time may not have passed yet.
			 * If not, change rval to indicate a normal wakeup.
			 */
			gethrestime(&now);
			delta = *when;
			timespecsub(&delta, &now);
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_nsec > 0))
				rval = 1;
		}
	}
	return (rval);
}
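
/*
 * Usage sketch (names hypothetical): callers snapshot the global
 * 'timechanged' generation count before computing the absolute wait
 * time, so a clock reset in between is detected and forces the
 * premature-timeout path above:
 *
 *	int timecheck = timechanged;
 *	timestruc_t when;
 *	...		(fill in 'when' from hrestime plus a delta)
 *	rval = cv_waituntil_sig(&xx_cv, &xx_lock, &when, timecheck);
 */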
789