xref: /titanic_41/usr/src/uts/common/os/condvar.c (revision e0724c534a46ca4754330bc022bf1e2a68f5bb93)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2012 by Delphix. All rights reserved.
29  */
30 
31 #include <sys/thread.h>
32 #include <sys/proc.h>
33 #include <sys/debug.h>
34 #include <sys/cmn_err.h>
35 #include <sys/systm.h>
36 #include <sys/sobject.h>
37 #include <sys/sleepq.h>
38 #include <sys/cpuvar.h>
39 #include <sys/condvar.h>
40 #include <sys/condvar_impl.h>
41 #include <sys/schedctl.h>
42 #include <sys/procfs.h>
43 #include <sys/sdt.h>
44 #include <sys/callo.h>
45 
/* Forward declaration: defined later in this file. */
clock_t cv_timedwait_hires(kcondvar_t *, kmutex_t *, hrtime_t, hrtime_t, int);

/*
 * CV_MAX_WAITERS is the maximum number of waiters we track; once
 * the number becomes higher than that, we look at the sleepq to
 * see whether there are *really* any waiters.
 */
#define	CV_MAX_WAITERS		1024		/* must be power of 2 */
#define	CV_WAITERS_MASK		(CV_MAX_WAITERS - 1)
55 
/*
 * Threads don't "own" condition variables.
 * This is the sobj_ops owner callback; since there is no ownership
 * concept for CVs, there is never an owner to report.
 */
/* ARGSUSED */
static kthread_t *
cv_owner(void *cvp)
{
	return (NULL);
}
65 
/*
 * Unsleep a thread that's blocked on a condition variable.
 * Invoked via the sobj_ops vector with the thread lock held; takes the
 * thread off its sleep queue and makes it runnable again.
 */
static void
cv_unsleep(kthread_t *t)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_head_t *sqh = SQHASH(cvp);

	ASSERT(THREAD_LOCK_HELD(t));

	if (cvp == NULL)
		panic("cv_unsleep: thread %p not on sleepq %p",
		    (void *)t, (void *)sqh);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	sleepq_unsleep(t);
	/*
	 * Once cv_waiters has saturated at CV_MAX_WAITERS it is no longer
	 * an exact count, so leave it pinned rather than decrementing.
	 */
	if (cvp->cv_waiters != CV_MAX_WAITERS)
		cvp->cv_waiters--;
	disp_lock_exit_high(&sqh->sq_lock);
	CL_SETRUN(t);	/* scheduling-class-specific transition to run state */
}
87 
/*
 * Change the priority of a thread that's blocked on a condition variable.
 * Invoked via the sobj_ops vector with the thread lock held: the thread is
 * pulled off its sleep queue, the caller-supplied priority field is
 * updated, and the thread is re-inserted so it occupies the queue
 * position its new priority dictates.
 */
static void
cv_change_pri(kthread_t *t, pri_t pri, pri_t *t_prip)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_t *sqp = t->t_sleepq;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(&SQHASH(cvp)->sq_queue == sqp);

	if (cvp == NULL)
		panic("cv_change_pri: %p not on sleep queue", (void *)t);
	sleepq_dequeue(t);
	*t_prip = pri;		/* update priority through the caller's pointer */
	sleepq_insert(sqp, t);
}
106 
/*
 * The sobj_ops vector exports a set of functions needed when a thread
 * is asleep on a synchronization object of this type.
 * Members, in order: object type tag, owner, unsleep, change-priority.
 */
static sobj_ops_t cv_sobj_ops = {
	SOBJ_CV, cv_owner, cv_unsleep, cv_change_pri
};
114 
115 /* ARGSUSED */
116 void
117 cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
118 {
119 	((condvar_impl_t *)cvp)->cv_waiters = 0;
120 }
121 
/*
 * cv_destroy is not currently needed, but is part of the DDI.
 * This is in case cv_init ever needs to allocate something for a cv.
 */
/* ARGSUSED */
void
cv_destroy(kcondvar_t *cvp)
{
	/*
	 * Only the unsaturated low-order bits must be zero: a count pinned
	 * at CV_MAX_WAITERS (a power of two) also masks to zero here.
	 */
	ASSERT((((condvar_impl_t *)cvp)->cv_waiters & CV_WAITERS_MASK) == 0);
}
132 
/*
 * The cv_block() function blocks a thread on a condition variable
 * by putting it in a hashed sleep queue associated with the
 * synchronization object.
 *
 * Threads are taken off the hashed sleep queues via calls to
 * cv_signal(), cv_broadcast(), or cv_unsleep().
 *
 * Entered with curthread's thread lock held; on return the thread has
 * been put to sleep and its t_lockp points at the sleep queue's lock
 * (see the THREAD_SLEEP() comment at the bottom).
 */
static void
cv_block(condvar_impl_t *cvp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	sleepq_head_t *sqh;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t != CPU->cpu_idle_thread);	/* idle thread must never block */
	ASSERT(CPU_ON_INTR(CPU) == 0);		/* can't block at interrupt level */
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	t->t_schedflag &= ~TS_SIGNALLED;	/* clear any stale wakeup cause */
	CL_SLEEP(t);			/* assign kernel priority */
	t->t_wchan = (caddr_t)cvp;
	t->t_sobj_ops = &cv_sobj_ops;
	DTRACE_SCHED(sleep);

	/*
	 * The check for t_intr is to avoid doing the
	 * account for an interrupt thread on the still-pinned
	 * lwp's statistics.
	 */
	if (lwp != NULL && t->t_intr == NULL) {
		lwp->lwp_ru.nvcsw++;		/* voluntary context switch */
		(void) new_mstate(t, LMS_SLEEP);
	}

	sqh = SQHASH(cvp);
	disp_lock_enter_high(&sqh->sq_lock);
	/*
	 * cv_waiters saturates at CV_MAX_WAITERS; past that point the
	 * sleepq itself must be consulted to learn whether any waiters
	 * remain (see cv_signal()).
	 */
	if (cvp->cv_waiters < CV_MAX_WAITERS)
		cvp->cv_waiters++;
	ASSERT(cvp->cv_waiters <= CV_MAX_WAITERS);
	THREAD_SLEEP(t, &sqh->sq_lock);
	sleepq_insert(&sqh->sq_queue, t);
	/*
	 * THREAD_SLEEP() moves curthread->t_lockp to point to the
	 * lock sqh->sq_lock. This lock is later released by the caller
	 * when it calls thread_unlock() on curthread.
	 */
}
183 
/*
 * Variant of cv_block() for signal-interruptible waits: mark the thread
 * T_WAKEABLE before blocking so it can be woken for signal processing
 * (the flag is cleared by the callers after swtch() returns).
 */
#define	cv_block_sig(t, cvp)	\
	{ (t)->t_flag |= T_WAKEABLE; cv_block(cvp); }
186 
/*
 * Block on the indicated condition variable and release the
 * associated kmutex while blocked.
 *
 * The mutex is dropped only after the thread is on the sleep queue
 * (placed there under the thread lock), so a cv_signal() issued by the
 * mutex's next holder cannot be missed.  The mutex is re-acquired
 * before returning.
 */
void
cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	if (panicstr)
		return;		/* never block once the system has panicked */
	ASSERT(!quiesce_active);

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	thread_lock(curthread);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(curthread);	/* unlock the waiters field */
	mutex_exit(mp);
	swtch();			/* give up the CPU until awakened */
	mutex_enter(mp);
}
206 
/*
 * Timeout callback for the timed-wait routines (armed via
 * timeout_generic()/realtime_timeout_default()): make the specified
 * thread runnable again.
 */
static void
cv_wakeup(void *arg)
{
	kthread_t *t = arg;

	/*
	 * This mutex is acquired and released in order to make sure that
	 * the wakeup does not happen before the block itself happens.
	 */
	mutex_enter(&t->t_wait_mutex);
	mutex_exit(&t->t_wait_mutex);
	setrun(t);
}
220 
221 /*
222  * Same as cv_wait except the thread will unblock at 'tim'
223  * (an absolute time) if it hasn't already unblocked.
224  *
225  * Returns the amount of time left from the original 'tim' value
226  * when it was unblocked.
227  */
228 clock_t
229 cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
230 {
231 	hrtime_t hrtim;
232 	clock_t now = ddi_get_lbolt();
233 
234 	if (tim <= now)
235 		return (-1);
236 
237 	hrtim = TICK_TO_NSEC(tim - now);
238 	return (cv_timedwait_hires(cvp, mp, hrtim, nsec_per_tick, 0));
239 }
240 
241 /*
242  * Same as cv_timedwait() except that the third argument is a relative
243  * timeout value, as opposed to an absolute one. There is also a fourth
244  * argument that specifies how accurately the timeout must be implemented.
245  */
246 clock_t
247 cv_reltimedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t delta, time_res_t res)
248 {
249 	hrtime_t exp;
250 
251 	ASSERT(TIME_RES_VALID(res));
252 
253 	if (delta <= 0)
254 		return (-1);
255 
256 	if ((exp = TICK_TO_NSEC(delta)) < 0)
257 		exp = CY_INFINITY;
258 
259 	return (cv_timedwait_hires(cvp, mp, exp, time_res[res], 0));
260 }
261 
/*
 * High-resolution core of the timed waits: block on 'cvp', dropping
 * 'mp', until awakened or until 'tim' expires with resolution 'res'.
 * 'tim' is relative, or absolute when CALLOUT_FLAG_ABSOLUTE is set in
 * 'flag'.  Returns -1 on timeout, otherwise the time remaining.
 */
clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
	kthread_t *t = curthread;
	callout_id_t id;
	clock_t timeleft;
	hrtime_t limit;
	int signalled;

	if (panicstr)
		return (-1);
	ASSERT(!quiesce_active);

	/* An absolute deadline already in the past is an immediate timeout. */
	limit = (flag & CALLOUT_FLAG_ABSOLUTE) ? gethrtime() : 0;
	if (tim <= limit)
		return (-1);
	/*
	 * t_wait_mutex is held across arming the callout and blocking so
	 * that cv_wakeup() cannot fire before we are actually asleep
	 * (see the matching enter/exit pair in cv_wakeup()).
	 */
	mutex_enter(&t->t_wait_mutex);
	id = timeout_generic(CALLOUT_REALTIME, (void (*)(void *))cv_wakeup, t,
	    tim, res, flag);
	thread_lock(t);		/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(&t->t_wait_mutex);
	mutex_exit(mp);
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	/*
	 * Get the time left. untimeout() returns -1 if the timeout has
	 * occurred or the time remaining.  If the time remaining is zero,
	 * the timeout has occurred between when we were awoken and
	 * we called untimeout.  We will treat this as if the timeout
	 * has occurred and set timeleft to -1.
	 */
	timeleft = untimeout_default(id, 0);
	mutex_enter(mp);
	if (timeleft <= 0) {
		timeleft = -1;
		if (signalled)	/* avoid consuming the cv_signal() */
			cv_signal(cvp);
	}
	return (timeleft);
}
305 
/*
 * Like cv_wait(), but the wait can be interrupted by a signal, a forced
 * return, or thread cancellation.  Returns 0 if interrupted, > 0 on a
 * normal wakeup.
 */
int
cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);
	ASSERT(!quiesce_active);

	/*
	 * Threads in system processes don't process signals.  This is
	 * true both for standard threads of system processes and for
	 * interrupt threads which have borrowed their pinned thread's LWP.
	 */
	if (lwp == NULL || (p->p_flag & SSYS)) {
		cv_wait(cvp, mp);
		return (rval);
	}
	ASSERT(t->t_intr == NULL);

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	/*
	 * If a signal, forced return, or cancellation arrived while we
	 * were setting up the sleep, wake ourselves right back up rather
	 * than sleeping through it.
	 */
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);		/* drop mp across real signal processing */
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
364 
/*
 * Core of the interruptible timed waits: combines the timeout behavior
 * of cv_timedwait_hires() with the signal/cancellation handling of
 * cv_wait_sig().  'tim' is relative, or absolute when
 * CALLOUT_FLAG_ABSOLUTE is set in 'flag'; 'res' is the timer resolution.
 * Returns 0 if a signal was taken, -1 on timeout, > 0 otherwise.
 */
static clock_t
cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending = 0;
	callout_id_t id;
	clock_t rval = 1;
	hrtime_t limit;
	int signalled = 0;

	if (panicstr)
		return (rval);
	ASSERT(!quiesce_active);

	/*
	 * Threads in system processes don't process signals.  This is
	 * true both for standard threads of system processes and for
	 * interrupt threads which have borrowed their pinned thread's LWP.
	 */
	if (lwp == NULL || (p->p_flag & SSYS))
		return (cv_timedwait_hires(cvp, mp, tim, res, flag));
	ASSERT(t->t_intr == NULL);

	/*
	 * If tim is less than or equal to current hrtime, then the timeout
	 * has already occurred.  So just check to see if there is a signal
	 * pending.  If so return 0 indicating that there is a signal pending.
	 * Else return -1 indicating that the timeout occurred. No need to
	 * wait on anything.
	 */
	limit = (flag & CALLOUT_FLAG_ABSOLUTE) ? gethrtime() : 0;
	if (tim <= limit) {
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		rval = -1;
		goto out;
	}

	/*
	 * Set the timeout and wait.  t_wait_mutex is held across arming
	 * the callout and blocking so that cv_wakeup() cannot fire before
	 * we are actually asleep.
	 */
	cancel_pending = schedctl_cancel_pending();
	mutex_enter(&t->t_wait_mutex);
	id = timeout_generic(CALLOUT_REALTIME, (void (*)(void *))cv_wakeup, t,
	    tim, res, flag);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(&t->t_wait_mutex);
	mutex_exit(mp);
	/*
	 * If a signal, forced return, or cancellation arrived while we
	 * were setting up the sleep, wake ourselves right back up.
	 */
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;

	/*
	 * Untimeout the thread.  untimeout() returns -1 if the timeout has
	 * occurred or the time remaining.  If the time remaining is zero,
	 * the timeout has occurred between when we were awoken and
	 * we called untimeout.  We will treat this as if the timeout
	 * has occurred and set rval to -1.
	 */
	rval = untimeout_default(id, 0);
	mutex_enter(mp);
	if (rval <= 0)
		rval = -1;

	/*
	 * Check to see if a signal is pending.  If so, regardless of whether
	 * or not we were awoken due to the signal, the signal is now pending
	 * and a return of 0 has the highest priority.
	 */
out:
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);		/* drop mp across real signal processing */
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval <= 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
463 
464 /*
465  * Returns:
466  * 	Function result in order of precedence:
467  *		 0 if a signal was received
468  *		-1 if timeout occured
469  *		>0 if awakened via cv_signal() or cv_broadcast().
470  *		   (returns time remaining)
471  *
472  * cv_timedwait_sig() is now part of the DDI.
473  *
474  * This function is now just a wrapper for cv_timedwait_sig_hires().
475  */
476 clock_t
477 cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
478 {
479 	hrtime_t hrtim;
480 
481 	hrtim = TICK_TO_NSEC(tim - ddi_get_lbolt());
482 	return (cv_timedwait_sig_hires(cvp, mp, hrtim, nsec_per_tick, 0));
483 }
484 
485 /*
486  * Wait until the specified time.
487  * If tim == -1, waits without timeout using cv_wait_sig_swap().
488  */
489 int
490 cv_timedwait_sig_hrtime(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim)
491 {
492 	if (tim == -1) {
493 		return (cv_wait_sig_swap(cvp, mp));
494 	} else {
495 		return (cv_timedwait_sig_hires(cvp, mp, tim, 1,
496 		    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP));
497 	}
498 }
499 
500 /*
501  * Same as cv_timedwait_sig() except that the third argument is a relative
502  * timeout value, as opposed to an absolute one. There is also a fourth
503  * argument that specifies how accurately the timeout must be implemented.
504  */
505 clock_t
506 cv_reltimedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t delta,
507     time_res_t res)
508 {
509 	hrtime_t exp = 0;
510 
511 	ASSERT(TIME_RES_VALID(res));
512 
513 	if (delta > 0) {
514 		if ((exp = TICK_TO_NSEC(delta)) < 0)
515 			exp = CY_INFINITY;
516 	}
517 
518 	return (cv_timedwait_sig_hires(cvp, mp, exp, time_res[res], 0));
519 }
520 
/*
 * Like cv_wait_sig_swap but allows the caller to indicate (with a
 * non-NULL sigret) that they will take care of signalling the cv
 * after wakeup, if necessary.  This is a vile hack that should only
 * be used when no other option is available; almost all callers
 * should just use cv_wait_sig_swap (which takes care of the cv_signal
 * stuff automatically) instead.
 *
 * Returns 0 if interrupted by a signal/cancellation, > 0 otherwise.
 */
int
cv_wait_sig_swap_core(kcondvar_t *cvp, kmutex_t *mp, int *sigret)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * Threads in system processes don't process signals.  This is
	 * true both for standard threads of system processes and for
	 * interrupt threads which have borrowed their pinned thread's LWP.
	 */
	if (lwp == NULL || (p->p_flag & SSYS)) {
		cv_wait(cvp, mp);
		return (rval);
	}
	ASSERT(t->t_intr == NULL);

	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;	/* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	/* I can be swapped now */
	curthread->t_schedflag &= ~TS_DONT_SWAP;
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	/*
	 * If a signal, forced return, or cancellation arrived while we
	 * were setting up the sleep, wake ourselves right back up.
	 */
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	/* TS_DONT_SWAP set by disp() */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);		/* drop mp across real signal processing */
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */
	}
	return (rval);
}
594 
/*
 * Same as cv_wait_sig but the thread can be swapped out while waiting.
 * This should only be used when we know we aren't holding any locks.
 * Returns 0 if interrupted, > 0 on a normal wakeup.
 */
int
cv_wait_sig_swap(kcondvar_t *cvp, kmutex_t *mp)
{
	/* NULL sigret: let the core routine preserve any cv_signal() */
	return (cv_wait_sig_swap_core(cvp, mp, NULL));
}
604 
/*
 * Wake at most one thread blocked on 'cvp'.
 * While cv_waiters is below CV_MAX_WAITERS it is an exact count; once
 * saturated, the sleep queue itself must be probed to find out whether
 * any waiters actually remain.
 */
void
cv_signal(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		if (cp->cv_waiters & CV_WAITERS_MASK) {
			kthread_t *t;
			/* exact count: decrement and wake one thread */
			cp->cv_waiters--;
			t = sleepq_wakeone_chan(&sqh->sq_queue, cp);
			/*
			 * If cv_waiters is non-zero (and less than
			 * CV_MAX_WAITERS) there should be a thread
			 * in the queue.
			 */
			ASSERT(t != NULL);
		} else if (sleepq_wakeone_chan(&sqh->sq_queue, cp) == NULL) {
			/*
			 * The count was saturated and the sleepq turned out
			 * to be empty, so the count can be reset to zero.
			 */
			cp->cv_waiters = 0;
		}
		disp_lock_exit(&sqh->sq_lock);
	}
}
632 
/*
 * Wake every thread blocked on 'cvp'.  Since all sleepers are removed
 * from the queue, the (possibly saturated) waiter count can simply be
 * reset to zero.
 */
void
cv_broadcast(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		sleepq_wakeall_chan(&sqh->sq_queue, cp);
		cp->cv_waiters = 0;
		disp_lock_exit(&sqh->sq_lock);
	}
}
649 
/*
 * Same as cv_wait(), but wakes up (after wakeup_time milliseconds) to check
 * for requests to stop, like cv_wait_sig() but without dealing with signals.
 * This is a horrible kludge.  It is evil.  It is vile.  It is swill.
 * If your code has to call this function then your code is the same.
 */
void
cv_wait_stop(kcondvar_t *cvp, kmutex_t *mp, int wakeup_time)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	callout_id_t id;
	clock_t tim;

	if (panicstr)
		return;

	/*
	 * Threads in system processes don't process signals.  This is
	 * true both for standard threads of system processes and for
	 * interrupt threads which have borrowed their pinned thread's LWP.
	 */
	if (lwp == NULL || (p->p_flag & SSYS)) {
		cv_wait(cvp, mp);
		return;
	}
	ASSERT(t->t_intr == NULL);

	/*
	 * Wakeup in wakeup_time milliseconds, i.e., human time.
	 * t_wait_mutex is held across arming the callout and blocking so
	 * that cv_wakeup() cannot fire before we are actually asleep.
	 */
	tim = ddi_get_lbolt() + MSEC_TO_TICK(wakeup_time);
	mutex_enter(&t->t_wait_mutex);
	id = realtime_timeout_default((void (*)(void *))cv_wakeup, t,
	    tim - ddi_get_lbolt());
	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(&t->t_wait_mutex);
	mutex_exit(mp);
	/* ASSERT(no locks are held); */
	swtch();
	(void) untimeout_default(id, 0);	/* cancel wakeup if still armed */

	/*
	 * Check for reasons to stop, if lwp_nostop is not true.
	 * See issig_forreal() for explanations of the various stops.
	 */
	mutex_enter(&p->p_lock);
	while (lwp->lwp_nostop == 0 && !(p->p_flag & SEXITLWPS)) {
		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if (t->t_proc_flag & TP_PAUSE) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}
		/*
		 * System checkpoint.
		 */
		if (t->t_proc_flag & TP_CHKPT) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}
		/*
		 * Honor fork1(), watchpoint activity (remapping a page),
		 * and lwp_suspend() requests.
		 */
		if ((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}
		/*
		 * Honor /proc requested stop.
		 */
		if (t->t_proc_flag & TP_PRSTOP) {
			stop(PR_REQUESTED, 0);
		}
		/*
		 * If some lwp in the process has already stopped
		 * showing PR_JOBCONTROL, stop in sympathy with it.
		 */
		if (p->p_stopsig && t != p->p_agenttp) {
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}
		break;
	}
	mutex_exit(&p->p_lock);
	mutex_enter(mp);	/* re-acquire the caller's mutex before returning */
}
743 
/*
 * Like cv_timedwait_sig(), but takes an absolute hires future time
 * rather than a future time in clock ticks.  Will not return showing
 * that a timeout occurred until the future time is passed.
 * If 'when' is a NULL pointer, no timeout will occur.
 * Returns:
 * 	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *	        >0 if awakened via cv_signal() or cv_broadcast()
 *		   or by a spurious wakeup.
 *		   (might return time remaining)
 * As a special test, if someone abruptly resets the system time
 * (but not through adjtime(2); drifting of the clock is allowed and
 * expected [see timespectohz_adj()]), then we force a return of -1
 * so the caller can return a premature timeout to the calling process
 * so it can reevaluate the situation in light of the new system time.
 * (The system clock has been reset if timecheck != timechanged.)
 *
 * Generally, cv_timedwait_sig_hrtime() should be used instead of this
 * routine.  It waits based on hrtime rather than wall-clock time and therefore
 * does not need to deal with the time changing.
 */
int
cv_waituntil_sig(kcondvar_t *cvp, kmutex_t *mp,
	timestruc_t *when, int timecheck)
{
	timestruc_t now;
	timestruc_t delta;
	hrtime_t interval;
	int rval;

	if (when == NULL)
		return (cv_wait_sig_swap(cvp, mp));

	/* delta = *when - now: how far in the future the deadline lies */
	gethrestime(&now);
	delta = *when;
	timespecsub(&delta, &now);
	if (delta.tv_sec < 0 || (delta.tv_sec == 0 && delta.tv_nsec == 0)) {
		/*
		 * We have already reached the absolute future time.
		 * Call cv_timedwait_sig() just to check for signals.
		 * We will return immediately with either 0 or -1.
		 */
		rval = cv_timedwait_sig_hires(cvp, mp, 0, 1, 0);
	} else {
		if (timecheck == timechanged) {
			/*
			 * Make sure that the interval is at least one tick.
			 * This is to prevent a user from flooding the system
			 * with very small, high resolution timers.
			 */
			interval = ts2hrt(&delta);
			if (interval < nsec_per_tick)
				interval = nsec_per_tick;
			rval = cv_timedwait_sig_hires(cvp, mp, interval, 1,
			    CALLOUT_FLAG_HRESTIME);
		} else {
			/*
			 * Someone reset the system time;
			 * just force an immediate timeout.
			 */
			rval = -1;
		}
		if (rval == -1 && timecheck == timechanged) {
			/*
			 * Even though cv_timedwait_sig() returned showing a
			 * timeout, the future time may not have passed yet.
			 * If not, change rval to indicate a normal wakeup.
			 */
			gethrestime(&now);
			delta = *when;
			timespecsub(&delta, &now);
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_nsec > 0))
				rval = 1;
		}
	}
	return (rval);
}
824