Lines matching "wake-up"
40 * To wake threads waiting for write access to lock 'lp' in foo_exit():
44 * [ handoff (change owner to one of the threads we're about to wake).
45 * [ If we're going to wake the last waiter, clear the waiters bit.
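The matched comment sketches the wakeup path for write waiters: pick one waiter, hand the lock directly to it, and clear the waiters bit only when waking the last one. A minimal user-space sketch of that direct-handoff idea, assuming hypothetical waiter_t/wlock_t types rather than the kernel's:

    #include <stddef.h>

    /* Hypothetical lock with an owner, a waiters flag, and a writer queue. */
    typedef struct waiter {
            struct waiter *next;            /* next blocked writer */
    } waiter_t;

    typedef struct {
            waiter_t *owner;                /* current holder */
            int waiters;                    /* nonzero while writers are queued */
            waiter_t *queue;                /* FIFO of blocked writers */
    } wlock_t;

    /*
     * Direct handoff: rather than marking the lock free and letting woken
     * threads race for it, transfer ownership to the head waiter.  The
     * waiters flag is cleared only when the last waiter is taken, matching
     * the "clear the waiters bit" step in the comment above.
     */
    static waiter_t *
    wlock_handoff(wlock_t *lp)
    {
            waiter_t *w = lp->queue;

            if (w == NULL) {
                    lp->owner = NULL;       /* nobody left to hand off to */
                    lp->waiters = 0;
                    return (NULL);
            }
            lp->queue = w->next;
            lp->owner = w;                  /* w holds the lock when it wakes */
            lp->waiters = (lp->queue != NULL);
            return (w);                     /* caller would now wake w */
    }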
77 * situations called priority inversions in which a high-priority thread
78 * needs a lock held by a low-priority thread, which cannot run because
79 * of medium-priority threads. Without PI, the medium-priority threads
80 * can starve out the high-priority thread indefinitely. With PI, the
81 * low-priority thread becomes high-priority until it releases whatever
82 * synchronization object the real high-priority thread is waiting for.
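The comment describes the classic three-priority inversion and its cure: the low-priority owner runs at the blocked thread's priority until it releases the object. A tiny illustrative sketch of the boost-and-restore rule, using a hypothetical demo_thread_t rather than the kernel's kthread_t:

    typedef struct {
            int pri;                /* current dispatch priority */
            int base_pri;           /* the thread's own priority, pre-inheritance */
    } demo_thread_t;

    /* Boost the lock owner to the blocked thread's priority if it is higher. */
    static void
    pi_inherit_demo(demo_thread_t *owner, const demo_thread_t *waiter)
    {
            if (waiter->pri > owner->pri)
                    owner->pri = waiter->pri;
    }

    /* When the owner releases the object, it reverts to its own priority. */
    static void
    pi_waive_demo(demo_thread_t *owner)
    {
            owner->pri = owner->base_pri;
    }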
95 * does, it's almost surely a second-order effect -- the real problem
106 * freelist. As threads wake up, the process is reversed.
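The freelist remark refers to the scheme visible later in turnstile_block() and turnstile_dequeue(): every thread owns exactly one turnstile, the first blocker's turnstile becomes the active one for the object, each later blocker parks its own on the active turnstile's free list, and each thread takes one back on wakeup. A user-space sketch of just that donate/reclaim half, with hypothetical ts_t/thr_t types (the first-blocker and last-waker cases are not shown):

    #include <assert.h>
    #include <stddef.h>

    typedef struct ts {
            struct ts *ts_free;             /* stack of donated turnstiles */
            int ts_waiters;
    } ts_t;

    typedef struct {
            ts_t *t_ts;                     /* the turnstile this thread owns */
    } thr_t;

    /* A later blocker donates its own turnstile to the active one. */
    static void
    ts_donate(thr_t *t, ts_t *active)
    {
            ts_t *mine = t->t_ts;

            mine->ts_free = active->ts_free;
            active->ts_free = mine;
            t->t_ts = active;
            active->ts_waiters++;
    }

    /* On wakeup, take a donated turnstile back so every thread still owns one. */
    static void
    ts_reclaim(thr_t *t)
    {
            ts_t *active = t->t_ts;
            ts_t *spare = active->ts_free;

            assert(spare != NULL && active->ts_waiters > 1);
            active->ts_free = spare->ts_free;
            spare->ts_free = NULL;
            t->t_ts = spare;
            active->ts_waiters--;
    }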
140 ((uintptr_t)(sobj) - (uintptr_t)upimutextab < sizeof (upimutextab))
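The single unsigned subtraction in this macro doubles as a full range check: if sobj lies below upimutextab, the subtraction wraps to a huge value and the comparison fails, so no separate lower-bound test is needed. A standalone demonstration with an illustrative table (not the kernel's upimutex_t):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { int dummy; } upimutex_demo_t;     /* illustrative only */
    static upimutex_demo_t upimutextab[16];

    /*
     * If sobj points below the table, the unsigned subtraction wraps to a
     * huge value and the < comparison fails, so one test covers both bounds.
     */
    #define IN_UPIMUTEXTAB(sobj) \
            ((uintptr_t)(sobj) - (uintptr_t)upimutextab < sizeof (upimutextab))

    int
    main(void)
    {
            int elsewhere;

            printf("%d\n", IN_UPIMUTEXTAB(&upimutextab[3]));    /* prints 1 */
            printf("%d\n", IN_UPIMUTEXTAB(&elsewhere));         /* prints 0 */
            return (0);
    }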
151 * must wake them, which forces a lock ordering on us: the turnstile lock
162 #define TURNSTILE_HASH_MASK (TURNSTILE_HASH_SIZE - 1)
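TURNSTILE_HASH_MASK being TURNSTILE_HASH_SIZE - 1 implies the table size is a power of two, so a chain index can be computed with a mask instead of a modulo. An illustrative hash of a synchronization-object address into a chain index (not necessarily the kernel's exact TURNSTILE_HASH):

    #include <stdint.h>

    #define TURNSTILE_HASH_SIZE 128     /* must stay a power of two */
    #define TURNSTILE_HASH_MASK (TURNSTILE_HASH_SIZE - 1)

    /*
     * Drop the low bits of the address (objects are aligned, so they carry
     * little information) and mask with SIZE - 1 instead of taking a modulo.
     */
    static unsigned int
    turnstile_hash_demo(const void *sobj)
    {
            return (((uintptr_t)sobj >> 4) & TURNSTILE_HASH_MASK);
    }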
185 ASSERT(DISP_LOCK_HELD(&TURNSTILE_CHAIN(ts->ts_sobj).tc_lock)); in turnstile_pi_inherit()
187 if (epri <= inheritor->t_pri) in turnstile_pi_inherit()
190 if (ts->ts_inheritor == NULL) { in turnstile_pi_inherit()
191 ts->ts_inheritor = inheritor; in turnstile_pi_inherit()
192 ts->ts_epri = epri; in turnstile_pi_inherit()
193 disp_lock_enter_high(&inheritor->t_pi_lock); in turnstile_pi_inherit()
194 ts->ts_prioinv = inheritor->t_prioinv; in turnstile_pi_inherit()
195 inheritor->t_prioinv = ts; in turnstile_pi_inherit()
196 disp_lock_exit_high(&inheritor->t_pi_lock); in turnstile_pi_inherit()
202 ASSERT(ts->ts_inheritor == inheritor); in turnstile_pi_inherit()
203 if (ts->ts_epri < epri) in turnstile_pi_inherit()
204 ts->ts_epri = epri; in turnstile_pi_inherit()
212 * If turnstile is non-NULL, remove it from inheritor's t_prioinv list.
221 disp_lock_enter_high(&inheritor->t_pi_lock); in turnstile_pi_tsdelete()
222 tspp = &inheritor->t_prioinv; in turnstile_pi_tsdelete()
225 *tspp = tsp->ts_prioinv; in turnstile_pi_tsdelete()
227 new_epri = MAX(new_epri, tsp->ts_epri); in turnstile_pi_tsdelete()
228 tspp = &tsp->ts_prioinv; in turnstile_pi_tsdelete()
230 disp_lock_exit_high(&inheritor->t_pi_lock); in turnstile_pi_tsdelete()
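The matched lines show turnstile_pi_tsdelete() walking the inheritor's t_prioinv list with a pointer-to-pointer, unlinking the target turnstile without needing a "previous" pointer and recomputing the highest priority still being inherited. A self-contained sketch of that walk, with hypothetical tsd_t naming:

    #include <stddef.h>

    typedef short pri_t;

    typedef struct tsd {
            struct tsd *ts_prioinv;         /* next turnstile on the list */
            pri_t ts_epri;                  /* priority inherited through it */
    } tsd_t;

    /*
     * Remove 'ts' (if non-NULL) from a singly linked t_prioinv-style list
     * and return the highest priority still being inherited.
     */
    static pri_t
    prioinv_delete(tsd_t **listp, tsd_t *ts, pri_t base_pri)
    {
            pri_t new_epri = base_pri;
            tsd_t **tspp = listp;
            tsd_t *tsp;

            while ((tsp = *tspp) != NULL) {
                    if (tsp == ts) {
                            *tspp = tsp->ts_prioinv;    /* unlink in place */
                    } else {
                            if (tsp->ts_epri > new_epri)
                                    new_epri = tsp->ts_epri;
                            tspp = &tsp->ts_prioinv;
                    }
            }
            return (new_epri);
    }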
242 kthread_t *inheritor = ts->ts_inheritor; in turnstile_pi_waive()
251 ts->ts_inheritor = NULL; in turnstile_pi_waive()
287 disp_lock_enter(&tc->tc_lock); in turnstile_lookup()
289 for (ts = tc->tc_first; ts != NULL; ts = ts->ts_next) in turnstile_lookup()
290 if (ts->ts_sobj == sobj) in turnstile_lookup()
311 * obvious solution -- do a lock_try() for the owner lock -- isn't quite
386 * Turnstiles implement both kernel and user-level priority inheritance.
387 * To avoid missed wakeups in the user-level case, lwp_upimutex_lock() calls
400 * *either* that the lock is now held, or that this is a spurious wake-up, or
402 * It is up to lwp_upimutex_lock() to sort this all out.
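The comment says turnstile_block() may return to lwp_upimutex_lock() with the lock held, with a spurious wake-up, or with a signal/timeout pending, and the caller must sort it out. A purely illustrative caller-side loop in that spirit; the names and the upimutex_block() primitive are hypothetical stand-ins, not the actual lwp_upimutex_lock() code:

    #include <errno.h>

    /* Hypothetical outcomes of blocking on a user-level PI mutex. */
    typedef enum { BLK_ACQUIRED, BLK_SPURIOUS, BLK_INTR, BLK_TIMEDOUT } blk_t;

    /* Placeholder for the real blocking primitive; always "acquires" here. */
    static blk_t
    upimutex_block(void *upimutex)
    {
            (void) upimutex;
            return (BLK_ACQUIRED);
    }

    /*
     * Caller-side loop in the spirit of the comment: a spurious wake-up is
     * not an error, it just means "look at the lock again and retry".
     */
    static int
    upimutex_lock_demo(void *upimutex)
    {
            for (;;) {
                    switch (upimutex_block(upimutex)) {
                    case BLK_ACQUIRED:
                            return (0);
                    case BLK_SPURIOUS:
                            continue;       /* retry the acquisition */
                    case BLK_INTR:
                            return (EINTR);
                    case BLK_TIMEDOUT:
                            return (ETIMEDOUT);
                    }
            }
    }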
417 ASSERT(DISP_LOCK_HELD(&tc->tc_lock)); in turnstile_block()
428 ts = t->t_ts; in turnstile_block()
429 ts->ts_sobj = sobj; in turnstile_block()
430 ts->ts_next = tc->tc_first; in turnstile_block()
431 tc->tc_first = ts; in turnstile_block()
432 ASSERT(ts->ts_waiters == 0); in turnstile_block()
439 turnstile_t *myts = t->t_ts; in turnstile_block()
440 myts->ts_free = ts->ts_free; in turnstile_block()
441 ts->ts_free = myts; in turnstile_block()
442 t->t_ts = ts; in turnstile_block()
443 ASSERT(ts->ts_sobj == sobj); in turnstile_block()
444 ASSERT(ts->ts_waiters > 0); in turnstile_block()
450 ASSERT(t != CPU->cpu_idle_thread); in turnstile_block()
452 ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL); in turnstile_block()
453 ASSERT(t->t_state == TS_ONPROC); in turnstile_block()
456 curthread->t_flag |= T_WAKEABLE; in turnstile_block()
459 THREAD_SLEEP(t, &tc->tc_lock); in turnstile_block()
460 t->t_wchan = sobj; in turnstile_block()
461 t->t_sobj_ops = sobj_ops; in turnstile_block()
465 lwp->lwp_ru.nvcsw++; in turnstile_block()
468 lwp->lwp_asleep = 1; in turnstile_block()
469 lwp->lwp_sysabort = 0; in turnstile_block()
471 * make wchan0 non-zero to conform to the rule that in turnstile_block()
472 * threads blocking for user-level objects have a in turnstile_block()
473 * non-zero wchan0: this prevents spurious wake-ups in turnstile_block()
476 t->t_wchan0 = (caddr_t)1; in turnstile_block()
479 ts->ts_waiters++; in turnstile_block()
480 sleepq_insert(&ts->ts_sleepq[qnum], t); in turnstile_block()
490 while (t->t_sobj_ops != NULL && in turnstile_block()
491 (owner = SOBJ_OWNER(t->t_sobj_ops, t->t_wchan)) != NULL) { in turnstile_block()
504 if (t->t_wchan == (void *)mp) in turnstile_block()
518 curthread->t_flag &= ~T_WAKEABLE; in turnstile_block()
519 if (lwptp->lwpt_id != 0) in turnstile_block()
522 lwp->lwp_asleep = 0; in turnstile_block()
523 lwp->lwp_sysabort = 0; in turnstile_block()
526 if (!turnstile_interlock(t->t_lockp, &owner->t_lockp)) { in turnstile_block()
538 * we may already have been woken up; if so, our in turnstile_block()
540 * and the call to swtch() will be a no-op. Phew. in turnstile_block()
560 * from non-SOBJ_USER_PI ops to SOBJ_USER_PI ops, then we know in turnstile_block()
568 if (SOBJ_TYPE(t->t_sobj_ops) != SOBJ_USER_PI && in turnstile_block()
569 owner->t_sobj_ops != NULL && in turnstile_block()
570 SOBJ_TYPE(owner->t_sobj_ops) == SOBJ_USER_PI) { in turnstile_block()
571 kmutex_t *upi_lock = (kmutex_t *)t->t_wchan; in turnstile_block()
574 ASSERT(SOBJ_TYPE(t->t_sobj_ops) == SOBJ_MUTEX); in turnstile_block()
576 if (t->t_lockp != owner->t_lockp) in turnstile_block()
594 turnstile_pi_inherit(t->t_ts, owner, DISP_PRIO(t)); in turnstile_block()
595 if (t->t_lockp != owner->t_lockp) in turnstile_block()
609 ushort_t s = curthread->t_oldspl; in turnstile_block()
612 clock_t tim = -1; in turnstile_block()
615 if (lwptp->lwpt_id != 0) { in turnstile_block()
618 * lwptp->lwpt_imm_timeout has been set with cas, in turnstile_block()
623 atomic_cas_uint(&lwptp->lwpt_imm_timeout, 0, 0); in turnstile_block()
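The comment explains why the flag is re-read with a compare-and-swap: lwpt_imm_timeout is set with cas on the timeout side, so reading it with a cas of 0 -> 0 is an atomic read-modify-write that synchronizes fully with that writer even when the value is unchanged. The same idea rendered with C11 atomics, as a hedged user-space sketch:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Flag the timeout side sets with a compare-and-swap. */
    static _Atomic unsigned int imm_timeout_flag;

    /*
     * CAS of 0 -> 0: if the flag is still clear, nothing changes; either
     * way 'expected' ends up holding the value that was observed.
     */
    static bool
    imm_timeout_fired(void)
    {
            unsigned int expected = 0;

            (void) atomic_compare_exchange_strong(&imm_timeout_flag, &expected, 0);
            return (expected != 0);
    }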
632 curthread->t_flag &= ~T_WAKEABLE; in turnstile_block()
636 if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort || in turnstile_block()
639 else if (imm_timeout || (timedwait && tim == -1)) in turnstile_block()
641 lwp->lwp_sysabort = 0; in turnstile_block()
642 lwp->lwp_asleep = 0; in turnstile_block()
660 turnstile_t *ts = t->t_ts; in turnstile_dequeue()
661 turnstile_chain_t *tc = &TURNSTILE_CHAIN(ts->ts_sobj); in turnstile_dequeue()
664 ASSERT(DISP_LOCK_HELD(&tc->tc_lock)); in turnstile_dequeue()
665 ASSERT(t->t_lockp == &tc->tc_lock); in turnstile_dequeue()
667 if ((tsfree = ts->ts_free) != NULL) { in turnstile_dequeue()
668 ASSERT(ts->ts_waiters > 1); in turnstile_dequeue()
669 ASSERT(tsfree->ts_waiters == 0); in turnstile_dequeue()
670 t->t_ts = tsfree; in turnstile_dequeue()
671 ts->ts_free = tsfree->ts_free; in turnstile_dequeue()
672 tsfree->ts_free = NULL; in turnstile_dequeue()
677 * from the hash chain and leave the now-inactive in turnstile_dequeue()
683 ASSERT(ts->ts_waiters == 1); in turnstile_dequeue()
684 if (ts->ts_inheritor != NULL) { in turnstile_dequeue()
685 (void) turnstile_pi_tsdelete(ts, ts->ts_inheritor); in turnstile_dequeue()
698 ts->ts_inheritor = NULL; in turnstile_dequeue()
700 tspp = &tc->tc_first; in turnstile_dequeue()
702 tspp = &(*tspp)->ts_next; in turnstile_dequeue()
703 *tspp = ts->ts_next; in turnstile_dequeue()
704 ASSERT(t->t_ts == ts); in turnstile_dequeue()
706 ts->ts_waiters--; in turnstile_dequeue()
708 t->t_sobj_ops = NULL; in turnstile_dequeue()
709 t->t_wchan = NULL; in turnstile_dequeue()
710 t->t_wchan0 = NULL; in turnstile_dequeue()
711 ASSERT(t->t_state == TS_SLEEP); in turnstile_dequeue()
715 * Wake threads that are blocked in a turnstile.
720 turnstile_chain_t *tc = &TURNSTILE_CHAIN(ts->ts_sobj); in turnstile_wakeup()
721 sleepq_t *sqp = &ts->ts_sleepq[qnum]; in turnstile_wakeup()
723 ASSERT(DISP_LOCK_HELD(&tc->tc_lock)); in turnstile_wakeup()
728 if (ts->ts_inheritor != NULL) { in turnstile_wakeup()
731 while (nthreads-- > 0) { in turnstile_wakeup()
732 kthread_t *t = sqp->sq_first; in turnstile_wakeup()
733 ASSERT(t->t_ts == ts); in turnstile_wakeup()
734 ASSERT(ts->ts_waiters > 1 || ts->ts_inheritor == NULL); in turnstile_wakeup()
743 kthread_t *wp = ts->ts_sleepq[TS_WRITER_Q].sq_first; in turnstile_wakeup()
744 kthread_t *rp = ts->ts_sleepq[TS_READER_Q].sq_first; in turnstile_wakeup()
754 disp_lock_exit(&tc->tc_lock); in turnstile_wakeup()
763 sleepq_t *sqp = t->t_sleepq; in turnstile_change_pri()
774 * This is vital to the correctness of direct-handoff logic in some
784 * Wake up a thread blocked in a turnstile. Used to enable interruptibility