Lines matching refs:self (every reference to the variable self, annotated with its enclosing function and, for definitions, local/argument)

216 ulwp_t *self = curthread; in _ceil_mylist_del() local
220 for (mcpp = &self->ul_mxchain; in _ceil_mylist_del()
226 return (mcpp == &self->ul_mxchain); in _ceil_mylist_del()
239 ulwp_t *self = curthread; in _ceil_mylist_add() local
245 mcp->mxchain_next = self->ul_mxchain; in _ceil_mylist_add()
246 self->ul_mxchain = mcp; in _ceil_mylist_add()
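The two _ceil_mylist functions above maintain a per-thread singly linked chain (ul_mxchain) of priority-ceiling mutexes the thread owns: add pushes at the head, del unlinks a matching entry and reports whether it was the head. A minimal sketch of the same push/unlink logic, with reduced stand-ins for libc's ulwp_t and mxchain_t (the real del also lfree()s the unlinked entry):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct mxchain {            /* reduced stand-in for mxchain_t */
        struct mxchain *mxchain_next;
        void *mxchain_mx;               /* the owned ceiling mutex */
    } mxchain_t;

    typedef struct {                    /* reduced stand-in for ulwp_t */
        mxchain_t *ul_mxchain;
    } ulwp_t;

    static void
    ceil_mylist_add(ulwp_t *self, mxchain_t *mcp)
    {
        mcp->mxchain_next = self->ul_mxchain;   /* push at the head */
        self->ul_mxchain = mcp;
    }

    static bool
    ceil_mylist_del(ulwp_t *self, void *mx)
    {
        mxchain_t **mcpp;
        mxchain_t *mcp;

        for (mcpp = &self->ul_mxchain; (mcp = *mcpp) != NULL;
            mcpp = &mcp->mxchain_next) {
            if (mcp->mxchain_mx == mx) {
                *mcpp = mcp->mxchain_next;  /* unlink (caller frees) */
                return (mcpp == &self->ul_mxchain);  /* was it the head? */
            }
        }
        return (false);
    }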
254 set_rt_priority(ulwp_t *self, int prio) in set_rt_priority() argument
258 pcparm.pc_cid = self->ul_rtclassid; in set_rt_priority()
261 (void) priocntl(P_LWPID, self->ul_lwpid, PC_SETPARMS, &pcparm); in set_rt_priority()
271 ulwp_t *self = curthread; in _ceil_prio_inherit() local
273 self->ul_epri = prio; in _ceil_prio_inherit()
274 set_rt_priority(self, prio); in _ceil_prio_inherit()
285 ulwp_t *self = curthread; in _ceil_prio_waive() local
286 mxchain_t *mcp = self->ul_mxchain; in _ceil_prio_waive()
290 prio = self->ul_pri; in _ceil_prio_waive()
291 self->ul_epri = 0; in _ceil_prio_waive()
294 self->ul_epri = prio; in _ceil_prio_waive()
296 set_rt_priority(self, prio); in _ceil_prio_waive()
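set_rt_priority() applies a priority via priocntl(PC_SETPARMS); _ceil_prio_inherit() records a ceiling in ul_epri and applies it, while _ceil_prio_waive() falls back to the newest remaining ceiling on the ul_mxchain list, or to the base priority ul_pri when the chain is empty. A hedged sketch with simplified types; set_rt_priority() is stubbed here:

    #include <stddef.h>

    typedef struct mxchain {
        struct mxchain *mxchain_next;
        int mxchain_ceiling;        /* ceiling of the owned mutex */
    } mxchain_t;

    typedef struct {
        mxchain_t *ul_mxchain;      /* ceiling mutexes still owned */
        int ul_pri;                 /* base priority */
        int ul_epri;                /* effective (inherited) priority */
    } ulwp_t;

    static void
    set_rt_priority(ulwp_t *self, int prio)
    {
        (void) self; (void) prio;
        /* the real version calls priocntl(P_LWPID, ..., PC_SETPARMS, ...) */
    }

    static void
    ceil_prio_inherit(ulwp_t *self, int prio)
    {
        self->ul_epri = prio;
        set_rt_priority(self, prio);
    }

    static void
    ceil_prio_waive(ulwp_t *self)
    {
        mxchain_t *mcp = self->ul_mxchain;
        int prio;

        if (mcp == NULL) {          /* no ceiling mutexes left */
            prio = self->ul_pri;
            self->ul_epri = 0;
        } else {                    /* head of chain holds the newest ceiling */
            prio = mcp->mxchain_ceiling;
            self->ul_epri = prio;
        }
        set_rt_priority(self, prio);
    }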
418 ulwp_t *self = curthread; in spin_lock_set() local
420 no_preempt(self); in spin_lock_set()
422 mp->mutex_owner = (uintptr_t)self; in spin_lock_set()
428 INCR32(self->ul_spin_lock_spin); in spin_lock_set()
431 mp->mutex_owner = (uintptr_t)self; in spin_lock_set()
437 if (self->ul_preempt > 1) { in spin_lock_set()
438 INCR32(self->ul_spin_lock_spin2); in spin_lock_set()
441 mp->mutex_owner = (uintptr_t)self; in spin_lock_set()
448 INCR32(self->ul_spin_lock_sleep); in spin_lock_set()
449 (void) ___lwp_mutex_timedlock(mp, NULL, self); in spin_lock_set()
455 ulwp_t *self = curthread; in spin_lock_clear() local
460 INCR32(self->ul_spin_lock_wakeup); in spin_lock_clear()
462 preempt(self); in spin_lock_clear()
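spin_lock_set()/spin_lock_clear() implement libc's internal spin locks: an initial lock attempt, a bounded spin (counted in ul_spin_lock_spin), and finally a sleep in the kernel via ___lwp_mutex_timedlock() (counted in ul_spin_lock_sleep). A self-contained sketch of the same spin-then-block shape using C11 atomics; sched_yield() stands in for the kernel sleep:

    #include <stdatomic.h>
    #include <sched.h>

    typedef struct { atomic_int lock; } spinlock_t;   /* 0 = free, 1 = held */

    static void
    spin_lock_set(spinlock_t *sp, int max_spin)
    {
        int expected = 0;

        if (atomic_compare_exchange_strong(&sp->lock, &expected, 1))
            return;                         /* uncontended fast path */
        for (;;) {
            for (int i = 0; i < max_spin; i++) {
                if (atomic_load_explicit(&sp->lock,
                    memory_order_relaxed) == 0) {
                    expected = 0;
                    if (atomic_compare_exchange_strong(&sp->lock,
                        &expected, 1))
                        return;             /* won it while spinning */
                }
            }
            sched_yield();  /* spin budget spent; real code sleeps in kernel */
        }
    }

    static void
    spin_lock_clear(spinlock_t *sp)
    {
        atomic_store_explicit(&sp->lock, 0, memory_order_release);
        /* the real clear also wakes a kernel sleeper (ul_spin_lock_wakeup) */
    }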
471 ulwp_t *self = curthread; in queue_alloc() local
472 uberdata_t *udp = self->ul_uberdata; in queue_alloc()
480 ASSERT(self == udp->ulwp_one); in queue_alloc()
507 ulwp_t *self = curthread; in QVERIFY() local
508 uberdata_t *udp = self->ul_uberdata; in QVERIFY()
518 ASSERT(MUTEX_OWNED(&qp->qh_lock, self)); in QVERIFY()
917 ulwp_t *self = curthread; in dequeue_self() local
924 ASSERT(MUTEX_OWNED(&qp->qh_lock, self)); in dequeue_self()
931 if (ulwp == self) { in dequeue_self()
933 self->ul_cvmutex = NULL; in dequeue_self()
934 self->ul_sleepq = NULL; in dequeue_self()
935 self->ul_wchan = NULL; in dequeue_self()
955 ulwp_t *self = curthread; in unsleep_self() local
962 self->ul_critical++; in unsleep_self()
963 while (self->ul_sleepq != NULL) { in unsleep_self()
964 qp = queue_lock(self->ul_wchan, self->ul_qtype); in unsleep_self()
971 if (qp == self->ul_sleepq) in unsleep_self()
975 self->ul_writer = 0; in unsleep_self()
976 self->ul_critical--; in unsleep_self()
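unsleep_self() is a lock/validate/retry loop: the queue a thread sleeps on can change between reading ul_sleepq and locking it (a condvar waiter can be moved to a mutex queue), so it locks the queue for the current wait channel and only dequeues if that is still the queue it is on. A sketch with assumed queue_lock()/queue_unlock()/dequeue_self() helpers; the real loop also runs with ul_critical raised:

    #include <stddef.h>

    typedef struct queue_head queue_head_t;
    typedef struct ulwp {
        queue_head_t *ul_sleepq;    /* queue we believe we sleep on */
        void *ul_wchan;             /* wait channel */
        int ul_qtype;               /* MX or CV */
    } ulwp_t;

    extern queue_head_t *queue_lock(void *wchan, int qtype);  /* assumed */
    extern void queue_unlock(queue_head_t *);                 /* assumed */
    extern void dequeue_self(queue_head_t *);  /* clears ul_sleepq/ul_wchan */

    static void
    unsleep_self(ulwp_t *self)
    {
        while (self->ul_sleepq != NULL) {
            queue_head_t *qp = queue_lock(self->ul_wchan, self->ul_qtype);

            if (qp == self->ul_sleepq)  /* still the queue we are on */
                dequeue_self(qp);       /* terminates the loop */
            queue_unlock(qp);           /* else: raced; observe again */
        }
    }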
986 ulwp_t *self = curthread; in mutex_lock_kernel() local
987 uberdata_t *udp = self->ul_uberdata; in mutex_lock_kernel()
993 self->ul_sp = stkptr(); in mutex_lock_kernel()
994 self->ul_wchan = mp; in mutex_lock_kernel()
995 if (__td_event_report(self, TD_SLEEP, udp)) { in mutex_lock_kernel()
996 self->ul_td_evbuf.eventnum = TD_SLEEP; in mutex_lock_kernel()
997 self->ul_td_evbuf.eventdata = mp; in mutex_lock_kernel()
1012 if ((error = ___lwp_mutex_timedlock(mp, tsp, self)) != 0 && in mutex_lock_kernel()
1023 enter_critical(self); in mutex_lock_kernel()
1025 exit_critical(self); in mutex_lock_kernel()
1029 exit_critical(self); in mutex_lock_kernel()
1038 self->ul_wchan = NULL; in mutex_lock_kernel()
1039 self->ul_sp = 0; in mutex_lock_kernel()
1042 ASSERT(mp->mutex_owner == (uintptr_t)self); in mutex_lock_kernel()
1060 ulwp_t *self = curthread; in mutex_trylock_kernel() local
1061 uberdata_t *udp = self->ul_uberdata; in mutex_trylock_kernel()
1071 if ((error = ___lwp_mutex_trylock(mp, self)) != 0 && in mutex_trylock_kernel()
1082 enter_critical(self); in mutex_trylock_kernel()
1084 exit_critical(self); in mutex_trylock_kernel()
1088 exit_critical(self); in mutex_trylock_kernel()
1096 ASSERT(mp->mutex_owner == (uintptr_t)self); in mutex_trylock_kernel()
1108 ulwp_t *self = curthread; in setup_schedctl() local
1112 if ((scp = self->ul_schedctl) == NULL && /* no shared state yet */ in setup_schedctl()
1113 !self->ul_vfork && /* not a child of vfork() */ in setup_schedctl()
1114 !self->ul_schedctl_called) { /* haven't been called before */ in setup_schedctl()
1115 enter_critical(self); in setup_schedctl()
1116 self->ul_schedctl_called = &self->ul_uberdata->uberflags; in setup_schedctl()
1118 self->ul_schedctl = scp = tmp; in setup_schedctl()
1119 exit_critical(self); in setup_schedctl()
1155 ulwp_t *self = curthread; in _thr_schedctl() local
1158 if (self->ul_vfork) in _thr_schedctl()
1160 if (*(ptr = &self->ul_schedctl) == NULL) in _thr_schedctl()
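setup_schedctl() lazily maps the per-LWP schedctl page shared with the kernel: the attempt is made at most once (latched via ul_schedctl_called), is skipped in a vfork() child, and runs inside enter_critical()/exit_critical(). A sketch of that one-shot initialization; map_schedctl_page() is an assumed stand-in for the private __schedctl() trap:

    #include <stddef.h>

    typedef struct sc_shared sc_shared_t;
    typedef struct ulwp {
        sc_shared_t *ul_schedctl;   /* shared page, once mapped */
        int ul_vfork;               /* child of vfork(): no setup */
        void *ul_schedctl_called;   /* non-NULL once we have tried */
    } ulwp_t;

    extern sc_shared_t *map_schedctl_page(void);    /* assumed helper */
    extern void enter_critical(ulwp_t *);
    extern void exit_critical(ulwp_t *);

    static sc_shared_t *
    setup_schedctl(ulwp_t *self, void *flagaddr)
    {
        sc_shared_t *scp;

        if ((scp = self->ul_schedctl) == NULL &&    /* not mapped yet */
            !self->ul_vfork &&                      /* unsafe after vfork() */
            self->ul_schedctl_called == NULL) {     /* never attempted */
            enter_critical(self);
            self->ul_schedctl_called = flagaddr;    /* latch the attempt */
            if ((scp = map_schedctl_page()) != NULL)
                self->ul_schedctl = scp;
            exit_critical(self);
        }
        return (scp);   /* may be NULL; callers must cope */
    }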
1170 no_preempt(ulwp_t *self) in no_preempt() argument
1174 if (self->ul_preempt++ == 0) { in no_preempt()
1175 enter_critical(self); in no_preempt()
1176 if ((scp = self->ul_schedctl) != NULL || in no_preempt()
1181 self->ul_savpreempt = scp->sc_preemptctl.sc_nopreempt; in no_preempt()
1191 preempt(ulwp_t *self) in preempt() argument
1195 ASSERT(self->ul_preempt > 0); in preempt()
1196 if (--self->ul_preempt == 0) { in preempt()
1197 if ((scp = self->ul_schedctl) != NULL) { in preempt()
1201 scp->sc_preemptctl.sc_nopreempt = self->ul_savpreempt; in preempt()
1216 exit_critical(self); in preempt()
1229 preempt_unpark(ulwp_t *self, lwpid_t lwpid) in preempt_unpark() argument
1231 volatile sc_shared_t *scp = self->ul_schedctl; in preempt_unpark()
1233 ASSERT(self->ul_preempt == 1 && self->ul_critical > 0); in preempt_unpark()
1235 (self->ul_curplease && self->ul_critical == 1)) { in preempt_unpark()
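no_preempt()/preempt() form a nesting bracket around code that must not lose the CPU: the outermost no_preempt() saves and sets the kernel-visible sc_preemptctl.sc_nopreempt flag, and the matching outermost preempt() restores it (and yields if the kernel asked). A reduced sketch of the counter discipline; the real code also enters/exits a critical region and tolerates a missing schedctl page:

    #include <stddef.h>

    typedef struct {
        int ul_preempt;             /* nesting depth */
        int ul_savpreempt;          /* saved value of the shared flag */
        volatile int *sc_nopreempt; /* kernel-visible "don't preempt me" */
    } ulwp_t;

    static void
    no_preempt(ulwp_t *self)
    {
        if (self->ul_preempt++ == 0 && self->sc_nopreempt != NULL) {
            self->ul_savpreempt = *self->sc_nopreempt;  /* save */
            *self->sc_nopreempt = 1;                    /* block preemption */
        }
    }

    static void
    preempt(ulwp_t *self)
    {
        if (--self->ul_preempt == 0 && self->sc_nopreempt != NULL)
            *self->sc_nopreempt = self->ul_savpreempt;  /* restore */
        /* the real code also yields here if the kernel set sc_yield
         * while preemption was blocked, and exits the critical region */
    }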
1250 ulwp_t *self = curthread; in mutex_trylock_adaptive() local
1263 if (MUTEX_OWNED(mp, self)) in mutex_trylock_adaptive()
1266 enter_critical(self); in mutex_trylock_adaptive()
1280 *ownerp = (uintptr_t)self; in mutex_trylock_adaptive()
1288 if ((max_spinners = self->ul_max_spinners) >= ncpus) in mutex_trylock_adaptive()
1290 max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0; in mutex_trylock_adaptive()
1308 *ownerp = (uintptr_t)self; in mutex_trylock_adaptive()
1356 *ownerp = (uintptr_t)self; in mutex_trylock_adaptive()
1374 exit_critical(self); in mutex_trylock_adaptive()
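mutex_trylock_adaptive() spins on the owner word only while it is profitable: the spin is capped (ul_adaptive_spin), the number of simultaneous spinners is capped (ul_max_spinners, never more than ncpus), and spinning stops once the owner is seen to be off-CPU. A self-contained sketch of that policy; owner_running() is an assumed helper (the real test inspects the owner's schedctl state):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <errno.h>

    typedef struct { _Atomic(uintptr_t) owner; } amutex_t;  /* 0 = free */

    extern int owner_running(uintptr_t owner);   /* assumed helper */

    static int
    mutex_trylock_adaptive(amutex_t *mp, uintptr_t self, int max_count)
    {
        uintptr_t expected = 0;

        if (atomic_compare_exchange_strong(&mp->owner, &expected, self))
            return (0);                     /* got it on the first try */
        for (int count = 0; count < max_count; count++) {
            uintptr_t owner = atomic_load(&mp->owner);

            if (owner == 0) {
                expected = 0;
                if (atomic_compare_exchange_strong(&mp->owner,
                    &expected, self))
                    return (0);             /* grabbed a released lock */
            } else if (!owner_running(owner)) {
                break;  /* owner is off-CPU: spinning is futile */
            }
        }
        return (EBUSY);                     /* caller falls back to sleeping */
    }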
1440 ulwp_t *self = curthread; in mutex_trylock_process() local
1441 uberdata_t *udp = self->ul_uberdata; in mutex_trylock_process()
1453 self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST)); in mutex_trylock_process()
1461 enter_critical(self); in mutex_trylock_process()
1479 mp->mutex_owner = (uintptr_t)self; in mutex_trylock_process()
1486 mp->mutex_owner = (uintptr_t)self; in mutex_trylock_process()
1495 if ((max_spinners = self->ul_max_spinners) >= ncpus) in mutex_trylock_process()
1497 max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0; in mutex_trylock_process()
1516 mp->mutex_owner = (uintptr_t)self; in mutex_trylock_process()
1524 mp->mutex_owner = (uintptr_t)self; in mutex_trylock_process()
1555 mp->mutex_owner = (uintptr_t)self; in mutex_trylock_process()
1561 mp->mutex_owner = (uintptr_t)self; in mutex_trylock_process()
1581 exit_critical(self); in mutex_trylock_process()
1709 ulwp_t *self = curthread; in mutex_unlock_queue() local
1714 sigoff(self); in mutex_unlock_queue()
1719 no_preempt(self); /* ensure a prompt wakeup */ in mutex_unlock_queue()
1725 preempt(self); in mutex_unlock_queue()
1727 sigon(self); in mutex_unlock_queue()
1737 ulwp_t *self = curthread; in mutex_unlock_process() local
1741 sigoff(self); in mutex_unlock_process()
1746 self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST)) { in mutex_unlock_process()
1752 no_preempt(self); /* ensure a prompt wakeup */ in mutex_unlock_process()
1754 preempt(self); in mutex_unlock_process()
1756 sigon(self); in mutex_unlock_process()
1764 no_preempt(self); /* ensure a prompt wakeup */ in mutex_unlock_process()
1766 preempt(self); in mutex_unlock_process()
1768 sigon(self); in mutex_unlock_process()
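The unlock paths share one shape: sigoff() defers signals so no handler runs while the lock word is half-released, the waiter wakeup happens inside a no_preempt()/preempt() bracket ("ensure a prompt wakeup"), and sigon() ends the deferral. A skeleton with assumed helpers for the release itself:

    typedef struct ulwp ulwp_t;
    typedef struct mutex mutex_t;
    typedef int lwpid_t;            /* reduced stand-in for the real type */

    extern void sigoff(ulwp_t *), sigon(ulwp_t *);
    extern void no_preempt(ulwp_t *), preempt(ulwp_t *);
    extern lwpid_t clear_lockword_and_pick_waiter(mutex_t *);  /* assumed */
    extern void wake_lwp(lwpid_t);                             /* assumed */

    static void
    mutex_unlock_shape(ulwp_t *self, mutex_t *mp)
    {
        lwpid_t lwpid;

        sigoff(self);               /* no signal handler mid-release */
        lwpid = clear_lockword_and_pick_waiter(mp);
        if (lwpid != 0) {
            no_preempt(self);       /* ensure a prompt wakeup */
            wake_lwp(lwpid);
            preempt(self);
        }
        sigon(self);
    }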
1784 mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp, in mutex_lock_queue() argument
1792 self->ul_sp = stkptr(); in mutex_lock_queue()
1793 if (__td_event_report(self, TD_SLEEP, udp)) { in mutex_lock_queue()
1794 self->ul_wchan = mp; in mutex_lock_queue()
1795 self->ul_td_evbuf.eventnum = TD_SLEEP; in mutex_lock_queue()
1796 self->ul_td_evbuf.eventdata = mp; in mutex_lock_queue()
1813 enqueue(qp, self, 0); in mutex_lock_queue()
1817 mp->mutex_owner = (uintptr_t)self; in mutex_lock_queue()
1821 set_parking_flag(self, 1); in mutex_lock_queue()
1828 set_parking_flag(self, 0); in mutex_lock_queue()
1838 if (self->ul_sleepq == NULL) { in mutex_lock_queue()
1846 mp->mutex_owner = (uintptr_t)self; in mutex_lock_queue()
1849 enqueue(qp, self, 0); in mutex_lock_queue()
1852 ASSERT(self->ul_sleepq == qp && in mutex_lock_queue()
1853 self->ul_qtype == MX && in mutex_lock_queue()
1854 self->ul_wchan == mp); in mutex_lock_queue()
1863 ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL && in mutex_lock_queue()
1864 self->ul_wchan == NULL); in mutex_lock_queue()
1865 self->ul_sp = 0; in mutex_lock_queue()
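mutex_lock_queue() is the sleeping slow path: the thread enqueues itself before re-testing the lock word, so a concurrent unlock either finds it on the queue and wakes it, or the re-test succeeds. After parking it may find itself already dequeued by the waker (and competes for the lock) or still queued (spurious wakeup). A skeleton under assumed helpers; the real code also manages mutex_waiters, the parking flag, and timeouts:

    typedef struct ulwp ulwp_t;
    typedef struct mutex mutex_t;
    typedef struct queue_head queue_head_t;
    enum { MX };    /* queue type tag, as in the ul_qtype == MX test above */

    extern queue_head_t *queue_lock(void *wchan, int qtype);
    extern void queue_unlock(queue_head_t *);
    extern void enqueue(queue_head_t *, ulwp_t *, int);
    extern void dequeue_self(queue_head_t *);
    extern int try_acquire(mutex_t *, ulwp_t *);  /* assumed: CAS the word */
    extern int on_sleep_queue(ulwp_t *);          /* assumed: ul_sleepq != 0 */
    extern void park(void);                       /* __lwp_park() underneath */

    static void
    mutex_lock_queue_shape(ulwp_t *self, mutex_t *mp)
    {
        queue_head_t *qp = queue_lock(mp, MX);

        enqueue(qp, self, 0);           /* visible to unlockers first */
        for (;;) {
            if (try_acquire(mp, self)) {    /* re-test with queue locked */
                dequeue_self(qp);
                break;
            }
            queue_unlock(qp);
            park();                     /* sleep; an unlock wakes us */
            qp = queue_lock(mp, MX);
            if (on_sleep_queue(self))   /* spurious wakeup: still queued */
                continue;
            if (try_acquire(mp, self))  /* waker dequeued us: take the lock */
                break;
            enqueue(qp, self, 0);       /* lost the race: queue up again */
        }
        queue_unlock(qp);
    }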
2048 ulwp_t *self = curthread; in mutex_lock_internal() local
2049 uberdata_t *udp = self->ul_uberdata; in mutex_lock_internal()
2060 if (!self->ul_schedctl_called) in mutex_lock_internal()
2069 if (self->ul_error_detection && try == MUTEX_LOCK && in mutex_lock_internal()
2074 update_sched(self); in mutex_lock_internal()
2075 if (self->ul_cid != self->ul_rtclassid) { in mutex_lock_internal()
2080 myprio = self->ul_epri? self->ul_epri : self->ul_pri; in mutex_lock_internal()
2110 self->ul_pilocks++; in mutex_lock_internal()
2115 self->ul_pilocks++; in mutex_lock_internal()
2146 error = mutex_lock_queue(self, msp, mp, tsp); in mutex_lock_internal()
2167 if (__td_event_report(self, TD_LOCK_TRY, udp)) { in mutex_lock_internal()
2168 self->ul_td_evbuf.eventnum = TD_LOCK_TRY; in mutex_lock_internal()
2181 ulwp_t *self = curthread; in fast_process_lock() local
2182 uberdata_t *udp = self->ul_uberdata; in fast_process_lock()
2190 enter_critical(self); in fast_process_lock()
2194 self->ul_misaligned) { in fast_process_lock()
2197 mp->mutex_owner = (uintptr_t)self; in fast_process_lock()
2198 exit_critical(self); in fast_process_lock()
2205 mp->mutex_owner = (uintptr_t)self; in fast_process_lock()
2207 exit_critical(self); in fast_process_lock()
2211 exit_critical(self); in fast_process_lock()
2222 if (__td_event_report(self, TD_LOCK_TRY, udp)) { in fast_process_lock()
2223 self->ul_td_evbuf.eventnum = TD_LOCK_TRY; in fast_process_lock()
2232 ulwp_t *self = curthread; in mutex_lock_impl() local
2237 self->ul_error_detection && self->ul_misaligned == 0) in mutex_lock_impl()
2248 self->ul_uberdata->uberflags.uf_all) == 0) { in mutex_lock_impl()
2254 sigoff(self); in mutex_lock_impl()
2256 mp->mutex_owner = (uintptr_t)self; in mutex_lock_impl()
2257 sigon(self); in mutex_lock_impl()
2261 if (mtype && MUTEX_OWNER(mp) == self) in mutex_lock_impl()
2282 MUTEX_OWNER(mp) == self && !self->ul_async_safe && in mutex_lock_impl()
2294 if ((gflags = self->ul_schedctl_called) != NULL && in mutex_lock_impl()
2299 sigoff(self); in mutex_lock_impl()
2301 mp->mutex_owner = (uintptr_t)self; in mutex_lock_impl()
2302 sigon(self); in mutex_lock_impl()
2306 sigon(self); in mutex_lock_impl()
2307 if (mtype && MUTEX_OWNER(mp) == self) in mutex_lock_impl()
2310 return (mutex_lock_queue(self, NULL, mp, tsp)); in mutex_lock_impl()
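mutex_lock_impl() splits into a fast path and a slow path: a default intra-process mutex with no special attribute bits and no global flags set is taken with a single atomic operation on the lock byte; PI, robust, shared, and recursive types fall through to the queueing code, and relocking by the owner is caught via the MUTEX_OWNER(mp) == self test. A sketch of that dispatch; set_lock_byte() stands in for the real primitive, and the deadlock return is the error-checking flavor (a recursive type would bump a count instead):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <errno.h>

    typedef struct {
        _Atomic unsigned char lockw;    /* 0 = free, 1 = held */
        uintptr_t owner;
        int type;                       /* attribute bits; 0 = default */
    } mutex_sk_t;

    extern int mutex_lock_slowpath(mutex_sk_t *, uintptr_t);   /* assumed */

    static int
    set_lock_byte(_Atomic unsigned char *lp)
    {
        return (atomic_exchange(lp, 1));    /* returns the previous value */
    }

    static int
    mutex_lock_fast(mutex_sk_t *mp, uintptr_t self)
    {
        if (mp->type == 0 && set_lock_byte(&mp->lockw) == 0) {
            mp->owner = self;           /* uncontended: done */
            return (0);
        }
        if (mp->owner == self)
            return (EDEADLK);           /* relock by owner */
        return (mutex_lock_slowpath(mp, self));
    }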
2417 ulwp_t *self = curthread; in mutex_trylock() local
2418 uberdata_t *udp = self->ul_uberdata; in mutex_trylock()
2438 sigoff(self); in mutex_trylock()
2440 mp->mutex_owner = (uintptr_t)self; in mutex_trylock()
2441 sigon(self); in mutex_trylock()
2445 if (mtype && MUTEX_OWNER(mp) == self) in mutex_trylock()
2455 if ((gflags = self->ul_schedctl_called) != NULL && in mutex_trylock()
2460 sigoff(self); in mutex_trylock()
2462 mp->mutex_owner = (uintptr_t)self; in mutex_trylock()
2463 sigon(self); in mutex_trylock()
2467 sigon(self); in mutex_trylock()
2468 if (mtype && MUTEX_OWNER(mp) == self) in mutex_trylock()
2470 if (__td_event_report(self, TD_LOCK_TRY, udp)) { in mutex_trylock()
2471 self->ul_td_evbuf.eventnum = TD_LOCK_TRY; in mutex_trylock()
2484 ulwp_t *self = curthread; in mutex_unlock_internal() local
2485 uberdata_t *udp = self->ul_uberdata; in mutex_unlock_internal()
2496 if (self->ul_error_detection && !mutex_held(mp)) in mutex_unlock_internal()
2517 no_preempt(self); in mutex_unlock_internal()
2522 self->ul_pilocks--; in mutex_unlock_internal()
2524 preempt(self); in mutex_unlock_internal()
2530 preempt(self); in mutex_unlock_internal()
2548 ulwp_t *self = curthread; in mutex_unlock() local
2562 self->ul_uberdata->uberflags.uf_all) == 0) { in mutex_unlock()
2568 if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) in mutex_unlock()
2580 sigoff(self); in mutex_unlock()
2583 sigon(self); in mutex_unlock()
2593 if ((gflags = self->ul_schedctl_called) != NULL) { in mutex_unlock()
2598 preempt(self); in mutex_unlock()
2609 if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) in mutex_unlock()
2673 ulwp_t *self = curthread; in lmutex_lock() local
2674 uberdata_t *udp = self->ul_uberdata; in lmutex_lock()
2678 enter_critical(self); in lmutex_lock()
2689 mp->mutex_owner = (uintptr_t)self; in lmutex_lock()
2694 if (!self->ul_schedctl_called) in lmutex_lock()
2698 mp->mutex_owner = (uintptr_t)self; in lmutex_lock()
2701 (void) mutex_lock_queue(self, msp, mp, NULL); in lmutex_lock()
2712 ulwp_t *self = curthread; in lmutex_unlock() local
2713 uberdata_t *udp = self->ul_uberdata; in lmutex_unlock()
2736 preempt(self); in lmutex_unlock()
2739 exit_critical(self); in lmutex_unlock()
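lmutex_lock()/lmutex_unlock() protect libc-internal locks: the acquisition is wrapped in enter_critical()/exit_critical() so the thread can take no signals and cannot be cancelled for as long as it holds the lock. The shape, with assumed acquire/release helpers:

    typedef struct ulwp ulwp_t;
    typedef struct mutex mutex_t;

    extern void enter_critical(ulwp_t *), exit_critical(ulwp_t *);
    extern void acquire(mutex_t *, ulwp_t *);   /* fast CAS, else queue */
    extern void release_and_wake(mutex_t *);    /* clear word, unpark one */

    static void
    lmutex_lock_shape(ulwp_t *self, mutex_t *mp)
    {
        enter_critical(self);   /* no signals, no cancellation from here */
        acquire(mp, self);
    }

    static void
    lmutex_unlock_shape(ulwp_t *self, mutex_t *mp)
    {
        release_and_wake(mp);
        exit_critical(self);    /* critical region spans the hold time */
    }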
2751 ulwp_t *self = curthread; in sig_mutex_lock() local
2753 sigoff(self); in sig_mutex_lock()
2760 ulwp_t *self = curthread; in sig_mutex_unlock() local
2763 sigon(self); in sig_mutex_unlock()
2769 ulwp_t *self = curthread; in sig_mutex_trylock() local
2772 sigoff(self); in sig_mutex_trylock()
2774 sigon(self); in sig_mutex_trylock()
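The sig_mutex_* wrappers defer asynchronous signals for exactly the time the lock is held: sigoff() before locking, sigon() after unlocking, and, in the trylock case, sigon() immediately when the lock was not acquired. A sketch against assumed mutex and sigoff()/sigon() declarations:

    typedef struct ulwp ulwp_t;
    typedef struct mutex mutex_t;

    extern void sigoff(ulwp_t *), sigon(ulwp_t *);
    extern int mutex_lock(mutex_t *);
    extern int mutex_trylock(mutex_t *);
    extern int mutex_unlock(mutex_t *);

    static void
    sig_mutex_lock_shape(ulwp_t *self, mutex_t *mp)
    {
        sigoff(self);
        (void) mutex_lock(mp);
    }

    static void
    sig_mutex_unlock_shape(ulwp_t *self, mutex_t *mp)
    {
        (void) mutex_unlock(mp);
        sigon(self);
    }

    static int
    sig_mutex_trylock_shape(ulwp_t *self, mutex_t *mp)
    {
        int error;

        sigoff(self);
        if ((error = mutex_trylock(mp)) != 0)
            sigon(self);    /* not acquired: stop deferring */
        return (error);
    }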
2844 ulwp_t *self = curthread; in cancel_safe_mutex_unlock() local
2846 ASSERT(self->ul_libc_locks != 0); in cancel_safe_mutex_unlock()
2857 if (--self->ul_libc_locks == 0 && in cancel_safe_mutex_unlock()
2858 !(self->ul_vfork | self->ul_nocancel | in cancel_safe_mutex_unlock()
2859 self->ul_critical | self->ul_sigdefer) && in cancel_safe_mutex_unlock()
2881 ulwp_t *self = curthread; in shared_mutex_held() local
2882 uberdata_t *udp = self->ul_uberdata; in shared_mutex_held()
2884 return (MUTEX_OWNED(mp, self) && mp->mutex_ownerpid == udp->pid); in shared_mutex_held()
2973 ulwp_t *self = curthread; in pthread_spin_trylock() local
2976 no_preempt(self); in pthread_spin_trylock()
2980 mp->mutex_owner = (uintptr_t)self; in pthread_spin_trylock()
2982 mp->mutex_ownerpid = self->ul_uberdata->pid; in pthread_spin_trylock()
2985 preempt(self); in pthread_spin_trylock()
2993 ulwp_t *self = curthread; in pthread_spin_lock() local
2997 ASSERT(!self->ul_critical || self->ul_bindflags); in pthread_spin_lock()
3007 no_preempt(self); in pthread_spin_lock()
3010 preempt(self); in pthread_spin_lock()
3016 mp->mutex_owner = (uintptr_t)self; in pthread_spin_lock()
3018 mp->mutex_ownerpid = self->ul_uberdata->pid; in pthread_spin_lock()
3019 preempt(self); in pthread_spin_lock()
3031 ulwp_t *self = curthread; in pthread_spin_unlock() local
3033 no_preempt(self); in pthread_spin_unlock()
3038 preempt(self); in pthread_spin_unlock()
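The pthread_spin_* implementations above take the no_preempt()/preempt() bracket around the busy-wait so a spinner is not descheduled mid-spin, and record mutex_ownerpid for process-shared locks. A self-contained usage example of the POSIX API they implement:

    #include <pthread.h>
    #include <stdio.h>

    int
    main(void)
    {
        pthread_spinlock_t lock;
        long counter = 0;

        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        pthread_spin_lock(&lock);   /* busy-waits if contended */
        counter++;                  /* keep critical sections short */
        pthread_spin_unlock(&lock);
        pthread_spin_destroy(&lock);
        printf("%ld\n", counter);
        return (0);
    }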
3050 ulwp_t *self = curthread; in find_lock_entry() local
3055 if ((nlocks = self->ul_heldlockcnt) != 0) in find_lock_entry()
3056 lockptr = self->ul_heldlocks.array; in find_lock_entry()
3059 lockptr = &self->ul_heldlocks.single; in find_lock_entry()
3077 if ((nlocks = self->ul_heldlockcnt) == 0) { in find_lock_entry()
3082 self->ul_heldlockcnt = nlocks = INITIAL_LOCKS; in find_lock_entry()
3087 *lockptr = self->ul_heldlocks.single; in find_lock_entry()
3088 self->ul_heldlocks.array = lockptr; in find_lock_entry()
3099 (void) memcpy(lockptr, self->ul_heldlocks.array, in find_lock_entry()
3101 lfree(self->ul_heldlocks.array, nlocks * sizeof (mutex_t *)); in find_lock_entry()
3102 self->ul_heldlocks.array = lockptr; in find_lock_entry()
3103 self->ul_heldlockcnt *= 2; in find_lock_entry()
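find_lock_entry() grows the per-thread held-locks registry on demand: a single inline slot (ul_heldlocks.single) serves until a second entry is needed, then a heap array of INITIAL_LOCKS slots takes over and doubles whenever it fills (the memcpy/lfree/*= 2 sequence above). A sketch of that growth policy with a reduced union; the INITIAL_LOCKS value is assumed and OOM handling is elided:

    #include <stdlib.h>
    #include <string.h>

    typedef struct {
        unsigned cnt;           /* 0 = still using the inline slot */
        union {
            void *single;
            void **array;
        } u;
    } heldlocks_t;

    #define INITIAL_LOCKS 4     /* assumed; mirrors INITIAL_LOCKS above */

    static void **
    find_free_slot(heldlocks_t *hl)
    {
        if (hl->cnt == 0) {
            if (hl->u.single == NULL)
                return (&hl->u.single);     /* inline slot is free */
            void **arr = calloc(INITIAL_LOCKS, sizeof (void *));
            arr[0] = hl->u.single;          /* migrate the inline entry */
            hl->u.array = arr;
            hl->cnt = INITIAL_LOCKS;
            return (&arr[1]);
        }
        for (unsigned i = 0; i < hl->cnt; i++)
            if (hl->u.array[i] == NULL)
                return (&hl->u.array[i]);
        /* all slots used: double the array and keep the old contents */
        void **bigger = calloc(hl->cnt * 2, sizeof (void *));
        memcpy(bigger, hl->u.array, hl->cnt * sizeof (void *));
        free(hl->u.array);
        hl->u.array = bigger;
        hl->cnt *= 2;
        return (&hl->u.array[hl->cnt / 2]); /* first slot past the old end */
    }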
3152 ulwp_t *self = curthread; in heldlock_exit() local
3157 if ((nlocks = self->ul_heldlockcnt) != 0) in heldlock_exit()
3158 lockptr = self->ul_heldlocks.array; in heldlock_exit()
3161 lockptr = &self->ul_heldlocks.single; in heldlock_exit()
3181 heldlock_free(self); in heldlock_exit()
3226 ulwp_t *self = curthread; in cond_sleep_queue() local
3241 self->ul_sp = stkptr(); in cond_sleep_queue()
3243 enqueue(qp, self, 0); in cond_sleep_queue()
3245 self->ul_cvmutex = mp; in cond_sleep_queue()
3246 self->ul_cv_wake = cv_wake = (tsp != NULL); in cond_sleep_queue()
3247 self->ul_signalled = 0; in cond_sleep_queue()
3255 set_parking_flag(self, 1); in cond_sleep_queue()
3258 lwpid = preempt_unpark(self, lwpid); in cond_sleep_queue()
3259 preempt(self); in cond_sleep_queue()
3269 if (self->ul_cursig != 0 || in cond_sleep_queue()
3270 (self->ul_cancelable && self->ul_cancel_pending)) in cond_sleep_queue()
3271 set_parking_flag(self, 0); in cond_sleep_queue()
3277 set_parking_flag(self, 0); in cond_sleep_queue()
3287 if (self->ul_sleepq == NULL) in cond_sleep_queue()
3295 if (!cv_wake && self->ul_sleepq == mqp) { /* mutex queue */ in cond_sleep_queue()
3301 } else if (self->ul_sleepq == qp) { /* condvar queue */ in cond_sleep_queue()
3317 self->ul_sp = 0; in cond_sleep_queue()
3318 self->ul_cv_wake = 0; in cond_sleep_queue()
3319 ASSERT(self->ul_cvmutex == NULL); in cond_sleep_queue()
3320 ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL && in cond_sleep_queue()
3321 self->ul_wchan == NULL); in cond_sleep_queue()
3323 signalled = self->ul_signalled; in cond_sleep_queue()
3324 self->ul_signalled = 0; in cond_sleep_queue()
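cond_sleep_queue() implements the waiter side of the condvar protocol: the thread is on the condvar's sleep queue, with the associated mutex recorded in ul_cvmutex, before the mutex is released, so a signal arriving in the unlock-to-park window cannot be lost; ul_signalled distinguishes a real cond_signal() from a timeout or interrupt. A much-reduced skeleton under assumed helpers (the real code also handles being morphed onto the mutex queue, cancellation, and ul_cv_wake):

    typedef struct queue_head queue_head_t;
    typedef struct mutex mutex_t;
    typedef struct cond cond_t;
    typedef struct ulwp {
        mutex_t *ul_cvmutex;    /* mutex a signaller may requeue us onto */
        int ul_signalled;       /* set by cond_signal(), not by a timeout */
    } ulwp_t;
    enum { CV = 1 };            /* queue type tag */

    extern queue_head_t *queue_lock(void *wchan, int qtype);
    extern void queue_unlock(queue_head_t *);
    extern void enqueue(queue_head_t *, ulwp_t *, int);
    extern void mutex_release_inner(mutex_t *);     /* assumed name */
    extern void park(void);
    extern void unsleep_self(ulwp_t *);             /* dequeue if queued */

    static int
    cond_sleep_queue_shape(ulwp_t *self, cond_t *cvp, mutex_t *mp)
    {
        queue_head_t *qp = queue_lock(cvp, CV);

        enqueue(qp, self, 0);       /* on the condvar queue before unlock */
        self->ul_cvmutex = mp;
        self->ul_signalled = 0;
        mutex_release_inner(mp);    /* drop mutex; a signal cannot be lost */
        queue_unlock(qp);
        park();                     /* wake on signal, broadcast, or timeout */
        unsleep_self(self);         /* remove ourselves if still queued */
        return (self->ul_signalled);
    }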
3352 ulwp_t *self = curthread; in cond_wait_queue() local
3356 if (self->ul_error_detection && self->ul_misaligned == 0) in cond_wait_queue()
3373 if (self->ul_cond_wait_defer) in cond_wait_queue()
3374 sigoff(self); in cond_wait_queue()
3387 if (self->ul_cond_wait_defer) in cond_wait_queue()
3388 sigon(self); in cond_wait_queue()
3401 ulwp_t *self = curthread; in cond_sleep_kernel() local
3407 self->ul_sp = stkptr(); in cond_sleep_kernel()
3408 self->ul_wchan = cvp; in cond_sleep_kernel()
3409 sigoff(self); in cond_sleep_kernel()
3414 self->ul_pilocks--; in cond_sleep_kernel()
3424 set_parking_flag(self, 1); in cond_sleep_kernel()
3425 if (self->ul_cursig != 0 || in cond_sleep_kernel()
3426 (self->ul_cancelable && self->ul_cancel_pending)) in cond_sleep_kernel()
3427 set_parking_flag(self, 0); in cond_sleep_kernel()
3429 set_parking_flag(self, 0); in cond_sleep_kernel()
3430 sigon(self); in cond_sleep_kernel()
3431 self->ul_sp = 0; in cond_sleep_kernel()
3432 self->ul_wchan = NULL; in cond_sleep_kernel()
3439 ulwp_t *self = curthread; in cond_wait_kernel() local
3443 if (self->ul_error_detection && self->ul_misaligned == 0) in cond_wait_kernel()
3449 if (self->ul_cond_wait_defer) in cond_wait_kernel()
3450 sigoff(self); in cond_wait_kernel()
3467 if (self->ul_cond_wait_defer) in cond_wait_kernel()
3468 sigon(self); in cond_wait_kernel()
3481 ulwp_t *self = curthread; in cond_wait_common() local
3482 uberdata_t *udp = self->ul_uberdata; in cond_wait_common()
3502 if (__td_event_report(self, TD_SLEEP, udp)) { in cond_wait_common()
3503 self->ul_sp = stkptr(); in cond_wait_common()
3504 self->ul_wchan = cvp; in cond_wait_common()
3505 self->ul_td_evbuf.eventnum = TD_SLEEP; in cond_wait_common()
3506 self->ul_td_evbuf.eventdata = cvp; in cond_wait_common()
3508 self->ul_sp = 0; in cond_wait_common()
3521 if (self->ul_error_detection) { in cond_wait_common()
3578 ulwp_t *self = curthread; in __cond_wait() local
3579 uberdata_t *udp = self->ul_uberdata; in __cond_wait()
3590 if ((gflags = self->ul_schedctl_called) != NULL && in __cond_wait()
3592 self->ul_td_events_enable | in __cond_wait()
3810 ulwp_t *self = curthread; in cond_signal() local
3811 uberdata_t *udp = self->ul_uberdata; in cond_signal()
3867 if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) { in cond_signal()
3870 no_preempt(self); in cond_signal()
3875 preempt(self); in cond_signal()
3941 ulwp_t *self = curthread; in cond_broadcast() local
3942 uberdata_t *udp = self->ul_uberdata; in cond_broadcast()
3995 if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) { in cond_broadcast()
4019 no_preempt(self); in cond_broadcast()
4025 preempt(self); in cond_broadcast()
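cond_signal() wakes one waiter and cond_broadcast() wakes them all; in both, the ul_cv_wake || !MUTEX_OWNED(mp, self) test decides whether a waiter can instead be moved straight onto the mutex's sleep queue (wait morphing), avoiding a wakeup that would immediately block on the mutex. A standard self-contained usage example of the API these paths implement; note the while loop, since wakeups can be spurious:

    #include <pthread.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static int ready;

    void
    producer(void)
    {
        pthread_mutex_lock(&m);
        ready = 1;
        pthread_cond_signal(&cv);   /* signal with the mutex held, so the
                                     * implementation can morph the waiter
                                     * onto the mutex queue */
        pthread_mutex_unlock(&m);
    }

    void
    consumer(void)
    {
        pthread_mutex_lock(&m);
        while (!ready)              /* re-test the predicate on every wake */
            pthread_cond_wait(&cv, &m);
        pthread_mutex_unlock(&m);
    }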