Lines matching "wake-up"

77 * Used to limit kmem for each thread. This is a per-thread limit that
81 * process, the per-thread limit automatically becomes a process-wide limit
95 static kthread_t *lwpsobj_pi_owner(upimutex_t *up);
110 * likely are 8-byte aligned, so we shift off the low-order 3 bits.
115 (LWPCHAN_LOCK_SIZE - 1)) + ((pool)? LWPCHAN_LOCK_SIZE : 0))
120 * Is this a POSIX threads user-level lock requiring priority inheritance?
127 uint_t x = (uintptr_t)lwpchan->lc_wchan ^ (uintptr_t)lwpchan->lc_wchan0; in lwpsqhash()
138 uint_t x = (uintptr_t)lwpchan->lc_wchan ^ (uintptr_t)lwpchan->lc_wchan0; in lwpchan_lock()
149 uint_t x = (uintptr_t)lwpchan->lc_wchan ^ (uintptr_t)lwpchan->lc_wchan0; in lwpchan_unlock()
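
The three fragments above (lwpsqhash(), lwpchan_lock(), lwpchan_unlock()) all reduce the two words of an lwpchan to a table index by XOR-ing them. A minimal user-space sketch of the same idiom, hashing a two-word channel into two adjacent pools of padded locks; chan_t, POOL_BITS, and chan_lock() are illustrative names, not from this file:

#include <pthread.h>
#include <stdint.h>

#define	POOL_BITS	6
#define	POOL_SIZE	(1u << POOL_BITS)

typedef struct { void *w0, *w1; } chan_t;

typedef struct {
	pthread_mutex_t m;	/* initialize with pthread_mutex_init() */
	char pad[64];		/* crude padding against false sharing */
} padlock_t;

static padlock_t lockpool[2 * POOL_SIZE];	/* pools 0 and 1, back to back */

static pthread_mutex_t *
chan_lock(const chan_t *c, int pool)
{
	uintptr_t x = (uintptr_t)c->w0 ^ (uintptr_t)c->w1;
	/* shift off the low 3 bits: sync objects are 8-byte aligned */
	unsigned i = (unsigned)((x >> 3) & (POOL_SIZE - 1)) +
	    (pool ? POOL_SIZE : 0);
	return (&lockpool[i].m);
}
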
168 mutex_enter(&p->p_lcp_lock); in lwpchan_delete_mapping()
169 lcp = p->p_lcp; in lwpchan_delete_mapping()
170 hashbucket = lcp->lwpchan_cache; in lwpchan_delete_mapping()
171 endbucket = hashbucket + lcp->lwpchan_size; in lwpchan_delete_mapping()
173 if (hashbucket->lwpchan_chain == NULL) in lwpchan_delete_mapping()
175 mutex_enter(&hashbucket->lwpchan_lock); in lwpchan_delete_mapping()
176 prev = &hashbucket->lwpchan_chain; in lwpchan_delete_mapping()
179 addr = ent->lwpchan_addr; in lwpchan_delete_mapping()
181 *prev = ent->lwpchan_next; in lwpchan_delete_mapping()
188 if (ent->lwpchan_pool == LWPCHAN_MPPOOL && in lwpchan_delete_mapping()
189 (ent->lwpchan_type & USYNC_PROCESS_ROBUST)) in lwpchan_delete_mapping()
192 * If there is a user-level robust lock in lwpchan_delete_mapping()
195 if ((addr = ent->lwpchan_uaddr) != NULL) in lwpchan_delete_mapping()
198 atomic_dec_32(&lcp->lwpchan_entries); in lwpchan_delete_mapping()
200 prev = &ent->lwpchan_next; in lwpchan_delete_mapping()
203 mutex_exit(&hashbucket->lwpchan_lock); in lwpchan_delete_mapping()
205 mutex_exit(&p->p_lcp_lock); in lwpchan_delete_mapping()
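
Note the pointer-to-pointer walk above: prev always holds the address of the link that points at the current entry, so unlinking is the single assignment *prev = ent->lwpchan_next with no head-of-list special case. A self-contained sketch of that idiom, with the address-range test reduced to an integer key (node_t and prune_range() are illustrative):

#include <stdlib.h>

typedef struct node { struct node *next; int key; } node_t;

/* Free every node whose key falls in [lo, hi), the way
 * lwpchan_delete_mapping() prunes entries inside an unmapped range. */
static void
prune_range(node_t **head, int lo, int hi)
{
	node_t **prev = head;
	node_t *ent;

	while ((ent = *prev) != NULL) {
		if (ent->key >= lo && ent->key < hi) {
			*prev = ent->next;	/* unlink; no head special case */
			free(ent);
		} else {
			prev = &ent->next;	/* advance the link pointer */
		}
	}
}
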
218 * All user-level sync object addresses are 8-byte aligned. in lwpchan_bucket()
220 * higher-order 2*lwpchan_bits bits for the hash index. in lwpchan_bucket()
223 i = (addr ^ (addr >> lcp->lwpchan_bits)) & lcp->lwpchan_mask; in lwpchan_bucket()
224 return (lcp->lwpchan_cache + i); in lwpchan_bucket()
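
lwpchan_bucket() mixes the address with a shifted copy of itself before masking, so about 2*lwpchan_bits bits of the (8-byte-aligned) address influence the bucket choice instead of just the low ones. A hedged sketch of that fold (bucket_index() is an invented name):

#include <stdint.h>

static unsigned
bucket_index(uintptr_t addr, unsigned bits)
{
	uintptr_t a = addr >> 3;	/* discard the alignment bits */
	/* fold the next-higher 'bits' bits down onto the low ones */
	return ((unsigned)((a ^ (a >> bits)) & ((1u << bits) - 1)));
}
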
228 * (Re)allocate the per-process lwpchan cache.
245 lcp->lwpchan_bits = bits; in lwpchan_alloc_cache()
246 lcp->lwpchan_size = 1 << lcp->lwpchan_bits; in lwpchan_alloc_cache()
247 lcp->lwpchan_mask = lcp->lwpchan_size - 1; in lwpchan_alloc_cache()
248 lcp->lwpchan_entries = 0; in lwpchan_alloc_cache()
249 lcp->lwpchan_cache = kmem_zalloc(lcp->lwpchan_size * in lwpchan_alloc_cache()
251 lcp->lwpchan_next_data = NULL; in lwpchan_alloc_cache()
253 mutex_enter(&p->p_lcp_lock); in lwpchan_alloc_cache()
254 if ((old_lcp = p->p_lcp) != NULL) { in lwpchan_alloc_cache()
255 if (old_lcp->lwpchan_bits >= bits) { in lwpchan_alloc_cache()
257 mutex_exit(&p->p_lcp_lock); in lwpchan_alloc_cache()
258 kmem_free(lcp->lwpchan_cache, lcp->lwpchan_size * in lwpchan_alloc_cache()
266 hashbucket = old_lcp->lwpchan_cache; in lwpchan_alloc_cache()
267 endbucket = hashbucket + old_lcp->lwpchan_size; in lwpchan_alloc_cache()
269 mutex_enter(&hashbucket->lwpchan_lock); in lwpchan_alloc_cache()
276 hashbucket = old_lcp->lwpchan_cache; in lwpchan_alloc_cache()
278 ent = hashbucket->lwpchan_chain; in lwpchan_alloc_cache()
280 next = ent->lwpchan_next; in lwpchan_alloc_cache()
282 (uintptr_t)ent->lwpchan_addr); in lwpchan_alloc_cache()
283 ent->lwpchan_next = newbucket->lwpchan_chain; in lwpchan_alloc_cache()
284 newbucket->lwpchan_chain = ent; in lwpchan_alloc_cache()
288 hashbucket->lwpchan_chain = NULL; in lwpchan_alloc_cache()
290 lcp->lwpchan_entries = count; in lwpchan_alloc_cache()
302 lcp->lwpchan_next_data = old_lcp; in lwpchan_alloc_cache()
310 p->p_lcp = lcp; in lwpchan_alloc_cache()
316 hashbucket = old_lcp->lwpchan_cache; in lwpchan_alloc_cache()
318 mutex_exit(&hashbucket->lwpchan_lock); in lwpchan_alloc_cache()
320 mutex_exit(&p->p_lcp_lock); in lwpchan_alloc_cache()
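
lwpchan_alloc_cache() grows the cache by building a larger table, locking every old bucket, relinking each entry onto its new bucket, and only then publishing the new table through p->p_lcp; the old table stays chained on lwpchan_next_data because lock-free readers may still hold pointers into it. The relinking step modeled in isolation, single-threaded for clarity (ent_t, rehash_all(), and the hash are illustrative):

typedef struct ent { struct ent *next; uintptr_t addr; } ent_t;

/* Move every entry from oldtab onto newtab.  Chains are relinked in
 * place, so nothing is allocated or freed during the move; the return
 * value becomes the new entry count. */
static unsigned
rehash_all(ent_t **oldtab, unsigned oldsz, ent_t **newtab, unsigned newmask)
{
	unsigned b, i, count = 0;
	ent_t *e, *next;

	for (b = 0; b < oldsz; b++) {
		for (e = oldtab[b]; e != NULL; e = next) {
			next = e->next;
			i = (unsigned)((e->addr ^ (e->addr >> 1)) & newmask);
			e->next = newtab[i];	/* push onto the new chain */
			newtab[i] = e;
			count++;
		}
		oldtab[b] = NULL;
	}
	return (count);
}
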
339 lcp = p->p_lcp; in lwpchan_destroy_cache()
340 p->p_lcp = NULL; in lwpchan_destroy_cache()
343 hashbucket = lcp->lwpchan_cache; in lwpchan_destroy_cache()
344 endbucket = hashbucket + lcp->lwpchan_size; in lwpchan_destroy_cache()
346 ent = hashbucket->lwpchan_chain; in lwpchan_destroy_cache()
347 hashbucket->lwpchan_chain = NULL; in lwpchan_destroy_cache()
349 next = ent->lwpchan_next; in lwpchan_destroy_cache()
350 if (ent->lwpchan_pool == LWPCHAN_MPPOOL && in lwpchan_destroy_cache()
351 (ent->lwpchan_type & (USYNC_PROCESS | LOCK_ROBUST)) in lwpchan_destroy_cache()
360 lwpchan_data_t *next_lcp = lcp->lwpchan_next_data; in lwpchan_destroy_cache()
361 kmem_free(lcp->lwpchan_cache, lcp->lwpchan_size * in lwpchan_destroy_cache()
370 * given process virtual address and non-zero when there is not.
371 * The returned non-zero value is the current length of the
381 for (ent = hashbucket->lwpchan_chain; ent; ent = ent->lwpchan_next) { in lwpchan_cache_mapping()
382 if (ent->lwpchan_addr == addr) { in lwpchan_cache_mapping()
383 if (ent->lwpchan_type != type || in lwpchan_cache_mapping()
384 ent->lwpchan_pool != pool) { in lwpchan_cache_mapping()
391 ent->lwpchan_type = (uint16_t)type; in lwpchan_cache_mapping()
392 ent->lwpchan_pool = (uint16_t)pool; in lwpchan_cache_mapping()
394 *lwpchan = ent->lwpchan_lwpchan; in lwpchan_cache_mapping()
420 if ((lcp = p->p_lcp) == NULL) { in lwpchan_get_mapping()
425 mutex_enter(&hashbucket->lwpchan_lock); in lwpchan_get_mapping()
426 if (lcp != p->p_lcp) { in lwpchan_get_mapping()
428 mutex_exit(&hashbucket->lwpchan_lock); in lwpchan_get_mapping()
433 mutex_exit(&hashbucket->lwpchan_lock); in lwpchan_get_mapping()
436 mutex_exit(&hashbucket->lwpchan_lock); in lwpchan_get_mapping()
439 lwpchan->lc_wchan0 = (caddr_t)(uintptr_t)memid.val[0]; in lwpchan_get_mapping()
440 lwpchan->lc_wchan = (caddr_t)(uintptr_t)memid.val[1]; in lwpchan_get_mapping()
442 mutex_enter(&hashbucket->lwpchan_lock); in lwpchan_get_mapping()
443 if (lcp != p->p_lcp) { in lwpchan_get_mapping()
445 mutex_exit(&hashbucket->lwpchan_lock); in lwpchan_get_mapping()
452 mutex_exit(&hashbucket->lwpchan_lock); in lwpchan_get_mapping()
456 if (count > lcp->lwpchan_bits + 2 && /* larger table, longer chains */ in lwpchan_get_mapping()
457 (bits = lcp->lwpchan_bits) < LWPCHAN_MAX_BITS) { in lwpchan_get_mapping()
459 mutex_exit(&hashbucket->lwpchan_lock); in lwpchan_get_mapping()
464 ent->lwpchan_addr = addr; in lwpchan_get_mapping()
465 ent->lwpchan_uaddr = uaddr; in lwpchan_get_mapping()
466 ent->lwpchan_type = (uint16_t)type; in lwpchan_get_mapping()
467 ent->lwpchan_pool = (uint16_t)pool; in lwpchan_get_mapping()
468 ent->lwpchan_lwpchan = *lwpchan; in lwpchan_get_mapping()
469 ent->lwpchan_next = hashbucket->lwpchan_chain; in lwpchan_get_mapping()
470 hashbucket->lwpchan_chain = ent; in lwpchan_get_mapping()
471 atomic_inc_32(&lcp->lwpchan_entries); in lwpchan_get_mapping()
472 mutex_exit(&hashbucket->lwpchan_lock); in lwpchan_get_mapping()
478 * synchronization object's virtual address. Process-shared
485 * If the lwp synch object is defined to be process-private, in get_lwpchan()
489 * The lwpchan cache is used only for process-shared objects. in get_lwpchan()
492 lwpchan->lc_wchan0 = (caddr_t)as; in get_lwpchan()
493 lwpchan->lc_wchan = addr; in get_lwpchan()
508 t->t_flag |= T_WAKEABLE; in lwp_block()
509 t->t_lwpchan = *lwpchan; in lwp_block()
510 t->t_sobj_ops = &lwp_sobj_ops; in lwp_block()
511 t->t_release = 0; in lwp_block()
513 disp_lock_enter_high(&sqh->sq_lock); in lwp_block()
516 THREAD_SLEEP(t, &sqh->sq_lock); in lwp_block()
517 sleepq_insert(&sqh->sq_queue, t); in lwp_block()
519 lwp->lwp_asleep = 1; in lwp_block()
520 lwp->lwp_sysabort = 0; in lwp_block()
521 lwp->lwp_ru.nvcsw++; in lwp_block()
526 lwpsobj_pi_owner(upimutex_t *up) in lwpsobj_pi_owner() argument
528 return (up->upi_owner); in lwpsobj_pi_owner()
536 for (upip = upibp->upib_first; upip != NULL; in upi_get()
537 upip = upip->upi_nextchain) { in upi_get()
538 if (upip->upi_lwpchan.lc_wchan0 == lcp->lc_wchan0 && in upi_get()
539 upip->upi_lwpchan.lc_wchan == lcp->lc_wchan) in upi_get()
548 ASSERT(MUTEX_HELD(&upibp->upib_lock)); in upi_chain_add()
556 upimutex->upi_nextchain = upibp->upib_first; in upi_chain_add()
557 upibp->upib_first = upimutex; in upi_chain_add()
565 ASSERT(MUTEX_HELD(&upibp->upib_lock)); in upi_chain_del()
567 prev = &upibp->upib_first; in upi_chain_del()
569 prev = &(*prev)->upi_nextchain; in upi_chain_del()
571 *prev = upimutex->upi_nextchain; in upi_chain_del()
572 upimutex->upi_nextchain = NULL; in upi_chain_del()
589 upimutex->upi_nextowned = t->t_upimutex; in upi_mylist_add()
590 t->t_upimutex = upimutex; in upi_mylist_add()
591 t->t_nupinest++; in upi_mylist_add()
592 ASSERT(t->t_nupinest > 0); in upi_mylist_add()
593 return (t->t_nupinest); in upi_mylist_add()
610 prev = &t->t_upimutex; in upi_mylist_del()
612 prev = &(*prev)->upi_nextowned; in upi_mylist_del()
614 *prev = upimutex->upi_nextowned; in upi_mylist_del()
615 upimutex->upi_nextowned = NULL; in upi_mylist_del()
616 ASSERT(t->t_nupinest > 0); in upi_mylist_del()
617 t->t_nupinest--; in upi_mylist_del()
627 return (upim->upi_owner == curthread); in upi_owned()
640 if (!get_lwpchan(curproc->p_as, (caddr_t)lp, type, in lwp_upimutex_owned()
645 mutex_enter(&upibp->upib_lock); in lwp_upimutex_owned()
647 if (upimutex == NULL || upimutex->upi_owner != curthread) { in lwp_upimutex_owned()
648 mutex_exit(&upibp->upib_lock); in lwp_upimutex_owned()
651 mutex_exit(&upibp->upib_lock); in lwp_upimutex_owned()
656 * Unlocks upimutex, waking up waiters if any. upimutex kmem is freed if
657 * no lock hand-off occurs.
667 upibp = upimutex->upi_upibp; in upimutex_unlock()
668 mutex_enter(&upibp->upib_lock); in upimutex_unlock()
669 if (upimutex->upi_waiter != 0) { /* if waiters */ in upimutex_unlock()
672 /* hand-off lock to highest prio waiter */ in upimutex_unlock()
673 newowner = ts->ts_sleepq[TS_WRITER_Q].sq_first; in upimutex_unlock()
674 upimutex->upi_owner = newowner; in upimutex_unlock()
675 if (ts->ts_waiters == 1) in upimutex_unlock()
676 upimutex->upi_waiter = 0; in upimutex_unlock()
678 mutex_exit(&upibp->upib_lock); in upimutex_unlock()
682 turnstile_wakeup(ts, TS_WRITER_Q, ts->ts_waiters, NULL); in upimutex_unlock()
687 * will soon be freed). Re-calculate PI from existing in upimutex_unlock()
697 * de-allocate kernel memory (upimutex). in upimutex_unlock()
699 upi_chain_del(upimutex->upi_upibp, upimutex); in upimutex_unlock()
700 mutex_exit(&upibp->upib_lock); in upimutex_unlock()
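
When there are waiters, upimutex_unlock() never lets the lock go free: ownership is assigned to the highest-priority sleeper before turnstile_wakeup() runs, so no thread can barge in between release and wakeup. A toy user-space model of hand-off, meant to be called with the container's lock held just as the real code holds upib_lock (waiter_t, hlock_t, and handoff_unlock() are invented):

typedef struct waiter { struct waiter *next; int prio; } waiter_t;

typedef struct {
	waiter_t *owner;	/* NULL when the lock is free */
	waiter_t *waiters;	/* sleeping threads, unordered */
} hlock_t;

/* Returns the thread to wake, already installed as owner, or NULL. */
static waiter_t *
handoff_unlock(hlock_t *l)
{
	waiter_t **pp, **best = NULL, *w;

	for (pp = &l->waiters; *pp != NULL; pp = &(*pp)->next)
		if (best == NULL || (*pp)->prio > (*best)->prio)
			best = pp;
	if (best == NULL) {
		l->owner = NULL;	/* no waiters: lock becomes free */
		return (NULL);
	}
	w = *best;
	*best = w->next;	/* dequeue the winner */
	w->next = NULL;
	l->owner = w;		/* hand-off: w owns it before it wakes */
	return (w);
}
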
723 if (!get_lwpchan(curproc->p_as, (caddr_t)lp, type, in lwp_upimutex_lock()
730 mutex_enter(&upibp->upib_lock); in lwp_upimutex_lock()
736 upimutex->upi_owner = curthread; /* grab lock */ in lwp_upimutex_lock()
737 upimutex->upi_upibp = upibp; in lwp_upimutex_lock()
738 upimutex->upi_vaddr = lp; in lwp_upimutex_lock()
739 upimutex->upi_lwpchan = lwpchan; in lwp_upimutex_lock()
740 mutex_exit(&upibp->upib_lock); in lwp_upimutex_lock()
743 fuword16_noerr(&lp->mutex_flag, &flag); in lwp_upimutex_lock()
753 * was done under the high-level upi mutex, in lwp_upimutex_lock()
774 * This is due to lock hand-off, and release of upimutex when no in lwp_upimutex_lock()
777 ASSERT(upimutex->upi_owner != NULL); in lwp_upimutex_lock()
778 if (upimutex->upi_owner == curthread) { in lwp_upimutex_lock()
781 * ERRORCHECK: if not, it should stall at user-level. in lwp_upimutex_lock()
784 mutex_exit(&upibp->upib_lock); in lwp_upimutex_lock()
789 mutex_exit(&upibp->upib_lock); in lwp_upimutex_lock()
796 if ((error = lwptp->lwpt_time_error) != 0) { in lwp_upimutex_lock()
802 mutex_exit(&upibp->upib_lock); in lwp_upimutex_lock()
805 if (lwptp->lwpt_tsp != NULL) { in lwp_upimutex_lock()
812 mutex_enter(&curthread->t_delay_lock); in lwp_upimutex_lock()
814 mutex_exit(&curthread->t_delay_lock); in lwp_upimutex_lock()
827 * This is the key to preventing a missed wake-up. Otherwise, the in lwp_upimutex_lock()
844 upimutex->upi_waiter = 1; in lwp_upimutex_lock()
846 &lwp_sobj_pi_ops, &upibp->upib_lock, lwptp); in lwp_upimutex_lock()
848 * Hand-off implies that we wakeup holding the lock, except when: in lwp_upimutex_lock()
849 * - deadlock is detected in lwp_upimutex_lock()
850 * - lock is not recoverable in lwp_upimutex_lock()
851 * - we got an interrupt or timeout in lwp_upimutex_lock()
852 * If we wake up due to an interrupt or timeout, we may in lwp_upimutex_lock()
853 * or may not be holding the lock due to mutex hand-off. in lwp_upimutex_lock()
860 * Unlock and return - the re-startable syscall will in lwp_upimutex_lock()
881 * Now, need to read the user-level lp->mutex_flag to do the following: in lwp_upimutex_lock()
883 * - if lock is held, check if EOWNERDEAD or ELOCKUNMAPPED in lwp_upimutex_lock()
885 * - if lock isn't held, check if ENOTRECOVERABLE should in lwp_upimutex_lock()
888 * Now, either lp->mutex_flag is readable or it's not. If not in lwp_upimutex_lock()
896 * could be due to a spurious wake-up or a NOTRECOVERABLE in lwp_upimutex_lock()
900 fuword16_noerr(&lp->mutex_flag, &flag); in lwp_upimutex_lock()
903 * If the thread wakes up from turnstile_block with the lock in lwp_upimutex_lock()
905 * since it would not have been handed-off the lock. in lwp_upimutex_lock()
923 * Wake-up without the upimutex held. Either this is a in lwp_upimutex_lock()
924 * spurious wake-up (due to signals, forkall(), whatever), or in lwp_upimutex_lock()
939 * same time as curthread having been woken up in lwp_upimutex_lock()
944 * with the owner-dead event. in lwp_upimutex_lock()
951 * set to LOCK_NOTRECOVERABLE, and the wake-up of in lwp_upimutex_lock()
956 * Of course, if the user-flag is not set with in lwp_upimutex_lock()
987 if (!get_lwpchan(curproc->p_as, (caddr_t)lp, type, in lwp_upimutex_unlock()
993 mutex_enter(&upibp->upib_lock); in lwp_upimutex_unlock()
997 * error. The user-level wrapper can return this error or stall, in lwp_upimutex_unlock()
1000 if (upimutex == NULL || upimutex->upi_owner != curthread) { in lwp_upimutex_unlock()
1001 mutex_exit(&upibp->upib_lock); in lwp_upimutex_unlock()
1005 mutex_exit(&upibp->upib_lock); /* release for user memory access */ in lwp_upimutex_unlock()
1007 fuword16_noerr(&lp->mutex_flag, &flag); in lwp_upimutex_unlock()
1014 suword16_noerr(&lp->mutex_flag, flag); in lwp_upimutex_unlock()
1025 * Set the owner and ownerpid fields of a user-level mutex. Note, this function
1040 suword32_noerr(&lp->mutex_ownerpid, pid); in set_owner_pid()
1042 if (((uintptr_t)lp & (_LONG_LONG_ALIGNMENT - 1)) == 0) { /* aligned */ in set_owner_pid()
1043 suword64_noerr(&lp->mutex_owner, un.word64); in set_owner_pid()
1047 /* mutex is unaligned or we are running on a 32-bit kernel */ in set_owner_pid()
1048 suword32_noerr((uint32_t *)&lp->mutex_owner, un.word32[0]); in set_owner_pid()
1049 suword32_noerr((uint32_t *)&lp->mutex_owner + 1, un.word32[1]); in set_owner_pid()
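
set_owner_pid() issues one 64-bit store only when the mutex is _LONG_LONG_ALIGNMENT-aligned (and the kernel is 64-bit); otherwise it writes the two 32-bit halves separately through a union. A hedged user-space rendering of that fallback; store_owner() is illustrative and assumes the target is at least 4-byte aligned, as lwp_mutex_t fields are:

#include <stdint.h>
#include <string.h>

static void
store_owner(void *p, uint64_t owner)
{
	if (((uintptr_t)p & (sizeof (uint64_t) - 1)) == 0) {
		*(volatile uint64_t *)p = owner;	/* one aligned store */
	} else {
		uint32_t half[2];
		memcpy(half, &owner, sizeof (half));
		((volatile uint32_t *)p)[0] = half[0];	/* two 32-bit stores */
		((volatile uint32_t *)p)[1] = half[1];
	}
}
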
1053 * Clear the contents of a user-level mutex; return the flags.
1061 fuword16_noerr(&lp->mutex_flag, &flag); in lwp_clear_mutex()
1065 suword16_noerr(&lp->mutex_flag, flag); in lwp_clear_mutex()
1068 suword8_noerr(&lp->mutex_rcount, 0); in lwp_clear_mutex()
1089 lp = upip->upi_vaddr; in upi_dead()
1091 suword8_noerr(&lp->mutex_lockw, 0); in upi_dead()
1106 uint16_t lockflg = (ttoproc(t)->p_proc_flag & P_PR_EXEC)? in upimutex_cleanup()
1110 while ((upip = t->t_upimutex) != NULL) { in upimutex_cleanup()
1116 * woken up. Since user object is unmapped, it could in upimutex_cleanup()
1118 * The waiters will now all wake up and return in upimutex_cleanup()
1120 * has not been handed-off to them. in upimutex_cleanup()
1156 clock_t tim = -1; in lwp_mutex_timedlock()
1167 if ((caddr_t)lp >= p->p_as->a_userlimit) in lwp_mutex_timedlock()
1184 * Although LMS_USER_LOCK implies "asleep waiting for user-mode lock", in lwp_mutex_timedlock()
1198 * Force Copy-on-write if necessary and ensure that the in lwp_mutex_timedlock()
1202 fuword8_noerr(&lp->mutex_type, (uint8_t *)&type); in lwp_mutex_timedlock()
1203 suword8_noerr(&lp->mutex_type, type); in lwp_mutex_timedlock()
1218 (type & USYNC_PROCESS)? p->p_pid : 0); in lwp_mutex_timedlock()
1228 if (!get_lwpchan(curproc->p_as, (caddr_t)lp, type, in lwp_mutex_timedlock()
1236 fuword16_noerr(&lp->mutex_flag, &flag); in lwp_mutex_timedlock()
1243 fuword8_noerr(&lp->mutex_waiters, &waiters); in lwp_mutex_timedlock()
1244 suword8_noerr(&lp->mutex_waiters, 1); in lwp_mutex_timedlock()
1254 while (!ulock_try(&lp->mutex_lockw)) { in lwp_mutex_timedlock()
1276 mutex_enter(&t->t_delay_lock); in lwp_mutex_timedlock()
1278 mutex_exit(&t->t_delay_lock); in lwp_mutex_timedlock()
1289 mutex_exit(&t->t_delay_lock); in lwp_mutex_timedlock()
1295 t->t_flag &= ~T_WAKEABLE; in lwp_mutex_timedlock()
1299 if (ISSIG(t, FORREAL) || lwp->lwp_sysabort || MUSTRETURN(p, t)) in lwp_mutex_timedlock()
1301 else if (imm_timeout || (timedwait && tim == -1)) in lwp_mutex_timedlock()
1304 lwp->lwp_asleep = 0; in lwp_mutex_timedlock()
1305 lwp->lwp_sysabort = 0; in lwp_mutex_timedlock()
1310 * Need to re-compute waiters bit. The waiters field in in lwp_mutex_timedlock()
1313 * for me but I have woken up due to a signal or in lwp_mutex_timedlock()
1324 disp_lock_enter(&sqh->sq_lock); in lwp_mutex_timedlock()
1325 waiters = iswanted(sqh->sq_queue.sq_first, &lwpchan); in lwp_mutex_timedlock()
1326 disp_lock_exit(&sqh->sq_lock); in lwp_mutex_timedlock()
1329 lwp->lwp_asleep = 0; in lwp_mutex_timedlock()
1334 fuword8_noerr(&lp->mutex_waiters, &waiters); in lwp_mutex_timedlock()
1335 suword8_noerr(&lp->mutex_waiters, 1); in lwp_mutex_timedlock()
1337 fuword16_noerr(&lp->mutex_flag, &flag); in lwp_mutex_timedlock()
1345 if (t->t_mstate == LMS_USER_LOCK) in lwp_mutex_timedlock()
1349 set_owner_pid(lp, owner, (type & USYNC_PROCESS)? p->p_pid : 0); in lwp_mutex_timedlock()
1351 fuword16_noerr(&lp->mutex_flag, &flag); in lwp_mutex_timedlock()
1362 suword8_noerr(&lp->mutex_waiters, waiters); in lwp_mutex_timedlock()
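
lwp_mutex_timedlock() follows the classic waiters-bit discipline: announce yourself by storing mutex_waiters = 1 before sleeping, and on every wakeup recompute the bit and retry ulock_try(), because being woken guarantees nothing about who holds the lock. A compact C11 model of that retry loop; the lock-word layout is invented, and a real implementation would park on a sleep queue or futex instead of yielding:

#include <stdatomic.h>
#include <sched.h>

#define	LOCKED	1u
#define	WAITERS	2u

static _Atomic unsigned lockword;

static void
retry_lock(void)
{
	unsigned v = 0;

	/* fast path: 0 -> LOCKED */
	if (atomic_compare_exchange_strong(&lockword, &v, LOCKED))
		return;
	for (;;) {
		/* announce a waiter, then re-test: never sleep on a free lock */
		v = atomic_fetch_or(&lockword, WAITERS);
		if (!(v & LOCKED)) {
			v |= WAITERS;	/* keep the hint conservatively set */
			if (atomic_compare_exchange_strong(&lockword, &v,
			    LOCKED | WAITERS))
				return;
		}
		sched_yield();	/* stand-in for lwp_block() + swtch() */
	}
}
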
1383 if (t->t_lwpchan.lc_wchan0 == lwpchan->lc_wchan0 && in iswanted()
1384 t->t_lwpchan.lc_wchan == lwpchan->lc_wchan) in iswanted()
1386 t = t->t_link; in iswanted()
1401 disp_lock_enter(&sqh->sq_lock); /* lock the sleep queue */ in lwp_queue_waiter()
1402 for (tp = sqh->sq_queue.sq_first; tp != NULL; tp = tp->t_link) { in lwp_queue_waiter()
1403 if (tp->t_lwpchan.lc_wchan0 == lwpchan->lc_wchan0 && in lwp_queue_waiter()
1404 tp->t_lwpchan.lc_wchan == lwpchan->lc_wchan) in lwp_queue_waiter()
1407 disp_lock_exit(&sqh->sq_lock); in lwp_queue_waiter()
1419 disp_lock_enter(&sqh->sq_lock); /* lock the sleep queue */ in lwp_release()
1420 tpp = &sqh->sq_queue.sq_first; in lwp_release()
1422 if (tp->t_lwpchan.lc_wchan0 == lwpchan->lc_wchan0 && in lwp_release()
1423 tp->t_lwpchan.lc_wchan == lwpchan->lc_wchan) { in lwp_release()
1429 * since been re-used to hold a lwp cv or lwp semaphore. in lwp_release()
1435 if (sync_type != (tp->t_flag & T_WAITCVSEM)) { in lwp_release()
1442 disp_lock_exit(&sqh->sq_lock); in lwp_release()
1445 *waiters = iswanted(tp->t_link, lwpchan); in lwp_release()
1448 tp->t_wchan0 = NULL; in lwp_release()
1449 tp->t_wchan = NULL; in lwp_release()
1450 tp->t_sobj_ops = NULL; in lwp_release()
1451 tp->t_release = 1; in lwp_release()
1457 tpp = &tp->t_link; in lwp_release()
1460 disp_lock_exit(&sqh->sq_lock); in lwp_release()
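
lwp_release() recomputes the waiters hint from the remainder of the queue (iswanted(tp->t_link, lwpchan)) before waking the chosen thread, so the value written back to the user lock already reflects this wakeup. A list-based sketch with the two-word channel match reduced to an int key (sleeper_t and release_one() are illustrative):

typedef struct sleeper { struct sleeper *link; int chan; } sleeper_t;

/* Dequeue the first sleeper on chan; *more is 1 iff another sleeper on
 * the same channel remains.  The caller wakes the returned thread. */
static sleeper_t *
release_one(sleeper_t **q, int chan, int *more)
{
	sleeper_t **pp, *s, *t;

	*more = 0;
	for (pp = q; (s = *pp) != NULL; pp = &s->link) {
		if (s->chan == chan) {
			*pp = s->link;	/* unlink the winner */
			s->link = NULL;
			for (t = *pp; t != NULL; t = t->link)
				if (t->chan == chan) { *more = 1; break; }
			return (s);
		}
	}
	return (NULL);
}
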
1472 disp_lock_enter(&sqh->sq_lock); /* lock the sleep queue */ in lwp_release_all()
1473 tpp = &sqh->sq_queue.sq_first; in lwp_release_all()
1475 if (tp->t_lwpchan.lc_wchan0 == lwpchan->lc_wchan0 && in lwp_release_all()
1476 tp->t_lwpchan.lc_wchan == lwpchan->lc_wchan) { in lwp_release_all()
1479 tp->t_wchan0 = NULL; in lwp_release_all()
1480 tp->t_wchan = NULL; in lwp_release_all()
1481 tp->t_sobj_ops = NULL; in lwp_release_all()
1485 tpp = &tp->t_link; in lwp_release_all()
1488 disp_lock_exit(&sqh->sq_lock); /* drop sleep q lock */ in lwp_release_all()
1507 if ((caddr_t)lp >= p->p_as->a_userlimit) in lwp_mutex_wakeup()
1519 * Force Copy-on-write if necessary and ensure that the in lwp_mutex_wakeup()
1523 fuword8_noerr(&lp->mutex_type, (uint8_t *)&type); in lwp_mutex_wakeup()
1524 suword8_noerr(&lp->mutex_type, type); in lwp_mutex_wakeup()
1525 if (!get_lwpchan(curproc->p_as, (caddr_t)lp, type, in lwp_mutex_wakeup()
1533 * Always wake up an lwp (if any) waiting on lwpchan. The woken lwp will in lwp_mutex_wakeup()
1534 * re-try the lock in lwp_mutex_timedlock(). The call to lwp_release() in lwp_mutex_wakeup()
1539 * sleeping since it got the lock on the re-try. The waiter in lwp_mutex_wakeup()
1544 * was woken up by a signal. This time, the waiter recomputes in lwp_mutex_wakeup()
1547 * memory that has been re-used after the lock was dropped. in lwp_mutex_wakeup()
1554 suword8_noerr(&lp->mutex_waiters, waiters); in lwp_mutex_wakeup()
1571 * lwpchan, returned by get_lwpchan(). If the timespec pointer is non-NULL,
1589 clock_t tim = -1; in lwp_cond_wait()
1599 if ((caddr_t)cv >= p->p_as->a_userlimit || in lwp_cond_wait()
1600 (caddr_t)mp >= p->p_as->a_userlimit) in lwp_cond_wait()
1633 * set up another on_fault() for a possible fault in lwp_cond_wait()
1648 * Force Copy-on-write if necessary and ensure that the in lwp_cond_wait()
1652 fuword8_noerr(&mp->mutex_type, (uint8_t *)&mtype); in lwp_cond_wait()
1653 suword8_noerr(&mp->mutex_type, mtype); in lwp_cond_wait()
1657 if (!get_lwpchan(p->p_as, (caddr_t)mp, mtype, in lwp_cond_wait()
1663 fuword16_noerr(&cv->cond_type, (uint16_t *)&type); in lwp_cond_wait()
1664 suword16_noerr(&cv->cond_type, type); in lwp_cond_wait()
1666 if (!get_lwpchan(p->p_as, (caddr_t)cv, type, in lwp_cond_wait()
1693 suword8_noerr(&cv->cond_waiters_kernel, 1); in lwp_cond_wait()
1699 ulock_clear(&mp->mutex_lockw); in lwp_cond_wait()
1700 fuword8_noerr(&mp->mutex_waiters, &waiters); in lwp_cond_wait()
1712 * re-evaluating it. in lwp_cond_wait()
1715 suword8_noerr(&mp->mutex_waiters, waiters); in lwp_cond_wait()
1720 suword8_noerr(&cv->cond_waiters_kernel, 1); in lwp_cond_wait()
1739 if (check_park && (!schedctl_is_park() || t->t_unpark)) { in lwp_cond_wait()
1741 * We received a signal at user-level before calling here in lwp_cond_wait()
1746 t->t_unpark = 0; in lwp_cond_wait()
1754 mutex_enter(&t->t_delay_lock); in lwp_cond_wait()
1756 mutex_exit(&t->t_delay_lock); in lwp_cond_wait()
1761 t->t_flag |= T_WAITCVSEM; in lwp_cond_wait()
1768 mutex_exit(&t->t_delay_lock); in lwp_cond_wait()
1775 t->t_flag &= ~(T_WAITCVSEM | T_WAKEABLE); in lwp_cond_wait()
1778 if (ISSIG(t, FORREAL) || lwp->lwp_sysabort || in lwp_cond_wait()
1781 else if (imm_timeout || (timedwait && tim == -1)) in lwp_cond_wait()
1783 lwp->lwp_asleep = 0; in lwp_cond_wait()
1784 lwp->lwp_sysabort = 0; in lwp_cond_wait()
1787 if (t->t_mstate == LMS_USER_LOCK) in lwp_cond_wait()
1800 if (t->t_release) in lwp_cond_wait()
1809 * returning to caller, since the caller always re-acquires it. in lwp_cond_wait()
1815 ulock_clear(&mp->mutex_lockw); in lwp_cond_wait()
1816 fuword8_noerr(&mp->mutex_waiters, &waiters); in lwp_cond_wait()
1823 suword8_noerr(&mp->mutex_waiters, waiters); in lwp_cond_wait()
1836 if (t->t_mstate == LMS_USER_LOCK) in lwp_cond_wait()
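
The wait path above is ordering-sensitive: cond_waiters_kernel is set and the thread is queued under the lwpchan bucket lock before the user mutex is dropped, so a signaller that runs after the mutex is released cannot test the flag too early. A minimal model of that handshake, with pthread primitives standing in for the lwpchan machinery and spurious wakeups ignored (kcond_t and both functions are illustrative):

#include <pthread.h>

typedef struct {
	pthread_mutex_t internal;	/* plays the lwpchan bucket lock */
	pthread_cond_t cv;
	int waiters;			/* plays cond_waiters_kernel */
} kcond_t;

/* Entered holding *user_mutex, as lwp_cond_wait() is entered holding mp. */
static void
kcond_wait(kcond_t *kc, pthread_mutex_t *user_mutex)
{
	pthread_mutex_lock(&kc->internal);
	kc->waiters = 1;			/* publish before dropping */
	pthread_mutex_unlock(user_mutex);	/* signallers may now race in... */
	pthread_cond_wait(&kc->cv, &kc->internal); /* ...but must take internal */
	pthread_mutex_unlock(&kc->internal);
	pthread_mutex_lock(user_mutex);		/* caller always re-acquires */
}

static void
kcond_signal(kcond_t *kc)
{
	pthread_mutex_lock(&kc->internal);
	if (kc->waiters) {		/* read under the same lock: no miss */
		kc->waiters = 0;
		pthread_cond_signal(&kc->cv);
	}
	pthread_mutex_unlock(&kc->internal);
}
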
1856 if ((caddr_t)cv >= p->p_as->a_userlimit) in lwp_cond_signal()
1868 * Force Copy-on-write if necessary and ensure that the in lwp_cond_signal()
1872 fuword16_noerr(&cv->cond_type, (uint16_t *)&type); in lwp_cond_signal()
1873 suword16_noerr(&cv->cond_type, type); in lwp_cond_signal()
1874 if (!get_lwpchan(curproc->p_as, (caddr_t)cv, type, in lwp_cond_signal()
1881 fuword8_noerr(&cv->cond_waiters_kernel, &waiters); in lwp_cond_signal()
1886 * could not have been re-used or unmapped (for correctly in lwp_cond_signal()
1894 suword8_noerr(&cv->cond_waiters_kernel, waiters); in lwp_cond_signal()
1921 if ((caddr_t)cv >= p->p_as->a_userlimit) in lwp_cond_broadcast()
1933 * Force Copy-on-write if necessary and ensure that the in lwp_cond_broadcast()
1937 fuword16_noerr(&cv->cond_type, (uint16_t *)&type); in lwp_cond_broadcast()
1938 suword16_noerr(&cv->cond_type, type); in lwp_cond_broadcast()
1939 if (!get_lwpchan(curproc->p_as, (caddr_t)cv, type, in lwp_cond_broadcast()
1946 fuword8_noerr(&cv->cond_waiters_kernel, &waiters); in lwp_cond_broadcast()
1949 suword8_noerr(&cv->cond_waiters_kernel, 0); in lwp_cond_broadcast()
1975 if ((caddr_t)sp >= p->p_as->a_userlimit) in lwp_sema_trywait()
1987 * Force Copy-on-write if necessary and ensure that the in lwp_sema_trywait()
1991 fuword16_noerr((void *)&sp->sema_type, (uint16_t *)&type); in lwp_sema_trywait()
1992 suword16_noerr((void *)&sp->sema_type, type); in lwp_sema_trywait()
1993 if (!get_lwpchan(p->p_as, (caddr_t)sp, type, in lwp_sema_trywait()
2000 fuword32_noerr((void *)&sp->sema_count, (uint32_t *)&count); in lwp_sema_trywait()
2004 suword32_noerr((void *)&sp->sema_count, --count); in lwp_sema_trywait()
2006 fuword8_noerr(&sp->sema_waiters, &waiters); in lwp_sema_trywait()
2009 suword8_noerr(&sp->sema_waiters, waiters); in lwp_sema_trywait()
2033 clock_t tim = -1; in lwp_sema_timedwait()
2046 if ((caddr_t)sp >= p->p_as->a_userlimit) in lwp_sema_timedwait()
2071 * Force Copy-on-write if necessary and ensure that the in lwp_sema_timedwait()
2075 fuword16_noerr((void *)&sp->sema_type, (uint16_t *)&type); in lwp_sema_timedwait()
2076 suword16_noerr((void *)&sp->sema_type, type); in lwp_sema_timedwait()
2077 if (!get_lwpchan(p->p_as, (caddr_t)sp, type, in lwp_sema_timedwait()
2084 fuword32_noerr((void *)&sp->sema_count, (uint32_t *)&count); in lwp_sema_timedwait()
2095 suword8_noerr(&sp->sema_waiters, 1); in lwp_sema_timedwait()
2098 if (check_park && (!schedctl_is_park() || t->t_unpark)) { in lwp_sema_timedwait()
2100 * We received a signal at user-level before calling in lwp_sema_timedwait()
2105 t->t_unpark = 0; in lwp_sema_timedwait()
2113 mutex_enter(&t->t_delay_lock); in lwp_sema_timedwait()
2115 mutex_exit(&t->t_delay_lock); in lwp_sema_timedwait()
2120 t->t_flag |= T_WAITCVSEM; in lwp_sema_timedwait()
2127 mutex_exit(&t->t_delay_lock); in lwp_sema_timedwait()
2134 t->t_flag &= ~(T_WAITCVSEM | T_WAKEABLE); in lwp_sema_timedwait()
2138 if (ISSIG(t, FORREAL) || lwp->lwp_sysabort || in lwp_sema_timedwait()
2141 else if (imm_timeout || (timedwait && tim == -1)) in lwp_sema_timedwait()
2143 lwp->lwp_asleep = 0; in lwp_sema_timedwait()
2144 lwp->lwp_sysabort = 0; in lwp_sema_timedwait()
2149 fuword32_noerr((void *)&sp->sema_count, (uint32_t *)&count); in lwp_sema_timedwait()
2152 suword32_noerr((void *)&sp->sema_count, --count); in lwp_sema_timedwait()
2155 suword8_noerr(&sp->sema_waiters, waiters); in lwp_sema_timedwait()
2182 if ((caddr_t)sp >= p->p_as->a_userlimit) in lwp_sema_post()
2194 * Force Copy-on-write if necessary and ensure that the in lwp_sema_post()
2198 fuword16_noerr(&sp->sema_type, (uint16_t *)&type); in lwp_sema_post()
2199 suword16_noerr(&sp->sema_type, type); in lwp_sema_post()
2200 if (!get_lwpchan(curproc->p_as, (caddr_t)sp, type, in lwp_sema_post()
2207 fuword32_noerr(&sp->sema_count, (uint32_t *)&count); in lwp_sema_post()
2211 suword32_noerr(&sp->sema_count, ++count); in lwp_sema_post()
2213 fuword8_noerr(&sp->sema_waiters, &waiters); in lwp_sema_post()
2216 suword8_noerr(&sp->sema_waiters, waiters); in lwp_sema_post()
2240 * reflect the new state of the queue. For a safe hand-off we copy the new
2241 * rwstate value back to userland before we wake any of the new lock holders.
2263 disp_lock_enter(&sqh->sq_lock); in lwp_rwlock_release()
2264 tpp = &sqh->sq_queue.sq_first; in lwp_rwlock_release()
2266 if (tp->t_lwpchan.lc_wchan0 == lwpchan->lc_wchan0 && in lwp_rwlock_release()
2267 tp->t_lwpchan.lc_wchan == lwpchan->lc_wchan) { in lwp_rwlock_release()
2268 if (tp->t_writer & TRW_WANT_WRITE) { in lwp_rwlock_release()
2272 /* Just one writer to wake. */ in lwp_rwlock_release()
2288 /* Add reader to wake list. */ in lwp_rwlock_release()
2290 tp->t_link = wakelist; in lwp_rwlock_release()
2302 tpp = &tp->t_link; in lwp_rwlock_release()
2306 suword32_noerr(&rw->rwlock_readers, rwstate); in lwp_rwlock_release()
2308 /* Wake the new lock holder(s) up. */ in lwp_rwlock_release()
2312 tp->t_wchan0 = NULL; in lwp_rwlock_release()
2313 tp->t_wchan = NULL; in lwp_rwlock_release()
2314 tp->t_sobj_ops = NULL; in lwp_rwlock_release()
2315 tp->t_writer |= TRW_LOCK_GRANTED; in lwp_rwlock_release()
2316 tpnext = tp->t_link; in lwp_rwlock_release()
2317 tp->t_link = NULL; in lwp_rwlock_release()
2323 disp_lock_exit(&sqh->sq_lock); in lwp_rwlock_release()
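
lwp_rwlock_release() accumulates a wake list while updating a private copy of rwstate, stores the final value back to userland, and only then grants the waiters, so user code never observes a state word that disagrees with who was woken. A sketch of the grant policy itself, one writer or a batch of leading readers (the state-word layout, rwaiter_t, and grant_next() are illustrative):

#define	WRITE_LOCKED	0x80000000u	/* invented layout: top bit = writer */

typedef struct rwaiter { struct rwaiter *link; int wants_write; } rwaiter_t;

/* Pop the next grant set from q and set *state to match.  The caller
 * must publish *state before waking the returned list. */
static rwaiter_t *
grant_next(rwaiter_t **q, unsigned *state)
{
	rwaiter_t *wake = NULL, **tail = &wake, *w;

	*state = 0;
	if ((w = *q) == NULL)
		return (NULL);
	if (w->wants_write) {		/* one writer takes the whole lock */
		*q = w->link;
		w->link = NULL;
		*state = WRITE_LOCKED;
		return (w);
	}
	while ((w = *q) != NULL && !w->wants_write) {
		*q = w->link;		/* grant every leading reader */
		w->link = NULL;
		*tail = w;
		tail = &w->link;
		(*state)++;		/* low bits hold the reader count */
	}
	return (wake);
}
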
2327 * We enter here holding the user-level mutex, which we must release before
2347 clock_t tim = -1; in lwp_rwlock_lock()
2360 if ((caddr_t)rw >= p->p_as->a_userlimit) in lwp_rwlock_lock()
2393 * Set up another on_fault() for a possible fault in lwp_rwlock_lock()
2417 * Force Copy-on-write if necessary and ensure that the in lwp_rwlock_lock()
2421 mp = &rw->mutex; in lwp_rwlock_lock()
2422 fuword8_noerr(&mp->mutex_type, (uint8_t *)&mtype); in lwp_rwlock_lock()
2423 fuword16_noerr(&rw->rwlock_type, (uint16_t *)&type); in lwp_rwlock_lock()
2424 suword8_noerr(&mp->mutex_type, mtype); in lwp_rwlock_lock()
2425 suword16_noerr(&rw->rwlock_type, type); in lwp_rwlock_lock()
2434 if (!get_lwpchan(p->p_as, (caddr_t)mp, mtype, in lwp_rwlock_lock()
2441 if (!get_lwpchan(p->p_as, (caddr_t)rw, type, in lwp_rwlock_lock()
2470 * The possibility of spurious wake-ups or killed waiters means in lwp_rwlock_lock()
2478 fuword32_noerr(&rw->rwlock_readers, &rwstate); in lwp_rwlock_lock()
2480 * We cannot legitimately get here from user-level in lwp_rwlock_lock()
2482 * Set it now to guard against user-level error. in lwp_rwlock_lock()
2526 !(tp->t_writer & TRW_WANT_WRITE))) { in lwp_rwlock_lock()
2537 suword32_noerr(&rw->rwlock_readers, rwstate); in lwp_rwlock_lock()
2569 t->t_writer = 0; in lwp_rwlock_lock()
2571 t->t_writer = TRW_WANT_WRITE; in lwp_rwlock_lock()
2572 suword32_noerr(&rw->rwlock_readers, rwstate); in lwp_rwlock_lock()
2578 ulock_clear(&mp->mutex_lockw); in lwp_rwlock_lock()
2579 fuword8_noerr(&mp->mutex_waiters, &mwaiters); in lwp_rwlock_lock()
2590 * update the waiter bit correctly by re-evaluating it. in lwp_rwlock_lock()
2593 suword8_noerr(&mp->mutex_waiters, mwaiters); in lwp_rwlock_lock()
2614 mutex_enter(&t->t_delay_lock); in lwp_rwlock_lock()
2616 mutex_exit(&t->t_delay_lock); in lwp_rwlock_lock()
2621 t->t_flag |= T_WAITCVSEM; in lwp_rwlock_lock()
2629 mutex_exit(&t->t_delay_lock); in lwp_rwlock_lock()
2641 acquired = (t->t_writer & TRW_LOCK_GRANTED); in lwp_rwlock_lock()
2642 t->t_writer = 0; in lwp_rwlock_lock()
2643 t->t_flag &= ~(T_WAITCVSEM | T_WAKEABLE); in lwp_rwlock_lock()
2646 if (ISSIG(t, FORREAL) || lwp->lwp_sysabort || MUSTRETURN(p, t)) in lwp_rwlock_lock()
2648 else if (imm_timeout || (timedwait && tim == -1)) in lwp_rwlock_lock()
2650 lwp->lwp_asleep = 0; in lwp_rwlock_lock()
2651 lwp->lwp_sysabort = 0; in lwp_rwlock_lock()
2660 if (t->t_mstate == LMS_USER_LOCK) in lwp_rwlock_lock()
2677 ulock_clear(&mp->mutex_lockw); in lwp_rwlock_lock()
2678 fuword8_noerr(&mp->mutex_waiters, &mwaiters); in lwp_rwlock_lock()
2685 suword8_noerr(&mp->mutex_waiters, mwaiters); in lwp_rwlock_lock()
2696 if (t->t_mstate == LMS_USER_LOCK) in lwp_rwlock_lock()
2704 * We enter here holding the user-level mutex but, unlike lwp_rwlock_lock(),
2722 if ((caddr_t)rw >= p->p_as->a_userlimit) in lwp_rwlock_unlock()
2739 * Force Copy-on-write if necessary and ensure that the in lwp_rwlock_unlock()
2743 fuword16_noerr(&rw->rwlock_type, (uint16_t *)&type); in lwp_rwlock_unlock()
2744 suword16_noerr(&rw->rwlock_type, type); in lwp_rwlock_unlock()
2753 if (!get_lwpchan(p->p_as, (caddr_t)rw, type, in lwp_rwlock_unlock()
2771 fuword32_noerr(&rw->rwlock_readers, &rwstate); in lwp_rwlock_unlock()
2775 rwstate--; in lwp_rwlock_unlock()
2779 suword32_noerr(&rw->rwlock_readers, rwstate); in lwp_rwlock_unlock()
2814 * Return the owner of the user-level s-object.
2825 * Wake up a thread asleep on a user-level synchronization
2832 if (t->t_wchan0 != NULL) { in lwp_unsleep()
2834 sleepq_t *sqp = t->t_sleepq; in lwp_unsleep()
2837 sqh = lwpsqhash(&t->t_lwpchan); in lwp_unsleep()
2838 ASSERT(&sqh->sq_queue == sqp); in lwp_unsleep()
2840 disp_lock_exit_high(&sqh->sq_lock); in lwp_unsleep()
2849 * Change the priority of a thread asleep on a user-level
2854 * o re-enqueue the thread.
2861 if (t->t_wchan0 != NULL) { in lwp_change_pri()
2862 sleepq_t *sqp = t->t_sleepq; in lwp_change_pri()
2872 * Clean up a left-over process-shared robust mutex
2887 if ((ent->lwpchan_type & (USYNC_PROCESS | LOCK_ROBUST)) in lwp_mutex_cleanup()
2891 lp = (lwp_mutex_t *)ent->lwpchan_addr; in lwp_mutex_cleanup()
2895 lwpchan_unlock(&ent->lwpchan_lwpchan, LWPCHAN_MPPOOL); in lwp_mutex_cleanup()
2901 fuword32_noerr(&lp->mutex_ownerpid, (uint32_t *)&owner_pid); in lwp_mutex_cleanup()
2903 if (UPIMUTEX(ent->lwpchan_type)) { in lwp_mutex_cleanup()
2904 lwpchan_t lwpchan = ent->lwpchan_lwpchan; in lwp_mutex_cleanup()
2907 if (owner_pid != curproc->p_pid) in lwp_mutex_cleanup()
2909 mutex_enter(&upibp->upib_lock); in lwp_mutex_cleanup()
2911 if (upimutex == NULL || upimutex->upi_owner != curthread) { in lwp_mutex_cleanup()
2912 mutex_exit(&upibp->upib_lock); in lwp_mutex_cleanup()
2915 mutex_exit(&upibp->upib_lock); in lwp_mutex_cleanup()
2918 suword8_noerr(&lp->mutex_lockw, 0); in lwp_mutex_cleanup()
2921 lwpchan_lock(&ent->lwpchan_lwpchan, LWPCHAN_MPPOOL); in lwp_mutex_cleanup()
2927 * There is no harm in this since user-level libc code in lwp_mutex_cleanup()
2930 suword8_noerr(&lp->mutex_spinners, 0); in lwp_mutex_cleanup()
2931 if (owner_pid != curproc->p_pid) { in lwp_mutex_cleanup()
2934 * If there are waiters, we wake up one or all of them. in lwp_mutex_cleanup()
2935 * It doesn't hurt to wake them up in error since in lwp_mutex_cleanup()
2939 fuword8_noerr(&lp->mutex_waiters, &waiters); in lwp_mutex_cleanup()
2941 fuword16_noerr(&lp->mutex_flag, &flag); in lwp_mutex_cleanup()
2943 lwp_release_all(&ent->lwpchan_lwpchan); in lwp_mutex_cleanup()
2944 suword8_noerr(&lp->mutex_waiters, 0); in lwp_mutex_cleanup()
2945 } else if (lwp_release(&ent->lwpchan_lwpchan, in lwp_mutex_cleanup()
2947 suword8_noerr(&lp->mutex_waiters, in lwp_mutex_cleanup()
2956 ulock_clear(&lp->mutex_lockw); in lwp_mutex_cleanup()
2957 fuword8_noerr(&lp->mutex_waiters, &waiters); in lwp_mutex_cleanup()
2959 lwp_release(&ent->lwpchan_lwpchan, &waiters, 0)) in lwp_mutex_cleanup()
2960 suword8_noerr(&lp->mutex_waiters, waiters); in lwp_mutex_cleanup()
2962 lwpchan_unlock(&ent->lwpchan_lwpchan, LWPCHAN_MPPOOL); in lwp_mutex_cleanup()
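
lwp_mutex_cleanup() separates a robust lock last held by the exiting process, which is marked LOCK_OWNERDEAD and handed to one waiter, from a lock it cannot vouch for, which is poisoned for every future locker. A condensed, loosely modeled version of that branch, with invented flag bits and the actual wakeups reduced to comments:

#define	F_OWNERDEAD		0x1	/* stand-ins for the LOCK_* flag */
#define	F_NOTRECOVERABLE	0x2	/* bits used by the real code */

typedef struct { unsigned flag; int lockw; } rmutex_t;

static void
robust_cleanup(rmutex_t *m, int owner_is_mine)
{
	if (!owner_is_mine) {
		m->flag |= F_NOTRECOVERABLE;	/* poison the lock... */
		m->lockw = 0;
		/* ...and wake all waiters so they can observe the flag */
	} else {
		m->flag |= F_OWNERDEAD;	/* next owner must repair state */
		m->lockw = 0;
		/* wake one waiter; it re-derives the waiters hint */
	}
}
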
2971 * Register a process-shared robust mutex in the lwpchan cache.
2991 * Force Copy-on-write if necessary and ensure that the in lwp_mutex_register()
2995 fuword8_noerr(&lp->mutex_type, &type); in lwp_mutex_register()
2996 suword8_noerr(&lp->mutex_type, type); in lwp_mutex_register()
3000 } else if (!lwpchan_get_mapping(curproc->p_as, (caddr_t)lp, in lwp_mutex_register()
3014 * There is a user-level robust lock registration in libc.
3015 * Mark it as invalid by storing -1 into the location of the pointer.
3021 (void) sulword(uaddr, (ulong_t)-1); in lwp_mutex_unregister()
3024 (void) suword32(uaddr, (uint32_t)-1); in lwp_mutex_unregister()
3042 if ((caddr_t)lp >= p->p_as->a_userlimit) in lwp_mutex_trylock()
3054 * Force Copy-on-write if necessary and ensure that the in lwp_mutex_trylock()
3058 fuword8_noerr(&lp->mutex_type, (uint8_t *)&type); in lwp_mutex_trylock()
3059 suword8_noerr(&lp->mutex_type, type); in lwp_mutex_trylock()
3074 (type & USYNC_PROCESS)? p->p_pid : 0); in lwp_mutex_trylock()
3083 if (!get_lwpchan(curproc->p_as, (caddr_t)lp, type, in lwp_mutex_trylock()
3091 fuword16_noerr(&lp->mutex_flag, &flag); in lwp_mutex_trylock()
3101 if (!ulock_try(&lp->mutex_lockw)) in lwp_mutex_trylock()
3104 set_owner_pid(lp, owner, (type & USYNC_PROCESS)? p->p_pid : 0); in lwp_mutex_trylock()
3106 fuword16_noerr(&lp->mutex_flag, &flag); in lwp_mutex_trylock()
3121 if (t->t_mstate == LMS_USER_LOCK) in lwp_mutex_trylock()
3149 if ((caddr_t)lp >= p->p_as->a_userlimit) in lwp_mutex_unlock()
3160 * Force Copy-on-write if necessary and ensure that the in lwp_mutex_unlock()
3164 fuword8_noerr(&lp->mutex_type, (uint8_t *)&type); in lwp_mutex_unlock()
3165 suword8_noerr(&lp->mutex_type, type); in lwp_mutex_unlock()
3177 if (!get_lwpchan(curproc->p_as, (caddr_t)lp, type, in lwp_mutex_unlock()
3185 fuword16_noerr(&lp->mutex_flag, &flag); in lwp_mutex_unlock()
3189 suword16_noerr(&lp->mutex_flag, flag); in lwp_mutex_unlock()
3193 ulock_clear(&lp->mutex_lockw); in lwp_mutex_unlock()
3195 * Always wake up an lwp (if any) waiting on lwpchan. The woken lwp will in lwp_mutex_unlock()
3196 * re-try the lock in lwp_mutex_timedlock(). The call to lwp_release() in lwp_mutex_unlock()
3201 * sleeping since it got the lock on the re-try. The waiter in lwp_mutex_unlock()
3206 * was woken up by a signal. This time, the waiter recomputes in lwp_mutex_unlock()
3209 * memory that has been re-used after the lock was dropped. in lwp_mutex_unlock()
3213 fuword8_noerr(&lp->mutex_waiters, &waiters); in lwp_mutex_unlock()
3218 suword8_noerr(&lp->mutex_waiters, 0); in lwp_mutex_unlock()
3220 suword8_noerr(&lp->mutex_waiters, waiters); in lwp_mutex_unlock()