Lines Matching refs:mp
(cross-reference listing: each hit gives the source line number, the referencing code, and the enclosing function; "argument" and "local" mark the lines where mp, the mutex_t pointer, is declared)
130 mutex_init(mutex_t *mp, int type, void *arg) in mutex_init() argument
170 if (!(mp->mutex_flag & LOCK_INITED)) { in mutex_init()
171 mp->mutex_type = (uint8_t)type; in mutex_init()
172 atomic_or_16(&mp->mutex_flag, LOCK_INITED); in mutex_init()
173 mp->mutex_magic = MUTEX_MAGIC; in mutex_init()
174 } else if (type != mp->mutex_type || in mutex_init()
175 ((type & LOCK_PRIO_PROTECT) && mp->mutex_ceiling != ceil)) { in mutex_init()
177 } else if (mutex_consistent(mp) != 0) { in mutex_init()
182 register_lock(mp); in mutex_init()
184 (void) memset(mp, 0, sizeof (*mp)); in mutex_init()
185 mp->mutex_type = (uint8_t)type; in mutex_init()
186 mp->mutex_flag = LOCK_INITED; in mutex_init()
187 mp->mutex_magic = MUTEX_MAGIC; in mutex_init()
191 mp->mutex_ceiling = ceil; in mutex_init()
202 ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && in mutex_init()
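The refs above are mutex_init(): a first initialization stamps the type, the LOCK_INITED flag, and the magic number (the flag is set with atomic_or_16 so a concurrent initializer cannot lose it); a second initialization must agree with the first or it fails, and robust locks additionally get register_lock() plus an alignment check. A minimal sketch of the init-once decision, with stand-in constants rather than the private libc values:

#include <errno.h>
#include <stdint.h>

#define	LOCK_INITED	0x01	/* stand-in flag bit */
#define	MUTEX_MAGIC	0x4d58	/* stand-in magic */

typedef struct {
	uint8_t mutex_type;
	uint16_t mutex_flag;
	uint16_t mutex_magic;
} my_mutex_t;

static int
my_mutex_init(my_mutex_t *mp, int type)
{
	if (!(mp->mutex_flag & LOCK_INITED)) {
		/* first init wins */
		mp->mutex_type = (uint8_t)type;
		mp->mutex_flag |= LOCK_INITED;	/* real code: atomic_or_16() */
		mp->mutex_magic = MUTEX_MAGIC;
		return (0);
	}
	if (type != mp->mutex_type)
		return (EINVAL);	/* conflicting re-initialization */
	/*
	 * The real code additionally lets a robust lock be
	 * re-initialized, succeeding only if mutex_consistent()
	 * can repair it (EBUSY otherwise).
	 */
	return (EBUSY);
}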
214 _ceil_mylist_del(mutex_t *mp) in _ceil_mylist_del() argument
223 if (mcp->mxchain_mx == mp) { in _ceil_mylist_del()
237 _ceil_mylist_add(mutex_t *mp) in _ceil_mylist_add() argument
244 mcp->mxchain_mx = mp; in _ceil_mylist_add()
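_ceil_mylist_add() and _ceil_mylist_del() keep a per-thread chain (mxchain) of the priority-ceiling mutexes the thread currently holds, so its scheduling priority can be raised and restored as ceilings come and go. A sketch of such a chain; the type and field names imitate the refs but are not the libc definitions:

#include <stdlib.h>

typedef struct mxchain {
	struct mxchain *mxchain_next;
	void *mxchain_mx;		/* held priority-ceiling mutex */
} mxchain_t;

static _Thread_local mxchain_t *mxchain_head;	/* per-thread chain */

static int
ceil_mylist_add(void *mp)	/* push a newly held ceiling mutex */
{
	mxchain_t *mcp = malloc(sizeof (*mcp));

	if (mcp == NULL)
		return (-1);
	mcp->mxchain_mx = mp;
	mcp->mxchain_next = mxchain_head;
	mxchain_head = mcp;
	return (0);
}

static int
ceil_mylist_del(void *mp)	/* unlink; returns 1 if it was on the chain */
{
	mxchain_t **prev = &mxchain_head;
	mxchain_t *mcp;

	for (mcp = *prev; mcp != NULL; prev = &mcp->mxchain_next, mcp = *prev) {
		if (mcp->mxchain_mx == mp) {
			*prev = mcp->mxchain_next;
			free(mcp);
			return (1);
		}
	}
	return (0);
}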
416 spin_lock_set(mutex_t *mp) in spin_lock_set() argument
421 if (set_lock_byte(&mp->mutex_lockw) == 0) { in spin_lock_set()
422 mp->mutex_owner = (uintptr_t)self; in spin_lock_set()
429 if (mutex_queuelock_adaptive(mp) == 0 || in spin_lock_set()
430 set_lock_byte(&mp->mutex_lockw) == 0) { in spin_lock_set()
431 mp->mutex_owner = (uintptr_t)self; in spin_lock_set()
439 if (mutex_queuelock_adaptive(mp) == 0 || in spin_lock_set()
440 set_lock_byte(&mp->mutex_lockw) == 0) { in spin_lock_set()
441 mp->mutex_owner = (uintptr_t)self; in spin_lock_set()
449 (void) ___lwp_mutex_timedlock(mp, NULL, self); in spin_lock_set()
453 spin_lock_clear(mutex_t *mp) in spin_lock_clear() argument
457 mp->mutex_owner = 0; in spin_lock_clear()
458 if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) { in spin_lock_clear()
459 (void) ___lwp_mutex_wakeup(mp, 0); in spin_lock_clear()
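spin_lock_set() and spin_lock_clear() implement libc's internal spin locks: set is a test-and-set on the lock byte with a bounded spin (mutex_queuelock_adaptive() between attempts) and a final fallback into the kernel; clear swaps the whole lockword to zero and, if the old value carried waiter bits (WAITERMASK), wakes the sleepers. The same shape in portable C11 atomics; the bit layout and the two stubs are illustrative only:

#include <stdatomic.h>
#include <sched.h>

#define	LOCKSET		0x00000001u	/* illustrative lock bit */
#define	WAITERMASK	0xffff0000u	/* illustrative waiter bits */

typedef struct {
	atomic_uint lockword;
} spin_t;

static void
block_in_kernel(spin_t *sp)	/* stand-in for ___lwp_mutex_timedlock() */
{
	(void) sp;
	sched_yield();
}

static void
wake_sleepers(spin_t *sp)	/* stand-in for ___lwp_mutex_wakeup() */
{
	(void) sp;
}

static void
spin_set(spin_t *sp)
{
	int spins = 1000;	/* arbitrary bound before blocking */

	while (atomic_fetch_or(&sp->lockword, LOCKSET) & LOCKSET) {
		if (--spins == 0) {
			block_in_kernel(sp);
			spins = 1000;
		}
	}
}

static void
spin_clear(spin_t *sp)
{
	/* swap the word to 0; old waiter bits mean someone is asleep */
	if (atomic_exchange(&sp->lockword, 0) & WAITERMASK)
		wake_sleepers(sp);
}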
984 mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp) in mutex_lock_kernel() argument
988 int mtype = mp->mutex_type; in mutex_lock_kernel()
994 self->ul_wchan = mp; in mutex_lock_kernel()
997 self->ul_td_evbuf.eventdata = mp; in mutex_lock_kernel()
1005 DTRACE_PROBE1(plockstat, mutex__block, mp); in mutex_lock_kernel()
1012 if ((error = ___lwp_mutex_timedlock(mp, tsp, self)) != 0 && in mutex_lock_kernel()
1024 if (mp->mutex_ownerpid == udp->pid) { in mutex_lock_kernel()
1042 ASSERT(mp->mutex_owner == (uintptr_t)self); in mutex_lock_kernel()
1043 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1); in mutex_lock_kernel()
1044 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); in mutex_lock_kernel()
1046 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0); in mutex_lock_kernel()
1047 DTRACE_PROBE2(plockstat, mutex__error, mp, error); in mutex_lock_kernel()
1058 mutex_trylock_kernel(mutex_t *mp) in mutex_trylock_kernel() argument
1062 int mtype = mp->mutex_type; in mutex_trylock_kernel()
1071 if ((error = ___lwp_mutex_trylock(mp, self)) != 0 && in mutex_trylock_kernel()
1083 if (mp->mutex_ownerpid == udp->pid) { in mutex_trylock_kernel()
1096 ASSERT(mp->mutex_owner == (uintptr_t)self); in mutex_trylock_kernel()
1097 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); in mutex_trylock_kernel()
1099 DTRACE_PROBE2(plockstat, mutex__error, mp, error); in mutex_trylock_kernel()
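mutex_lock_kernel() and mutex_trylock_kernel() are the slow paths that park in the kernel: they record the wait channel and debugger event data on the ulwp, bracket the syscall with plockstat probes (mutex__block before; mutex__blocked plus mutex__acquire or mutex__error after), and for process-shared robust locks re-check mutex_ownerpid after an apparent failure, in case this process in fact inherited a dead owner's lock. A schematic only; the syscall is stubbed and the probes are reduced to comments:

#include <time.h>

static int
lwp_mutex_timedlock_stub(void *mp, const struct timespec *tsp)
{
	(void) mp;
	(void) tsp;
	return (0);	/* pretend the kernel granted the lock */
}

static int
mutex_lock_kernel_sketch(void *mp, const struct timespec *tsp)
{
	int error;

	/* self->ul_wchan = mp; td_evbuf.eventdata = mp; */
	/* DTRACE plockstat: mutex__block(mp) */
	error = lwp_mutex_timedlock_stub(mp, tsp);
	if (error == 0) {
		/* DTRACE: mutex__blocked(mp, success), mutex__acquire(mp) */
	} else {
		/* DTRACE: mutex__blocked(mp, failure), mutex__error(mp, error) */
	}
	return (error);
}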
1248 mutex_trylock_adaptive(mutex_t *mp, int tryhard) in mutex_trylock_adaptive() argument
1254 volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw; in mutex_trylock_adaptive()
1255 volatile uint64_t *ownerp = (volatile uint64_t *)&mp->mutex_owner; in mutex_trylock_adaptive()
1261 ASSERT(!(mp->mutex_type & USYNC_PROCESS)); in mutex_trylock_adaptive()
1263 if (MUTEX_OWNED(mp, self)) in mutex_trylock_adaptive()
1269 if (mp->mutex_flag & LOCK_NOTRECOVERABLE) { in mutex_trylock_adaptive()
1270 ASSERT(mp->mutex_type & LOCK_ROBUST); in mutex_trylock_adaptive()
1303 if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) in mutex_trylock_adaptive()
1305 DTRACE_PROBE1(plockstat, mutex__spin, mp); in mutex_trylock_adaptive()
1338 new_lockword = spinners_decr(&mp->mutex_lockword); in mutex_trylock_adaptive()
1363 if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) { in mutex_trylock_adaptive()
1364 ASSERT(mp->mutex_type & LOCK_ROBUST); in mutex_trylock_adaptive()
1370 (void) clear_lockbyte(&mp->mutex_lockword); in mutex_trylock_adaptive()
1378 DTRACE_PROBE3(plockstat, mutex__spun, mp, 0, count); in mutex_trylock_adaptive()
1381 DTRACE_PROBE2(plockstat, mutex__error, mp, error); in mutex_trylock_adaptive()
1385 DTRACE_PROBE3(plockstat, mutex__spun, mp, 1, count); in mutex_trylock_adaptive()
1387 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count); in mutex_trylock_adaptive()
1388 if (mp->mutex_flag & LOCK_OWNERDEAD) { in mutex_trylock_adaptive()
1389 ASSERT(mp->mutex_type & LOCK_ROBUST); in mutex_trylock_adaptive()
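mutex_trylock_adaptive() is the adaptive core: refuse immediately if a robust lock is LOCK_NOTRECOVERABLE, cap the number of concurrent spinners (spinners_incr/spinners_decr in the lockword), and spin on the lock byte only while the current owner appears to be running on a CPU; success may still surface owner-death handling for a LOCK_OWNERDEAD robust lock. A portable sketch of owner-aware bounded spinning; owner_is_running() is an invented predicate (the real test peeks at the owner's ulwp state):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <errno.h>

typedef struct {
	atomic_uchar lockw;
	atomic_uintptr_t owner;
} amutex_t;

static bool
owner_is_running(uintptr_t owner)
{
	(void) owner;
	return (true);	/* real code inspects the owner's thread state */
}

static int
trylock_adaptive(amutex_t *mp, uintptr_t self, int max_count)
{
	for (int count = 0; count < max_count; count++) {
		if (atomic_load(&mp->lockw) == 0 &&
		    atomic_exchange(&mp->lockw, 1) == 0) {
			atomic_store(&mp->owner, self);
			return (0);	/* DTRACE: mutex__spun(success) */
		}
		uintptr_t owner = atomic_load(&mp->owner);
		if (owner != 0 && !owner_is_running(owner))
			break;	/* owner off-CPU: spinning is wasted work */
	}
	return (EBUSY);	/* caller falls back to the sleep queue */
}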
1402 mutex_queuelock_adaptive(mutex_t *mp) in mutex_queuelock_adaptive() argument
1410 ASSERT(mp->mutex_type == USYNC_THREAD); in mutex_queuelock_adaptive()
1415 lockp = (volatile uint8_t *)&mp->mutex_lockw; in mutex_queuelock_adaptive()
1416 ownerp = (volatile uint64_t *)&mp->mutex_owner; in mutex_queuelock_adaptive()
1438 mutex_trylock_process(mutex_t *mp, int tryhard) in mutex_trylock_process() argument
1443 volatile uint64_t *lockp = (volatile uint64_t *)&mp->mutex_lockword64; in mutex_trylock_process()
1452 (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && in mutex_trylock_process()
1453 self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST)); in mutex_trylock_process()
1456 ASSERT(mp->mutex_type & USYNC_PROCESS); in mutex_trylock_process()
1458 if (shared_mutex_held(mp)) in mutex_trylock_process()
1464 if (mp->mutex_flag & LOCK_NOTRECOVERABLE) { in mutex_trylock_process()
1465 ASSERT(mp->mutex_type & LOCK_ROBUST); in mutex_trylock_process()
1477 if (set_lock_byte(&mp->mutex_lockw) == 0) { in mutex_trylock_process()
1478 mp->mutex_ownerpid = udp->pid; in mutex_trylock_process()
1479 mp->mutex_owner = (uintptr_t)self; in mutex_trylock_process()
1486 mp->mutex_owner = (uintptr_t)self; in mutex_trylock_process()
1506 if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) in mutex_trylock_process()
1508 DTRACE_PROBE1(plockstat, mutex__spin, mp); in mutex_trylock_process()
1514 set_lock_byte(&mp->mutex_lockw) == 0) { in mutex_trylock_process()
1515 mp->mutex_ownerpid = udp->pid; in mutex_trylock_process()
1516 mp->mutex_owner = (uintptr_t)self; in mutex_trylock_process()
1524 mp->mutex_owner = (uintptr_t)self; in mutex_trylock_process()
1533 new_lockword = spinners_decr(&mp->mutex_lockword); in mutex_trylock_process()
1553 if (set_lock_byte(&mp->mutex_lockw) == 0) { in mutex_trylock_process()
1554 mp->mutex_ownerpid = udp->pid; in mutex_trylock_process()
1555 mp->mutex_owner = (uintptr_t)self; in mutex_trylock_process()
1561 mp->mutex_owner = (uintptr_t)self; in mutex_trylock_process()
1569 if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) { in mutex_trylock_process()
1570 ASSERT(mp->mutex_type & LOCK_ROBUST); in mutex_trylock_process()
1575 mp->mutex_owner = 0; in mutex_trylock_process()
1577 (void) clear_lockbyte64(&mp->mutex_lockword64); in mutex_trylock_process()
1585 DTRACE_PROBE3(plockstat, mutex__spun, mp, 0, count); in mutex_trylock_process()
1588 DTRACE_PROBE2(plockstat, mutex__error, mp, error); in mutex_trylock_process()
1592 DTRACE_PROBE3(plockstat, mutex__spun, mp, 1, count); in mutex_trylock_process()
1594 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count); in mutex_trylock_process()
1595 if (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED)) { in mutex_trylock_process()
1596 ASSERT(mp->mutex_type & LOCK_ROBUST); in mutex_trylock_process()
1597 if (mp->mutex_flag & LOCK_OWNERDEAD) in mutex_trylock_process()
1599 else if (mp->mutex_type & USYNC_PROCESS_ROBUST) in mutex_trylock_process()
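mutex_trylock_process() repeats the adaptive dance for process-shared locks, with one twist visible in the refs: on well-aligned, non-robust locks the lock byte and mutex_ownerpid are installed together through set_lock_byte64() on mutex_lockword64, so a peer process always sees a consistent owner pid; misaligned locks (self->ul_misaligned) fall back to byte-at-a-time stores. A sketch of that combined compare-and-swap; the word layout here is illustrative, not the real one:

#include <stdatomic.h>
#include <stdint.h>

#define	LOCKSET64	((uint64_t)1 << 32)	/* illustrative placement */

/* Atomically take the lock and record the owner pid, iff it is free. */
static int
set_lock_byte64_sketch(_Atomic uint64_t *lockword64, uint32_t pid)
{
	uint64_t old = atomic_load(lockword64);

	for (;;) {
		if (old & LOCKSET64)
			return (-1);	/* already held */
		uint64_t nval = LOCKSET64 | pid;	/* pid in low word */
		if (atomic_compare_exchange_weak(lockword64, &old, nval))
			return (0);	/* owner pid published with the lock */
		/* CAS failed: 'old' now holds the fresh value; retry */
	}
}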
1616 mutex_wakeup(mutex_t *mp) in mutex_wakeup() argument
1628 qp = queue_lock(mp, MX); in mutex_wakeup()
1631 mp->mutex_waiters = more; in mutex_wakeup()
1641 mutex_wakeup_all(mutex_t *mp) in mutex_wakeup_all() argument
1667 qp = queue_lock(mp, MX); in mutex_wakeup_all()
1672 ASSERT(ulwp->ul_wchan == mp); in mutex_wakeup_all()
1684 mp->mutex_waiters = 0; in mutex_wakeup_all()
1707 mutex_unlock_queue(mutex_t *mp, int release_all) in mutex_unlock_queue() argument
1713 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); in mutex_unlock_queue()
1715 mp->mutex_owner = 0; in mutex_unlock_queue()
1716 old_lockword = clear_lockbyte(&mp->mutex_lockword); in mutex_unlock_queue()
1721 mutex_wakeup_all(mp); in mutex_unlock_queue()
1723 lwpid = mutex_wakeup(mp); in mutex_unlock_queue()
1735 mutex_unlock_process(mutex_t *mp, int release_all) in mutex_unlock_process() argument
1740 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); in mutex_unlock_process()
1742 mp->mutex_owner = 0; in mutex_unlock_process()
1745 if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && in mutex_unlock_process()
1746 self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST)) { in mutex_unlock_process()
1748 mp->mutex_ownerpid = 0; in mutex_unlock_process()
1749 old_lockword = clear_lockbyte(&mp->mutex_lockword); in mutex_unlock_process()
1753 (void) ___lwp_mutex_wakeup(mp, release_all); in mutex_unlock_process()
1761 old_lockword64 = clear_lockbyte64(&mp->mutex_lockword64); in mutex_unlock_process()
1765 (void) ___lwp_mutex_wakeup(mp, release_all); in mutex_unlock_process()
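mutex_wakeup() pulls one waiter off the sleep queue (leaving mutex_waiters set if more remain), mutex_wakeup_all() drains it, and the two unlockers share one ordering rule: clear mutex_owner (and ownerpid) first, then atomically clear the lock byte, and only wake if the old lockword still had waiter bits. A sketch of mutex_unlock_queue()'s "clear, then maybe wake" shape; the bit layout and wake stubs are illustrative:

#include <stdatomic.h>

#define	LOCKMASK	0x000000ffu	/* illustrative lock byte */
#define	WAITERMASK	0x00ff0000u	/* illustrative waiter bits */

typedef struct {
	atomic_uint lockword;
	atomic_uintptr_t owner;
} qmutex_t;

static void wake_one(qmutex_t *mp) { (void) mp; }	/* mutex_wakeup() */
static void wake_all(qmutex_t *mp) { (void) mp; }	/* mutex_wakeup_all() */

static void
unlock_queue_sketch(qmutex_t *mp, int release_all)
{
	atomic_store(&mp->owner, 0);	/* owner first, lock byte second */
	if (atomic_fetch_and(&mp->lockword, ~LOCKMASK) & WAITERMASK) {
		if (release_all)
			wake_all(mp);	/* robust lock went unrecoverable */
		else
			wake_one(mp);
	}
}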
1784 mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp, in mutex_lock_queue() argument
1794 self->ul_wchan = mp; in mutex_lock_queue()
1796 self->ul_td_evbuf.eventdata = mp; in mutex_lock_queue()
1804 DTRACE_PROBE1(plockstat, mutex__block, mp); in mutex_lock_queue()
1812 qp = queue_lock(mp, MX); in mutex_lock_queue()
1814 mp->mutex_waiters = 1; in mutex_lock_queue()
1816 if (set_lock_byte(&mp->mutex_lockw) == 0) { in mutex_lock_queue()
1817 mp->mutex_owner = (uintptr_t)self; in mutex_lock_queue()
1818 mp->mutex_waiters = dequeue_self(qp); in mutex_lock_queue()
1837 qp = queue_lock(mp, MX); in mutex_lock_queue()
1840 mp->mutex_waiters = queue_waiter(qp)? 1 : 0; in mutex_lock_queue()
1845 if (set_lock_byte(&mp->mutex_lockw) == 0) { in mutex_lock_queue()
1846 mp->mutex_owner = (uintptr_t)self; in mutex_lock_queue()
1850 mp->mutex_waiters = 1; in mutex_lock_queue()
1854 self->ul_wchan == mp); in mutex_lock_queue()
1857 mp->mutex_waiters = dequeue_self(qp); in mutex_lock_queue()
1869 if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) { in mutex_lock_queue()
1870 ASSERT(mp->mutex_type & LOCK_ROBUST); in mutex_lock_queue()
1875 mp->mutex_owner = 0; in mutex_lock_queue()
1876 (void) clear_lockbyte(&mp->mutex_lockword); in mutex_lock_queue()
1886 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0); in mutex_lock_queue()
1887 DTRACE_PROBE2(plockstat, mutex__error, mp, error); in mutex_lock_queue()
1889 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1); in mutex_lock_queue()
1890 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); in mutex_lock_queue()
1891 if (mp->mutex_flag & LOCK_OWNERDEAD) { in mutex_lock_queue()
1892 ASSERT(mp->mutex_type & LOCK_ROBUST); in mutex_lock_queue()
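mutex_lock_queue() is the userland sleep path. The protocol in the refs: take the queue lock, set mutex_waiters = 1, then re-test the lock byte, because an unlocker that read mutex_waiters before we set it will not wake anybody; if the lock is still held, enqueue and sleep, and on waking recompute mutex_waiters from the queue and compete for the lock byte again. A condensed sketch; the stub park() elides that the real code enqueues itself while still holding the queue lock and drops that lock as part of going to sleep, which is what makes the wakeup race-free:

#include <stdatomic.h>
#include <pthread.h>

typedef struct {
	atomic_uchar lockw;
	atomic_uchar waiters;
	pthread_mutex_t qlock;	/* stand-in for queue_lock(mp, MX) */
} smutex_t;

static void park(smutex_t *mp) { (void) mp; }	/* sleep until woken */

static void
lock_queue_sketch(smutex_t *mp)
{
	for (;;) {
		pthread_mutex_lock(&mp->qlock);
		atomic_store(&mp->waiters, 1);
		/*
		 * Re-test after advertising ourselves: an unlocker
		 * that saw waiters == 0 has already cleared the
		 * lock byte and will not issue a wakeup.
		 */
		if (atomic_exchange(&mp->lockw, 1) == 0) {
			atomic_store(&mp->waiters, 0);
			pthread_mutex_unlock(&mp->qlock);
			return;	/* got it without sleeping */
		}
		pthread_mutex_unlock(&mp->qlock);
		park(mp);
	}
}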
1901 mutex_recursion(mutex_t *mp, int mtype, int try) in mutex_recursion() argument
1903 ASSERT(mutex_held(mp)); in mutex_recursion()
1908 if (mp->mutex_rcount == RECURSION_MAX) { in mutex_recursion()
1909 DTRACE_PROBE2(plockstat, mutex__error, mp, EAGAIN); in mutex_recursion()
1912 mp->mutex_rcount++; in mutex_recursion()
1913 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0); in mutex_recursion()
1917 DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); in mutex_recursion()
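mutex_recursion() centralizes what happens when the owner locks a mutex it already holds: LOCK_RECURSIVE bumps mutex_rcount, failing with EAGAIN at RECURSION_MAX; otherwise a trylock reports EBUSY and an error-checking lock reports EDEADLK. Sketch (the RECURSION_MAX value is an assumption):

#include <errno.h>
#include <stdint.h>

#define	RECURSION_MAX	UINT8_MAX	/* assumed bound; rcount is small */

typedef struct {
	uint8_t rcount;
	int recursive;		/* LOCK_RECURSIVE */
} rmutex_t;

static int
recursion_sketch(rmutex_t *mp, int try)
{
	if (mp->recursive) {
		if (mp->rcount == RECURSION_MAX)
			return (EAGAIN);	/* recursion too deep */
		mp->rcount++;
		return (0);		/* re-acquired recursively */
	}
	if (try)
		return (EBUSY);		/* trylock of a lock we own */
	return (EDEADLK);		/* error-check lock: self-deadlock */
}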
1930 register_lock(mutex_t *mp) in register_lock() argument
1933 uint_t hash = LOCK_HASH(mp); in register_lock()
1956 if (rlp->robust_lock == mp) /* already registered */ in register_lock()
1970 if (rlp->robust_lock == mp) { /* already registered */ in register_lock()
1988 rlp->robust_lock = mp; in register_lock()
1996 rlp->robust_lock = mp; in register_lock()
2006 (void) ___lwp_mutex_register(mp, &rlp->robust_lock); in register_lock()
2046 mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try) in mutex_lock_internal() argument
2050 int mtype = mp->mutex_type; in mutex_lock_internal()
2051 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); in mutex_lock_internal()
2066 if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_held(mp)) in mutex_lock_internal()
2067 return (mutex_recursion(mp, mtype, try)); in mutex_lock_internal()
2070 tsp == NULL && mutex_held(mp)) in mutex_lock_internal()
2071 lock_error(mp, "mutex_lock", NULL, NULL); in mutex_lock_internal()
2076 DTRACE_PROBE2(plockstat, mutex__error, mp, EPERM); in mutex_lock_internal()
2079 ceil = mp->mutex_ceiling; in mutex_lock_internal()
2082 DTRACE_PROBE2(plockstat, mutex__error, mp, EINVAL); in mutex_lock_internal()
2085 if ((error = _ceil_mylist_add(mp)) != 0) { in mutex_lock_internal()
2086 DTRACE_PROBE2(plockstat, mutex__error, mp, error); in mutex_lock_internal()
2095 register_lock(mp); in mutex_lock_internal()
2100 error = mutex_trylock_kernel(mp); in mutex_lock_internal()
2102 error = mutex_lock_kernel(mp, tsp, msp); in mutex_lock_internal()
2111 mp->mutex_lockw = LOCKSET; in mutex_lock_internal()
2116 mp->mutex_lockw = LOCKSET; in mutex_lock_internal()
2140 error = mutex_trylock_process(mp, try == MUTEX_LOCK); in mutex_lock_internal()
2142 error = mutex_lock_kernel(mp, tsp, msp); in mutex_lock_internal()
2144 error = mutex_trylock_adaptive(mp, try == MUTEX_LOCK); in mutex_lock_internal()
2146 error = mutex_lock_queue(self, msp, mp, tsp); in mutex_lock_internal()
2154 remember_lock(mp); in mutex_lock_internal()
2160 (void) _ceil_mylist_del(mp); in mutex_lock_internal()
2179 fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try) in fast_process_lock() argument
2193 if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && in fast_process_lock()
2195 if (set_lock_byte(&mp->mutex_lockw) == 0) { in fast_process_lock()
2196 mp->mutex_ownerpid = udp->pid; in fast_process_lock()
2197 mp->mutex_owner = (uintptr_t)self; in fast_process_lock()
2199 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); in fast_process_lock()
2204 if (set_lock_byte64(&mp->mutex_lockword64, udp->pid) == 0) { in fast_process_lock()
2205 mp->mutex_owner = (uintptr_t)self; in fast_process_lock()
2208 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); in fast_process_lock()
2213 if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && shared_mutex_held(mp)) in fast_process_lock()
2214 return (mutex_recursion(mp, mtype, try)); in fast_process_lock()
2217 if (mutex_trylock_process(mp, 1) == 0) in fast_process_lock()
2219 return (mutex_lock_kernel(mp, tsp, NULL)); in fast_process_lock()
2230 mutex_lock_impl(mutex_t *mp, timespec_t *tsp) in mutex_lock_impl() argument
2233 int mtype = mp->mutex_type; in mutex_lock_impl()
2236 if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && in mutex_lock_impl()
2238 lock_error(mp, "mutex_lock", NULL, "mutex is misaligned"); in mutex_lock_impl()
2253 if (mp->mutex_lockw == 0) { in mutex_lock_impl()
2255 mp->mutex_lockw = LOCKSET; in mutex_lock_impl()
2256 mp->mutex_owner = (uintptr_t)self; in mutex_lock_impl()
2258 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); in mutex_lock_impl()
2261 if (mtype && MUTEX_OWNER(mp) == self) in mutex_lock_impl()
2262 return (mutex_recursion(mp, mtype, MUTEX_LOCK)); in mutex_lock_impl()
2282 MUTEX_OWNER(mp) == self && !self->ul_async_safe && in mutex_lock_impl()
2283 (mp->mutex_flag & LOCK_DEADLOCK) == 0) { in mutex_lock_impl()
2284 DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); in mutex_lock_impl()
2298 return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK)); in mutex_lock_impl()
2300 if (set_lock_byte(&mp->mutex_lockw) == 0) { in mutex_lock_impl()
2301 mp->mutex_owner = (uintptr_t)self; in mutex_lock_impl()
2303 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); in mutex_lock_impl()
2307 if (mtype && MUTEX_OWNER(mp) == self) in mutex_lock_impl()
2308 return (mutex_recursion(mp, mtype, MUTEX_LOCK)); in mutex_lock_impl()
2309 if (mutex_trylock_adaptive(mp, 1) != 0) in mutex_lock_impl()
2310 return (mutex_lock_queue(self, NULL, mp, tsp)); in mutex_lock_impl()
2315 return (mutex_lock_internal(mp, tsp, MUTEX_LOCK)); in mutex_lock_impl()
2321 mutex_lock(mutex_t *mp) in mutex_lock() argument
2324 return (mutex_lock_impl(mp, NULL)); in mutex_lock()
2329 mutex_enter(mutex_t *mp) in mutex_enter() argument
2332 int attr = mp->mutex_type & ALL_ATTRIBUTES; in mutex_enter()
2339 mutex_panic(mp, "mutex_enter: bad mutex type"); in mutex_enter()
2341 ret = mutex_lock(mp); in mutex_enter()
2343 mutex_panic(mp, "recursive mutex_enter"); in mutex_enter()
2345 mutex_panic(mp, "excessive recursive mutex_enter"); in mutex_enter()
2347 mutex_panic(mp, "unknown mutex_enter failure"); in mutex_enter()
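mutex_lock_impl() begins with two fast paths visible above: a misalignment check for process-shared locks, then, if the process has never created a second thread, a default mutex is taken with plain stores and no atomic instructions at all; everything else funnels into the adaptive/queue machinery or mutex_lock_internal(). mutex_enter() (and mutex_exit(), further down) are kernel-style wrappers that panic on misuse instead of returning errors. A sketch of the single-threaded shortcut; once_multithreaded stands in for libc's uberflags test:

#include <stdatomic.h>
#include <stdint.h>

#define	LOCKSET	1

typedef struct {
	atomic_uchar lockw;
	uintptr_t owner;
} fmutex_t;

static int once_multithreaded;	/* set when a second thread is created */

static int
lock_fast_sketch(fmutex_t *mp, uintptr_t self)
{
	if (!once_multithreaded) {
		/* no other thread exists: plain stores suffice */
		if (atomic_load_explicit(&mp->lockw,
		    memory_order_relaxed) == 0) {
			atomic_store_explicit(&mp->lockw, LOCKSET,
			    memory_order_relaxed);
			mp->owner = self;
			return (0);
		}
		return (-1);	/* already ours: mutex_recursion() territory */
	}
	/* multithreaded: the usual atomic test-and-set */
	if (atomic_exchange(&mp->lockw, LOCKSET) == 0) {
		mp->owner = self;
		return (0);
	}
	return (-1);	/* contended: adaptive spin, then sleep queue */
}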
2352 pthread_mutex_clocklock(pthread_mutex_t *restrict mp, clockid_t clock, in pthread_mutex_clocklock() argument
2369 error = mutex_lock_impl((mutex_t *)mp, &tslocal); in pthread_mutex_clocklock()
2376 pthread_mutex_timedlock(pthread_mutex_t *restrict mp, in pthread_mutex_timedlock() argument
2379 return (pthread_mutex_clocklock(mp, CLOCK_REALTIME, abstime)); in pthread_mutex_timedlock()
2383 pthread_mutex_relclocklock_np(pthread_mutex_t *restrict mp, clockid_t clock, in pthread_mutex_relclocklock_np() argument
2400 error = mutex_lock_impl((mutex_t *)mp, &tslocal); in pthread_mutex_relclocklock_np()
2407 pthread_mutex_reltimedlock_np(pthread_mutex_t *restrict mp, in pthread_mutex_reltimedlock_np() argument
2410 return (pthread_mutex_relclocklock_np(mp, CLOCK_REALTIME, reltime)); in pthread_mutex_reltimedlock_np()
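The clocklock family layers timeouts onto the same lock path: pthread_mutex_timedlock() is clocklock with CLOCK_REALTIME, the *_np variants accept a relative timeout, and in every case the deadline is normalized into a local relative timespec (tslocal) before mutex_lock_impl() runs. The conversion looks roughly like this:

#include <time.h>
#include <errno.h>

/* Convert an absolute deadline on 'clock' into a relative timeout. */
static int
abstime_to_reltime(clockid_t clock, const struct timespec *abstime,
    struct timespec *reltime)
{
	struct timespec now;

	if (clock_gettime(clock, &now) != 0)
		return (EINVAL);
	reltime->tv_sec = abstime->tv_sec - now.tv_sec;
	reltime->tv_nsec = abstime->tv_nsec - now.tv_nsec;
	if (reltime->tv_nsec < 0) {
		reltime->tv_sec--;
		reltime->tv_nsec += 1000000000L;
	}
	if (reltime->tv_sec < 0)
		return (ETIMEDOUT);	/* deadline already passed */
	return (0);
}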
2415 mutex_trylock(mutex_t *mp) in mutex_trylock() argument
2419 int mtype = mp->mutex_type; in mutex_trylock()
2437 if (mp->mutex_lockw == 0) { in mutex_trylock()
2439 mp->mutex_lockw = LOCKSET; in mutex_trylock()
2440 mp->mutex_owner = (uintptr_t)self; in mutex_trylock()
2442 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); in mutex_trylock()
2445 if (mtype && MUTEX_OWNER(mp) == self) in mutex_trylock()
2446 return (mutex_recursion(mp, mtype, MUTEX_TRY)); in mutex_trylock()
2459 return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY)); in mutex_trylock()
2461 if (set_lock_byte(&mp->mutex_lockw) == 0) { in mutex_trylock()
2462 mp->mutex_owner = (uintptr_t)self; in mutex_trylock()
2464 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); in mutex_trylock()
2468 if (mtype && MUTEX_OWNER(mp) == self) in mutex_trylock()
2469 return (mutex_recursion(mp, mtype, MUTEX_TRY)); in mutex_trylock()
2478 return (mutex_lock_internal(mp, NULL, MUTEX_TRY)); in mutex_trylock()
2482 mutex_unlock_internal(mutex_t *mp, int retain_robust_flags) in mutex_unlock_internal() argument
2486 int mtype = mp->mutex_type; in mutex_unlock_internal()
2493 !mutex_held(mp)) in mutex_unlock_internal()
2496 if (self->ul_error_detection && !mutex_held(mp)) in mutex_unlock_internal()
2497 lock_error(mp, "mutex_unlock", NULL, NULL); in mutex_unlock_internal()
2499 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { in mutex_unlock_internal()
2500 mp->mutex_rcount--; in mutex_unlock_internal()
2501 DTRACE_PROBE2(plockstat, mutex__release, mp, 1); in mutex_unlock_internal()
2505 if ((msp = MUTEX_STATS(mp, udp)) != NULL) in mutex_unlock_internal()
2509 (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) { in mutex_unlock_internal()
2511 mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED); in mutex_unlock_internal()
2512 mp->mutex_flag |= LOCK_NOTRECOVERABLE; in mutex_unlock_internal()
2514 release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0); in mutex_unlock_internal()
2518 mp->mutex_owner = 0; in mutex_unlock_internal()
2520 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); in mutex_unlock_internal()
2521 mp->mutex_lockw = LOCKCLEAR; in mutex_unlock_internal()
2523 error = ___lwp_mutex_unlock(mp); in mutex_unlock_internal()
2526 mutex_unlock_process(mp, release_all); in mutex_unlock_internal()
2528 if ((lwpid = mutex_unlock_queue(mp, release_all)) != 0) { in mutex_unlock_internal()
2535 forget_lock(mp); in mutex_unlock_internal()
2537 if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp)) in mutex_unlock_internal()
2546 mutex_unlock(mutex_t *mp) in mutex_unlock() argument
2549 int mtype = mp->mutex_type; in mutex_unlock()
2568 if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) in mutex_unlock()
2570 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { in mutex_unlock()
2571 mp->mutex_rcount--; in mutex_unlock()
2572 DTRACE_PROBE2(plockstat, mutex__release, mp, 1); in mutex_unlock()
2581 mp->mutex_owner = 0; in mutex_unlock()
2582 mp->mutex_lockword = 0; in mutex_unlock()
2584 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); in mutex_unlock()
2596 if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) { in mutex_unlock()
2609 if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) in mutex_unlock()
2611 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { in mutex_unlock()
2612 mp->mutex_rcount--; in mutex_unlock()
2613 DTRACE_PROBE2(plockstat, mutex__release, mp, 1); in mutex_unlock()
2625 if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp)) in mutex_unlock()
2627 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { in mutex_unlock()
2628 mp->mutex_rcount--; in mutex_unlock()
2629 DTRACE_PROBE2(plockstat, mutex__release, mp, 1); in mutex_unlock()
2632 mutex_unlock_process(mp, 0); in mutex_unlock()
2639 return (mutex_unlock_internal(mp, 0)); in mutex_unlock()
2644 mutex_exit(mutex_t *mp) in mutex_exit() argument
2647 int attr = mp->mutex_type & ALL_ATTRIBUTES; in mutex_exit()
2651 mutex_panic(mp, "mutex_exit: bad mutex type"); in mutex_exit()
2653 ret = mutex_unlock(mp); in mutex_exit()
2655 mutex_panic(mp, "mutex_exit: not owner"); in mutex_exit()
2657 mutex_panic(mp, "unknown mutex_exit failure"); in mutex_exit()
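The unlock refs layer the bookkeeping in a fixed order: ownership is verified for error-checking types (EPERM otherwise), an outstanding recursion count is simply decremented, robust state transitions come next (a lock released while still LOCK_OWNERDEAD, i.e. never repaired with mutex_consistent(), is poisoned to LOCK_NOTRECOVERABLE and every waiter is released), and only then is a release mechanism chosen: kernel unlock for priority-inherit locks, mutex_unlock_process() for shared ones, mutex_unlock_queue() otherwise. A sketch of that ordering with illustrative flag bits:

#include <errno.h>
#include <stdint.h>

#define	LOCK_OWNERDEAD		0x01	/* illustrative bits */
#define	LOCK_NOTRECOVERABLE	0x02

typedef struct {
	uintptr_t owner;
	uint8_t rcount;
	uint16_t flag;
	int robust, errorcheck, recursive;
} umutex_t;

static void
release_lock(umutex_t *mp, int release_all)	/* unlock_queue stand-in */
{
	(void) mp;
	(void) release_all;
}

static int
unlock_sketch(umutex_t *mp, uintptr_t self)
{
	int release_all = 0;

	if (mp->errorcheck && mp->owner != self)
		return (EPERM);		/* not the owner */
	if (mp->recursive && mp->rcount != 0) {
		mp->rcount--;		/* inner unlock; still held */
		return (0);
	}
	if (mp->robust) {
		if (mp->flag & LOCK_OWNERDEAD) {
			/* released without repair: poison it */
			mp->flag &= ~LOCK_OWNERDEAD;
			mp->flag |= LOCK_NOTRECOVERABLE;
		}
		release_all = (mp->flag & LOCK_NOTRECOVERABLE) != 0;
	}
	release_lock(mp, release_all);
	return (0);
}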
2671 lmutex_lock(mutex_t *mp) in lmutex_lock() argument
2676 ASSERT(mp->mutex_type == USYNC_THREAD); in lmutex_lock()
2687 ASSERT(mp->mutex_lockw == 0); in lmutex_lock()
2688 mp->mutex_lockw = LOCKSET; in lmutex_lock()
2689 mp->mutex_owner = (uintptr_t)self; in lmutex_lock()
2690 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); in lmutex_lock()
2692 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); in lmutex_lock()
2697 if (set_lock_byte(&mp->mutex_lockw) == 0) { in lmutex_lock()
2698 mp->mutex_owner = (uintptr_t)self; in lmutex_lock()
2699 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); in lmutex_lock()
2700 } else if (mutex_trylock_adaptive(mp, 1) != 0) { in lmutex_lock()
2701 (void) mutex_lock_queue(self, msp, mp, NULL); in lmutex_lock()
2710 lmutex_unlock(mutex_t *mp) in lmutex_unlock() argument
2715 ASSERT(mp->mutex_type == USYNC_THREAD); in lmutex_unlock()
2725 mp->mutex_owner = 0; in lmutex_unlock()
2726 mp->mutex_lockword = 0; in lmutex_unlock()
2727 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); in lmutex_unlock()
2729 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); in lmutex_unlock()
2734 if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) { in lmutex_unlock()
2749 sig_mutex_lock(mutex_t *mp) in sig_mutex_lock() argument
2754 (void) mutex_lock(mp); in sig_mutex_lock()
2758 sig_mutex_unlock(mutex_t *mp) in sig_mutex_unlock() argument
2762 (void) mutex_unlock(mp); in sig_mutex_unlock()
2767 sig_mutex_trylock(mutex_t *mp) in sig_mutex_trylock() argument
2773 if ((error = mutex_trylock(mp)) != 0) in sig_mutex_trylock()
2782 sig_cond_wait(cond_t *cv, mutex_t *mp) in sig_cond_wait() argument
2788 error = __cond_wait(cv, mp); in sig_cond_wait()
2790 sig_mutex_unlock(mp); in sig_cond_wait()
2792 sig_mutex_lock(mp); in sig_cond_wait()
2802 sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts) in sig_cond_reltimedwait() argument
2808 error = __cond_reltimedwait(cv, mp, ts); in sig_cond_reltimedwait()
2810 sig_mutex_unlock(mp); in sig_cond_reltimedwait()
2812 sig_mutex_lock(mp); in sig_cond_reltimedwait()
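The sig_mutex_* wrappers take internal locks inside a signal-deferral window, so an asynchronous handler can never run while the caller holds a library lock, and sig_cond_wait()/sig_cond_reltimedwait() turn an EINTR from __cond_wait() into a cancellation point by unlocking, letting cancellation fire, and relocking. The real code flips a cheap per-thread deferral counter; a heavier but portable approximation uses the signal mask:

#include <pthread.h>
#include <signal.h>

static void
sig_mutex_lock_sketch(pthread_mutex_t *mp, sigset_t *saved)
{
	sigset_t all;

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, saved);	/* defer signals */
	pthread_mutex_lock(mp);
}

static void
sig_mutex_unlock_sketch(pthread_mutex_t *mp, const sigset_t *saved)
{
	pthread_mutex_unlock(mp);
	pthread_sigmask(SIG_SETMASK, saved, NULL);	/* take deferred ones */
}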
2825 cancel_safe_mutex_lock(mutex_t *mp) in cancel_safe_mutex_lock() argument
2827 (void) mutex_lock(mp); in cancel_safe_mutex_lock()
2832 cancel_safe_mutex_trylock(mutex_t *mp) in cancel_safe_mutex_trylock() argument
2836 if ((error = mutex_trylock(mp)) == 0) in cancel_safe_mutex_trylock()
2842 cancel_safe_mutex_unlock(mutex_t *mp) in cancel_safe_mutex_unlock() argument
2848 (void) mutex_unlock(mp); in cancel_safe_mutex_unlock()
2880 volatile mutex_t *mp = (volatile mutex_t *)mparg; in shared_mutex_held() local
2884 return (MUTEX_OWNED(mp, self) && mp->mutex_ownerpid == udp->pid); in shared_mutex_held()
2891 volatile mutex_t *mp = (volatile mutex_t *)mparg; in mutex_held() local
2895 return (MUTEX_OWNED(mp, curthread)); in mutex_held()
2901 mutex_destroy(mutex_t *mp) in mutex_destroy() argument
2903 if (mp->mutex_type & USYNC_PROCESS) in mutex_destroy()
2904 forget_lock(mp); in mutex_destroy()
2905 (void) memset(mp, 0, sizeof (*mp)); in mutex_destroy()
2906 tdb_sync_obj_deregister(mp); in mutex_destroy()
2913 mutex_consistent(mutex_t *mp) in mutex_consistent() argument
2919 if (mutex_held(mp) && in mutex_consistent()
2920 (mp->mutex_type & LOCK_ROBUST) && in mutex_consistent()
2921 (mp->mutex_flag & LOCK_INITED) && in mutex_consistent()
2922 (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) { in mutex_consistent()
2923 mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED); in mutex_consistent()
2924 mp->mutex_rcount = 0; in mutex_consistent()
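mutex_destroy() forgets and zeroes the lock; mutex_consistent() is the repair half of robust locking, succeeding only when the caller holds a robust, initialized lock whose OWNERDEAD or UNMAPPED flag is set, and clearing those flags plus the recursion count. At the POSIX level the same recovery is spelled with the standard API:

#include <pthread.h>
#include <errno.h>

/* Acquire a robust mutex, repairing it if its last owner died. */
static int
lock_robust(pthread_mutex_t *mp)
{
	int error = pthread_mutex_lock(mp);

	if (error == EOWNERDEAD) {
		/* we hold the lock; after repairing protected data: */
		error = pthread_mutex_consistent(mp);
		/*
		 * Skipping this and unlocking anyway would leave the
		 * mutex permanently ENOTRECOVERABLE for everyone.
		 */
	}
	return (error);
}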
2938 mutex_t *mp = (mutex_t *)lock; in pthread_spin_init() local
2940 (void) memset(mp, 0, sizeof (*mp)); in pthread_spin_init()
2942 mp->mutex_type = USYNC_PROCESS; in pthread_spin_init()
2944 mp->mutex_type = USYNC_THREAD; in pthread_spin_init()
2945 mp->mutex_flag = LOCK_INITED; in pthread_spin_init()
2946 mp->mutex_magic = MUTEX_MAGIC; in pthread_spin_init()
2955 if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && in pthread_spin_init()
2972 mutex_t *mp = (mutex_t *)lock; in pthread_spin_trylock() local
2977 if (set_lock_byte(&mp->mutex_lockw) != 0) in pthread_spin_trylock()
2980 mp->mutex_owner = (uintptr_t)self; in pthread_spin_trylock()
2981 if (mp->mutex_type == USYNC_PROCESS) in pthread_spin_trylock()
2982 mp->mutex_ownerpid = self->ul_uberdata->pid; in pthread_spin_trylock()
2983 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); in pthread_spin_trylock()
2992 mutex_t *mp = (mutex_t *)lock; in pthread_spin_lock() local
2994 volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw; in pthread_spin_lock()
2999 DTRACE_PROBE1(plockstat, mutex__spin, mp); in pthread_spin_lock()
3016 mp->mutex_owner = (uintptr_t)self; in pthread_spin_lock()
3017 if (mp->mutex_type == USYNC_PROCESS) in pthread_spin_lock()
3018 mp->mutex_ownerpid = self->ul_uberdata->pid; in pthread_spin_lock()
3021 DTRACE_PROBE3(plockstat, mutex__spun, mp, 1, count); in pthread_spin_lock()
3023 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count); in pthread_spin_lock()
3030 mutex_t *mp = (mutex_t *)lock; in pthread_spin_unlock() local
3034 mp->mutex_owner = 0; in pthread_spin_unlock()
3035 mp->mutex_ownerpid = 0; in pthread_spin_unlock()
3036 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); in pthread_spin_unlock()
3037 (void) atomic_swap_32(&mp->mutex_lockword, 0); in pthread_spin_unlock()
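The pthread_spin_* refs show spin locks reusing the same mutex_t plumbing: init zeroes the object and picks USYNC_PROCESS or USYNC_THREAD, trylock is one test-and-set of the lock byte, lock spins on a plain read between attempts, and unlock swaps the lockword to zero. Standard usage, for contrast with the blocking mutexes above:

#include <pthread.h>

static pthread_spinlock_t sl;
static long counter;

static void *
worker(void *arg)
{
	(void) arg;
	for (int i = 0; i < 100000; i++) {
		pthread_spin_lock(&sl);		/* busy-waits, never sleeps */
		counter++;
		pthread_spin_unlock(&sl);
	}
	return (NULL);
}

int
main(void)
{
	pthread_t t1, t2;

	pthread_spin_init(&sl, PTHREAD_PROCESS_PRIVATE);
	(void) pthread_create(&t1, NULL, worker, NULL);
	(void) pthread_create(&t2, NULL, worker, NULL);
	(void) pthread_join(t1, NULL);
	(void) pthread_join(t2, NULL);
	(void) pthread_spin_destroy(&sl);
	return (counter != 200000);
}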
3155 mutex_t *mp; in heldlock_exit() local
3170 if ((mp = *lockptr) != NULL && in heldlock_exit()
3171 mutex_held(mp) && in heldlock_exit()
3172 (mp->mutex_type & (LOCK_ROBUST | LOCK_PRIO_INHERIT)) == in heldlock_exit()
3174 mp->mutex_rcount = 0; in heldlock_exit()
3175 if (!(mp->mutex_flag & LOCK_UNMAPPED)) in heldlock_exit()
3176 mp->mutex_flag |= LOCK_OWNERDEAD; in heldlock_exit()
3177 (void) mutex_unlock_internal(mp, 1); in heldlock_exit()
3224 cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp) in cond_sleep_queue() argument
3245 self->ul_cvmutex = mp; in cond_sleep_queue()
3248 if (mp->mutex_flag & LOCK_OWNERDEAD) { in cond_sleep_queue()
3249 mp->mutex_flag &= ~LOCK_OWNERDEAD; in cond_sleep_queue()
3250 mp->mutex_flag |= LOCK_NOTRECOVERABLE; in cond_sleep_queue()
3252 release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0); in cond_sleep_queue()
3253 lwpid = mutex_unlock_queue(mp, release_all); in cond_sleep_queue()
3286 mqp = queue_lock(mp, MX); in cond_sleep_queue()
3297 mp->mutex_waiters = dequeue_self(mqp); in cond_sleep_queue()
3341 cond_wait_check_alignment(cond_t *cvp, mutex_t *mp) in cond_wait_check_alignment() argument
3343 if ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) in cond_wait_check_alignment()
3344 lock_error(mp, "cond_wait", cvp, "mutex is misaligned"); in cond_wait_check_alignment()
3346 lock_error(mp, "cond_wait", cvp, "condvar is misaligned"); in cond_wait_check_alignment()
3350 cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp) in cond_wait_queue() argument
3357 cond_wait_check_alignment(cvp, mp); in cond_wait_queue()
3376 error = cond_sleep_queue(cvp, mp, tsp); in cond_wait_queue()
3381 if ((merror = mutex_lock_impl(mp, NULL)) != 0) in cond_wait_queue()
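cond_sleep_queue() records the mutex the waiter will need (ul_cvmutex), handles robust OWNERDEAD state, releases the mutex through mutex_unlock_queue(), and sleeps; on waking it may find cond_signal() has already morphed it onto the mutex's sleep queue. cond_wait_queue() then reacquires the mutex with mutex_lock_impl() whether the wait succeeded, timed out, or was interrupted. All of that machinery exists to honor the familiar user-level contract:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool ready;

static void
wait_for_ready(void)
{
	pthread_mutex_lock(&m);
	while (!ready)			/* wakeups may be spurious */
		pthread_cond_wait(&cv, &m);	/* unlock + sleep + relock */
	pthread_mutex_unlock(&m);
}

static void
set_ready(void)
{
	pthread_mutex_lock(&m);
	ready = true;
	pthread_cond_signal(&cv);
	pthread_mutex_unlock(&m);
}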
3398 cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp) in cond_sleep_kernel() argument
3400 int mtype = mp->mutex_type; in cond_sleep_kernel()
3404 if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp)) in cond_sleep_kernel()
3410 mp->mutex_owner = 0; in cond_sleep_kernel()
3413 mp->mutex_lockw = LOCKCLEAR; in cond_sleep_kernel()
3428 error = ___lwp_cond_wait(cvp, mp, tsp, 1); in cond_sleep_kernel()
3437 cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp) in cond_wait_kernel() argument
3444 cond_wait_check_alignment(cvp, mp); in cond_wait_kernel()
3452 error = cond_sleep_kernel(cvp, mp, tsp); in cond_wait_kernel()
3461 if ((merror = mutex_lock_impl(mp, NULL)) != 0) in cond_wait_kernel()
3477 cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp) in cond_wait_common() argument
3479 int mtype = mp->mutex_type; in cond_wait_common()
3484 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); in cond_wait_common()
3522 if (!mutex_held(mp)) in cond_wait_common()
3523 lock_error(mp, "cond_wait", cvp, NULL); in cond_wait_common()
3524 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) in cond_wait_common()
3525 lock_error(mp, "recursive mutex in cond_wait", in cond_wait_common()
3529 lock_error(mp, "cond_wait", cvp, in cond_wait_common()
3534 lock_error(mp, "cond_wait", cvp, in cond_wait_common()
3546 rcount = mp->mutex_rcount; in cond_wait_common()
3547 mp->mutex_rcount = 0; in cond_wait_common()
3551 error = cond_wait_kernel(cvp, mp, tsp); in cond_wait_common()
3553 error = cond_wait_queue(cvp, mp, tsp); in cond_wait_common()
3554 mp->mutex_rcount = rcount; in cond_wait_common()
3576 __cond_wait(cond_t *cvp, mutex_t *mp) in __cond_wait() argument
3582 if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) && in __cond_wait()
3583 !mutex_held(mp)) in __cond_wait()
3591 (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted | in __cond_wait()
3594 return (cond_wait_queue(cvp, mp, NULL)); in __cond_wait()
3599 return (cond_wait_common(cvp, mp, NULL)); in __cond_wait()
3604 cond_wait(cond_t *cvp, mutex_t *mp) in cond_wait() argument
3609 error = __cond_wait(cvp, mp); in cond_wait()
3621 pthread_cond_wait(pthread_cond_t *restrict cvp, pthread_mutex_t *restrict mp) in pthread_cond_wait() argument
3625 error = cond_wait((cond_t *)cvp, (mutex_t *)mp); in pthread_cond_wait()
3633 __cond_timedwait(cond_t *cvp, mutex_t *mp, clockid_t clock_id, in __cond_timedwait() argument
3639 if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) && in __cond_timedwait()
3640 !mutex_held(mp)) in __cond_timedwait()
3646 error = cond_wait_common(cvp, mp, &reltime); in __cond_timedwait()
3662 cond_clockwait(cond_t *cvp, mutex_t *mp, clockid_t clock, in cond_clockwait() argument
3668 error = __cond_timedwait(cvp, mp, clock, abstime); in cond_clockwait()
3698 cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime) in cond_timedwait() argument
3700 return (cond_clockwait(cvp, mp, cond_clock(cvp), abstime)); in cond_timedwait()
3710 pthread_mutex_t *restrict mp, clockid_t clock, in pthread_cond_clockwait() argument
3724 error = cond_clockwait((cond_t *)cvp, (mutex_t *)mp, clock, abstime); in pthread_cond_clockwait()
3734 pthread_mutex_t *restrict mp, const struct timespec *restrict abstime) in pthread_cond_timedwait() argument
3737 return (pthread_cond_clockwait(cvp, mp, cond_clock(cond), abstime)); in pthread_cond_timedwait()
3748 __cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime) in __cond_reltimedwait() argument
3752 if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) && in __cond_reltimedwait()
3753 !mutex_held(mp)) in __cond_reltimedwait()
3756 return (cond_wait_common(cvp, mp, &tslocal)); in __cond_reltimedwait()
3760 cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime) in cond_reltimedwait() argument
3765 error = __cond_reltimedwait(cvp, mp, reltime); in cond_reltimedwait()
3775 pthread_mutex_t *restrict mp, clockid_t clock, in pthread_cond_relclockwait_np() argument
3788 error = cond_reltimedwait((cond_t *)cvp, (mutex_t *)mp, reltime); in pthread_cond_relclockwait_np()
3798 pthread_mutex_t *restrict mp, const struct timespec *restrict reltime) in pthread_cond_reltimedwait_np() argument
3801 return (pthread_cond_relclockwait_np(cvp, mp, cond_clock(cond), in pthread_cond_reltimedwait_np()
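The timed condvar waits mirror the mutex clocklock family: __cond_timedwait() reduces an absolute deadline to a relative timespec before cond_wait_common(), and the *_np relclockwait entry points take the relative form directly. A standard-API example of an absolute timed wait; it assumes the condvar was created with pthread_condattr_setclock(CLOCK_MONOTONIC) so the deadline is immune to wall-clock steps:

#include <pthread.h>
#include <stdbool.h>
#include <time.h>
#include <errno.h>

static int
wait_ready_for(pthread_cond_t *cv, pthread_mutex_t *m,
    bool *ready, int seconds)
{
	struct timespec deadline;
	int error = 0;

	(void) clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += seconds;

	pthread_mutex_lock(m);
	while (!*ready && error == 0)
		error = pthread_cond_timedwait(cv, m, &deadline);
	pthread_mutex_unlock(m);
	return (error);		/* 0, or ETIMEDOUT on deadline */
}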
3817 mutex_t *mp; in cond_signal() local
3863 mp = ulwp->ul_cvmutex; /* the mutex it will acquire */ in cond_signal()
3865 ASSERT(mp != NULL); in cond_signal()
3867 if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) { in cond_signal()
3878 mqp = queue_lock(mp, MX); in cond_signal()
3880 mp->mutex_waiters = 1; in cond_signal()
3947 mutex_t *mp; in cond_broadcast() local
3992 mp = ulwp->ul_cvmutex; /* its mutex */ in cond_broadcast()
3994 ASSERT(mp != NULL); in cond_broadcast()
3995 if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) { in cond_broadcast()
4004 if (mp != mp_cache) { in cond_broadcast()
4005 mp_cache = mp; in cond_broadcast()
4008 mqp = queue_lock(mp, MX); in cond_broadcast()
4011 mp->mutex_waiters = 1; in cond_broadcast()