17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5374ae87fSsvemuri * Common Development and Distribution License (the "License"). 6374ae87fSsvemuri * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 22374ae87fSsvemuri * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 237c478bd9Sstevel@tonic-gate * Use is subject to license terms. 247c478bd9Sstevel@tonic-gate */ 257c478bd9Sstevel@tonic-gate 26b3d32f0cSBryan Cantrill /* 27*2c164fafSPatrick Mooney * Copyright 2019 Joyent, Inc. 
28b3d32f0cSBryan Cantrill */ 29b3d32f0cSBryan Cantrill 307c478bd9Sstevel@tonic-gate #include <sys/param.h> 317c478bd9Sstevel@tonic-gate #include <sys/thread.h> 327c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 337c478bd9Sstevel@tonic-gate #include <sys/debug.h> 347c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h> 357c478bd9Sstevel@tonic-gate #include <sys/sobject.h> 367c478bd9Sstevel@tonic-gate #include <sys/turnstile.h> 377c478bd9Sstevel@tonic-gate #include <sys/rwlock.h> 387c478bd9Sstevel@tonic-gate #include <sys/rwlock_impl.h> 397c478bd9Sstevel@tonic-gate #include <sys/atomic.h> 407c478bd9Sstevel@tonic-gate #include <sys/lockstat.h> 417c478bd9Sstevel@tonic-gate 427c478bd9Sstevel@tonic-gate /* 437c478bd9Sstevel@tonic-gate * Big Theory Statement for readers/writer locking primitives. 447c478bd9Sstevel@tonic-gate * 457c478bd9Sstevel@tonic-gate * An rwlock provides exclusive access to a single thread ("writer") or 467c478bd9Sstevel@tonic-gate * concurrent access to multiple threads ("readers"). See rwlock(9F) 477c478bd9Sstevel@tonic-gate * for a full description of the interfaces and programming model. 487c478bd9Sstevel@tonic-gate * The rest of this comment describes the implementation. 497c478bd9Sstevel@tonic-gate * 507c478bd9Sstevel@tonic-gate * An rwlock is a single word with the following structure: 517c478bd9Sstevel@tonic-gate * 527c478bd9Sstevel@tonic-gate * --------------------------------------------------------------------- 537c478bd9Sstevel@tonic-gate * | OWNER (writer) or HOLD COUNT (readers) | WRLOCK | WRWANT | WAIT | 547c478bd9Sstevel@tonic-gate * --------------------------------------------------------------------- 557c478bd9Sstevel@tonic-gate * 63 / 31 .. 3 2 1 0 567c478bd9Sstevel@tonic-gate * 577c478bd9Sstevel@tonic-gate * The waiters bit (0) indicates whether any threads are blocked waiting 587c478bd9Sstevel@tonic-gate * for the lock. 
The write-wanted bit (1) indicates whether any threads 597c478bd9Sstevel@tonic-gate * are blocked waiting for write access. The write-locked bit (2) indicates 607c478bd9Sstevel@tonic-gate * whether the lock is held by a writer, which determines whether the upper 617c478bd9Sstevel@tonic-gate * bits (3..31 in ILP32, 3..63 in LP64) should be interpreted as the owner 627c478bd9Sstevel@tonic-gate * (thread pointer) or the hold count (number of readers). 637c478bd9Sstevel@tonic-gate * 647c478bd9Sstevel@tonic-gate * In the absence of any contention, a writer gets the lock by setting 657c478bd9Sstevel@tonic-gate * this word to (curthread | RW_WRITE_LOCKED); a reader gets the lock 667c478bd9Sstevel@tonic-gate * by incrementing the hold count (i.e. adding 8, aka RW_READ_LOCK). 677c478bd9Sstevel@tonic-gate * 687c478bd9Sstevel@tonic-gate * A writer will fail to acquire the lock if any other thread owns it. 69b3d32f0cSBryan Cantrill * A reader will fail if the lock is either owned (in the RW_READER and 70b3d32f0cSBryan Cantrill * RW_READER_STARVEWRITER cases) or wanted by a writer (in the RW_READER 71b3d32f0cSBryan Cantrill * case). rw_tryenter() returns 0 in these cases; rw_enter() blocks until 72b3d32f0cSBryan Cantrill * the lock becomes available. 737c478bd9Sstevel@tonic-gate * 747c478bd9Sstevel@tonic-gate * When a thread blocks it acquires the rwlock's hashed turnstile lock and 757c478bd9Sstevel@tonic-gate * attempts to set RW_HAS_WAITERS (and RW_WRITE_WANTED in the writer case) 767c478bd9Sstevel@tonic-gate * atomically *only if the lock still appears busy*. A thread must never 777c478bd9Sstevel@tonic-gate * accidentally block for an available lock since there would be no owner 787c478bd9Sstevel@tonic-gate * to awaken it. casip() provides the required atomicity. Once casip() 797c478bd9Sstevel@tonic-gate * succeeds, the decision to block becomes final and irreversible. 
The 807c478bd9Sstevel@tonic-gate * thread will not become runnable again until it has been granted ownership 817c478bd9Sstevel@tonic-gate * of the lock via direct handoff from a former owner as described below. 827c478bd9Sstevel@tonic-gate * 837c478bd9Sstevel@tonic-gate * In the absence of any waiters, rw_exit() just clears the lock (if it 847c478bd9Sstevel@tonic-gate * is write-locked) or decrements the hold count (if it is read-locked). 857c478bd9Sstevel@tonic-gate * Note that even if waiters are present, decrementing the hold count 867c478bd9Sstevel@tonic-gate * to a non-zero value requires no special action since the lock is still 877c478bd9Sstevel@tonic-gate * held by at least one other thread. 887c478bd9Sstevel@tonic-gate * 897c478bd9Sstevel@tonic-gate * On the "final exit" (transition to unheld state) of a lock with waiters, 907c478bd9Sstevel@tonic-gate * rw_exit_wakeup() grabs the turnstile lock and transfers ownership directly 917c478bd9Sstevel@tonic-gate * to the next writer or set of readers. There are several advantages to this 927c478bd9Sstevel@tonic-gate * approach: (1) it closes all windows for priority inversion (when a new 937c478bd9Sstevel@tonic-gate * writer has grabbed the lock but has not yet inherited from blocked readers); 947c478bd9Sstevel@tonic-gate * (2) it prevents starvation of equal-priority threads by granting the lock 957c478bd9Sstevel@tonic-gate * in FIFO order; (3) it eliminates the need for a write-wanted count -- a 967c478bd9Sstevel@tonic-gate * single bit suffices because the lock remains held until all waiting 977c478bd9Sstevel@tonic-gate * writers are gone; (4) when we awaken N readers we can perform a single 987c478bd9Sstevel@tonic-gate * "atomic_add(&x, N)" to set the total hold count rather than having all N 997c478bd9Sstevel@tonic-gate * threads fight for the cache to perform an "atomic_add(&x, 1)" upon wakeup. 
1007c478bd9Sstevel@tonic-gate * 1017c478bd9Sstevel@tonic-gate * The most interesting policy decision in rw_exit_wakeup() is which thread 1027c478bd9Sstevel@tonic-gate * to wake. Starvation is always possible with priority-based scheduling, 1037c478bd9Sstevel@tonic-gate * but any sane wakeup policy should at least satisfy these requirements: 1047c478bd9Sstevel@tonic-gate * 1057c478bd9Sstevel@tonic-gate * (1) The highest-priority thread in the system should not starve. 1067c478bd9Sstevel@tonic-gate * (2) The highest-priority writer should not starve. 1077c478bd9Sstevel@tonic-gate * (3) No writer should starve due to lower-priority threads. 1087c478bd9Sstevel@tonic-gate * (4) No reader should starve due to lower-priority writers. 1097c478bd9Sstevel@tonic-gate * (5) If all threads have equal priority, none of them should starve. 1107c478bd9Sstevel@tonic-gate * 1117c478bd9Sstevel@tonic-gate * We used to employ a writers-always-win policy, which doesn't even 1127c478bd9Sstevel@tonic-gate * satisfy (1): a steady stream of low-priority writers can starve out 1137c478bd9Sstevel@tonic-gate * a real-time reader! This is clearly a broken policy -- it violates 1147c478bd9Sstevel@tonic-gate * (1), (4), and (5) -- but it's how rwlocks always used to behave. 1157c478bd9Sstevel@tonic-gate * 1167c478bd9Sstevel@tonic-gate * A round-robin policy (exiting readers grant the lock to blocked writers 1177c478bd9Sstevel@tonic-gate * and vice versa) satisfies all but (3): a single high-priority writer 1187c478bd9Sstevel@tonic-gate * and many low-priority readers can starve out medium-priority writers. 1197c478bd9Sstevel@tonic-gate * 1207c478bd9Sstevel@tonic-gate * A strict priority policy (grant the lock to the highest priority blocked 1217c478bd9Sstevel@tonic-gate * thread) satisfies everything but (2): a steady stream of high-priority 1227c478bd9Sstevel@tonic-gate * readers can permanently starve the highest-priority writer. 
1237c478bd9Sstevel@tonic-gate * 1247c478bd9Sstevel@tonic-gate * The reason we care about (2) is that it's important to process writers 1257c478bd9Sstevel@tonic-gate * reasonably quickly -- even if they're low priority -- because their very 1267c478bd9Sstevel@tonic-gate * presence causes all readers to take the slow (blocking) path through this 1277c478bd9Sstevel@tonic-gate * code. There is also a general sense that writers deserve some degree of 1287c478bd9Sstevel@tonic-gate * deference because they're updating the data upon which all readers act. 1297c478bd9Sstevel@tonic-gate * Presumably this data should not be allowed to become arbitrarily stale 1307c478bd9Sstevel@tonic-gate * due to writer starvation. Finally, it seems reasonable to level the 1317c478bd9Sstevel@tonic-gate * playing field a bit to compensate for the fact that it's so much harder 1327c478bd9Sstevel@tonic-gate * for a writer to get in when there are already many readers present. 1337c478bd9Sstevel@tonic-gate * 1347c478bd9Sstevel@tonic-gate * A hybrid of round-robin and strict priority can be made to satisfy 1357c478bd9Sstevel@tonic-gate * all five criteria. In this "writer priority policy" exiting readers 1367c478bd9Sstevel@tonic-gate * always grant the lock to waiting writers, but exiting writers only 1377c478bd9Sstevel@tonic-gate * grant the lock to readers of the same or higher priority than the 1387c478bd9Sstevel@tonic-gate * highest-priority blocked writer. Thus requirement (2) is satisfied, 1397c478bd9Sstevel@tonic-gate * necessarily, by a willful act of priority inversion: an exiting reader 1407c478bd9Sstevel@tonic-gate * will grant the lock to a blocked writer even if there are blocked 1417c478bd9Sstevel@tonic-gate * readers of higher priority. The situation is mitigated by the fact 1427c478bd9Sstevel@tonic-gate * that writers always inherit priority from blocked readers, and the 1437c478bd9Sstevel@tonic-gate * writer will awaken those readers as soon as it exits the lock. 
1447c478bd9Sstevel@tonic-gate * 145b3d32f0cSBryan Cantrill * Finally, note that this hybrid scheme -- and indeed, any scheme that 146b3d32f0cSBryan Cantrill * satisfies requirement (2) -- has an important consequence: if a lock is 147b3d32f0cSBryan Cantrill * held as reader and a writer subsequently becomes blocked, any further 148b3d32f0cSBryan Cantrill * readers must be blocked to avoid writer starvation. This implementation 149b3d32f0cSBryan Cantrill * detail has ramifications for the semantics of rwlocks, as it prohibits 150b3d32f0cSBryan Cantrill * recursively acquiring an rwlock as reader: any writer that wishes to 151b3d32f0cSBryan Cantrill * acquire the lock after the first but before the second acquisition as 152b3d32f0cSBryan Cantrill * reader will block the second acquisition -- resulting in deadlock. This 153b3d32f0cSBryan Cantrill * itself is not necessarily prohibitive, as it is often straightforward to 154b3d32f0cSBryan Cantrill * prevent a single thread from recursively acquiring an rwlock as reader. 155b3d32f0cSBryan Cantrill * However, a more subtle situation arises when both a traditional mutex and 156b3d32f0cSBryan Cantrill * a reader lock are acquired by two different threads in opposite order. 157b3d32f0cSBryan Cantrill * (That is, one thread first acquires the mutex and then the rwlock as 158b3d32f0cSBryan Cantrill * reader; the other acquires the rwlock as reader and then the mutex.) As 159b3d32f0cSBryan Cantrill * with the single threaded case, this is fine absent a blocked writer: the 160b3d32f0cSBryan Cantrill * thread that acquires the mutex before acquiring the rwlock as reader will 161b3d32f0cSBryan Cantrill * be able to successfully acquire the rwlock -- even as/if the other thread 162b3d32f0cSBryan Cantrill * has the rwlock as reader and is blocked on the held mutex. 
However, if 163b3d32f0cSBryan Cantrill  * an unrelated writer (that is, a third thread) becomes blocked on the 164b3d32f0cSBryan Cantrill  * rwlock after the first thread acquires the rwlock as reader but before 165b3d32f0cSBryan Cantrill  * it's able to acquire the mutex, the second thread -- with the mutex held 166b3d32f0cSBryan Cantrill  * -- will not be able to acquire the rwlock as reader due to the waiting 167b3d32f0cSBryan Cantrill  * writer, deadlocking the three threads.  Unlike the single-threaded 168b3d32f0cSBryan Cantrill  * (recursive) rwlock acquisition case, this case can be quite a bit 169b3d32f0cSBryan Cantrill  * thornier to fix, especially as there is nothing inherently wrong in the 170b3d32f0cSBryan Cantrill  * locking strategy: the deadlock is really induced by requirement (2), not 171b3d32f0cSBryan Cantrill  * the consumers of the rwlock.  To permit such consumers, we allow rwlock 172b3d32f0cSBryan Cantrill  * acquirers to explicitly opt out of requirement (2) by specifying 173b3d32f0cSBryan Cantrill  * RW_READER_STARVEWRITER when acquiring the rwlock.  This (obviously) means 174b3d32f0cSBryan Cantrill  * that infinite readers can starve writers, but it also allows for 175b3d32f0cSBryan Cantrill  * multiple readers in the presence of other synchronization primitives 176b3d32f0cSBryan Cantrill  * without regard for lock-ordering.  And while certainly odd (and perhaps 177b3d32f0cSBryan Cantrill  * unwise), RW_READER_STARVEWRITER can be safely used alongside RW_READER on 178b3d32f0cSBryan Cantrill  * the same lock -- RW_READER_STARVEWRITER describes only the act of lock 179b3d32f0cSBryan Cantrill  * acquisition with respect to waiting writers, not the lock itself. 180b3d32f0cSBryan Cantrill  * 1817c478bd9Sstevel@tonic-gate  * rw_downgrade() follows the same wakeup policy as an exiting writer. 1827c478bd9Sstevel@tonic-gate  * 1837c478bd9Sstevel@tonic-gate  * rw_tryupgrade() has the same failure mode as rw_tryenter() for a 1847c478bd9Sstevel@tonic-gate  * write lock.
Both honor the WRITE_WANTED bit by specification.
 *
 * The following rules apply to manipulation of rwlock internal state:
 *
 * (1) The rwlock is only modified via the atomic primitives casip()
 *     and atomic_add_ip().
 *
 * (2) The waiters bit and write-wanted bit are only modified under
 *     turnstile_lookup().  This ensures that the turnstile is consistent
 *     with the rwlock.
 *
 * (3) Waiters receive the lock by direct handoff from the previous
 *     owner.  Therefore, waiters *always* wake up holding the lock.
 */

/*
 * The sobj_ops vector exports a set of functions needed when a thread
 * is asleep on a synchronization object of a given type: the object
 * type tag, the owner function, and the turnstile callbacks invoked
 * when a blocked thread's disposition or priority changes.
 */
static sobj_ops_t rw_sobj_ops = {
	SOBJ_RWLOCK, rw_owner, turnstile_stay_asleep, turnstile_change_pri
};

/*
 * If the system panics on an rwlock, save the address of the offending
 * rwlock in panic_rwlock_addr, and save the contents in panic_rwlock.
 */
static rwlock_impl_t panic_rwlock;		/* snapshot of the offending lock */
static rwlock_impl_t *panic_rwlock_addr;	/* its address; CAS-claimed once */

/*
 * Record the offending rwlock and panic.  Only the first caller to claim
 * panic_rwlock_addr (via atomic CAS) snapshots the lock contents, so the
 * panic message reflects the first failure observed.  If a panic is
 * already in progress (panicstr is set), return without doing anything
 * so that rwlock failures during panic do not recurse.
 */
static void
rw_panic(char *msg, rwlock_impl_t *lp)
{
	if (panicstr)
		return;

	if (atomic_cas_ptr(&panic_rwlock_addr, NULL, lp) == NULL)
		panic_rwlock = *lp;

	panic("%s, lp=%p wwwh=%lx thread=%p",
	    msg, (void *)lp, panic_rwlock.rw_wwwh, (void *)curthread);
}

/*
 * Initialize an rwlock to the unheld state.  The name, type, and arg
 * arguments are unused by this implementation (hence ARGSUSED).
 */
/* ARGSUSED */
void
rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
{
	((rwlock_impl_t *)rwlp)->rw_wwwh = 0;
}

/*
 * Destroy an rwlock.  Panics if the lock is still held or was already
 * destroyed; the lock word is then set to the RW_DOUBLE_LOCK sentinel
 * so that later use of the dead lock is detectable.
 */
void
rw_destroy(krwlock_t *rwlp)
{
	rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;

	if (lp->rw_wwwh != 0) {
		if ((lp->rw_wwwh & RW_DOUBLE_LOCK) == RW_DOUBLE_LOCK)
			rw_panic("rw_destroy: lock already destroyed", lp);
		else
			rw_panic("rw_destroy: lock still active", lp);
	}

	lp->rw_wwwh = RW_DOUBLE_LOCK;
}

/*
 * Verify that an rwlock is held correctly: as reader (or
 * RW_READER_STARVEWRITER), the lock must be locked but not write-locked;
 * as writer, curthread must be the owner.  Used only in ASSERTs.
 */
static int
rw_locked(rwlock_impl_t *lp, krw_t rw)
{
	uintptr_t old = lp->rw_wwwh;

	if (rw == RW_READER || rw == RW_READER_STARVEWRITER)
		return ((old & RW_LOCKED) && !(old & RW_WRITE_LOCKED));

	if (rw == RW_WRITER)
		return ((old & RW_OWNER) == (uintptr_t)curthread);

	return (0);
}

/*
 * Optional hooks for backing off in contended CAS loops.  The code below
 * checks only rw_lock_delay before calling rw_lock_backoff, so both are
 * expected to be set (or cleared) together.
 */
uint_t (*rw_lock_backoff)(uint_t) = NULL;
void (*rw_lock_delay)(uint_t) = NULL;

/*
 * Full-service implementation of rw_enter() to handle all the hard cases.
 * Called from the assembly version if anything complicated is going on.
 */
void
rw_enter_sleep(rwlock_impl_t *lp, krw_t rw)
{
	uintptr_t old, new, lock_value, lock_busy, lock_wait;
	hrtime_t sleep_time;
	turnstile_t *ts;
	uint_t backoff = 0;
	int loop_count = 0;

	/*
	 * Select the increment we add on success (lock_value), the bits
	 * that make the lock unavailable to us (lock_busy), and the bits
	 * we must set before blocking (lock_wait).  Note that an
	 * RW_READER_STARVEWRITER acquisition treats only RW_WRITE_LOCKED
	 * -- not RW_WRITE_CLAIMED -- as busy: it deliberately ignores
	 * waiting writers.
	 */
	if (rw == RW_READER) {
		lock_value = RW_READ_LOCK;
		lock_busy = RW_WRITE_CLAIMED;
		lock_wait = RW_HAS_WAITERS;
	} else if (rw == RW_READER_STARVEWRITER) {
		lock_value = RW_READ_LOCK;
		lock_busy = RW_WRITE_LOCKED;
		lock_wait = RW_HAS_WAITERS;
	} else {
		lock_value = RW_WRITE_LOCK(curthread);
		lock_busy = (uintptr_t)RW_LOCKED;
		lock_wait = RW_HAS_WAITERS | RW_WRITE_WANTED;
	}

	for (;;) {
		/*
		 * Fast path: the lock appears available to us; try to take
		 * it with a single casip().  On CAS failure, optionally
		 * back off (if the rw_lock_backoff/rw_lock_delay hooks are
		 * installed) before retrying.
		 */
		if (((old = lp->rw_wwwh) & lock_busy) == 0) {
			if (casip(&lp->rw_wwwh, old, old + lock_value) != old) {
				if (rw_lock_delay != NULL) {
					backoff = rw_lock_backoff(backoff);
					rw_lock_delay(backoff);
					if (++loop_count == ncpus_online) {
						backoff = 0;
						loop_count = 0;
					}
				}
				continue;
			}
			break;
		}

		/*
		 * System is panicking -- don't block; return as if the
		 * lock had been acquired.
		 */
		if (panicstr)
			return;

		if ((old & RW_DOUBLE_LOCK) == RW_DOUBLE_LOCK) {
			rw_panic("rw_enter: bad rwlock", lp);
			return;
		}

		if ((old & RW_OWNER) == (uintptr_t)curthread) {
			rw_panic("recursive rw_enter", lp);
			return;
		}

		/*
		 * Take the turnstile lock, then set the wait bits only if
		 * the lock still appears busy -- we must never block on an
		 * available lock, since no owner would exist to wake us.
		 */
		ts = turnstile_lookup(lp);

		do {
			if (((old = lp->rw_wwwh) & lock_busy) == 0)
				break;
			new = old | lock_wait;
		} while (old != new && casip(&lp->rw_wwwh, old, new) != old);

		if ((old & lock_busy) == 0) {
			/*
			 * The lock appears free now; try the dance again
			 */
			turnstile_exit(lp);
			continue;
		}

		/*
		 * We really are going to block, so bump the stats.
		 */
		ASSERT(lp->rw_wwwh & lock_wait);
		ASSERT(lp->rw_wwwh & RW_LOCKED);

		sleep_time = -gethrtime();
		if (rw != RW_WRITER) {
			CPU_STATS_ADDQ(CPU, sys, rw_rdfails, 1);
			(void) turnstile_block(ts, TS_READER_Q, lp,
			    &rw_sobj_ops, NULL, NULL);
		} else {
			CPU_STATS_ADDQ(CPU, sys, rw_wrfails, 1);
			(void) turnstile_block(ts, TS_WRITER_Q, lp,
			    &rw_sobj_ops, NULL, NULL);
		}
		sleep_time += gethrtime();

		LOCKSTAT_RECORD4(LS_RW_ENTER_BLOCK, lp, sleep_time, rw,
		    (old & RW_WRITE_LOCKED) ? 1 : 0,
		    old >> RW_HOLD_COUNT_SHIFT);

		/*
		 * We wake up holding the lock via direct handoff from the
		 * previous owner.
		 */
		break;
	}

	ASSERT(rw_locked(lp, rw));

	/* Acquire barrier: order lock acquisition before critical section. */
	membar_enter();

	LOCKSTAT_RECORD(LS_RW_ENTER_ACQUIRE, lp, rw);
}

/*
 * Return the number of readers to wake, or zero if we should wake a writer.
 * Called only by exiting/downgrading writers (readers don't wake readers).
 */
static int
rw_readers_to_wake(turnstile_t *ts)
{
	kthread_t *next_writer = ts->ts_sleepq[TS_WRITER_Q].sq_first;
	kthread_t *next_reader = ts->ts_sleepq[TS_READER_Q].sq_first;
	pri_t wpri = (next_writer != NULL) ? DISP_PRIO(next_writer) : -1;
	int count = 0;

	/*
	 * Count the leading blocked readers whose priority is at least
	 * that of the highest-priority blocked writer.  wpri is -1 when
	 * no writer is waiting, so in that case every reader is counted.
	 * NOTE(review): this assumes the turnstile sleep queues keep
	 * sq_first as the highest-priority waiter -- confirm against the
	 * turnstile implementation.
	 */
	while (next_reader != NULL) {
		if (DISP_PRIO(next_reader) < wpri)
			break;
		next_reader = next_reader->t_link;
		count++;
	}
	return (count);
}

/*
 * Full-service implementation of rw_exit() to handle all the hard cases.
 * Called from the assembly version if anything complicated is going on.
 * There is no semantic difference between calling rw_exit() and calling
 * rw_exit_wakeup() directly.
 */
void
rw_exit_wakeup(rwlock_impl_t *lp)
{
	turnstile_t *ts;
	uintptr_t old, new, lock_value;
	kthread_t *next_writer;
	int nreaders;
	uint_t backoff = 0;
	int loop_count = 0;

	/* Release barrier: order the critical section before the release. */
	membar_exit();

	/*
	 * Sanity-check the caller and compute the value to subtract from
	 * the lock word: our owner word if write-locked, one read hold
	 * otherwise.
	 */
	old = lp->rw_wwwh;
	if (old & RW_WRITE_LOCKED) {
		if ((old & RW_OWNER) != (uintptr_t)curthread) {
			rw_panic("rw_exit: not owner", lp);
			/* only reached while panicking; force the lock clear */
			lp->rw_wwwh = 0;
			return;
		}
		lock_value = RW_WRITE_LOCK(curthread);
	} else {
		if ((old & RW_LOCKED) == 0) {
			rw_panic("rw_exit: lock not held", lp);
			return;
		}
		lock_value = RW_READ_LOCK;
	}

	for (;;) {
		/*
		 * If this is *not* the final exit of a lock with waiters,
		 * just drop the lock -- there's nothing tricky going on.
		 */
		old = lp->rw_wwwh;
		new = old - lock_value;
		if ((new & (RW_LOCKED | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			if (casip(&lp->rw_wwwh, old, new) != old) {
				/* CAS lost a race; optionally back off */
				if (rw_lock_delay != NULL) {
					backoff = rw_lock_backoff(backoff);
					rw_lock_delay(backoff);
					if (++loop_count == ncpus_online) {
						backoff = 0;
						loop_count = 0;
					}
				}
				continue;
			}
			break;
		}

		/*
		 * This appears to be the final exit of a lock with waiters.
		 * If we do not have the lock as writer (that is, if this is
		 * the last exit of a reader with waiting writers), we will
		 * grab the lock as writer to prevent additional readers.
		 * (This is required because a reader that is acquiring the
		 * lock via RW_READER_STARVEWRITER will not observe the
		 * RW_WRITE_WANTED bit -- and we could therefore be racing
		 * with such readers here.)
		 */
		if (!(old & RW_WRITE_LOCKED)) {
			new = RW_WRITE_LOCK(curthread) |
			    RW_HAS_WAITERS | RW_WRITE_WANTED;

			if (casip(&lp->rw_wwwh, old, new) != old)
				continue;
		}

		/*
		 * Perform the final exit of a lock that has waiters.
		 */
		ts = turnstile_lookup(lp);

		next_writer = ts->ts_sleepq[TS_WRITER_Q].sq_first;

		if ((old & RW_WRITE_LOCKED) &&
		    (nreaders = rw_readers_to_wake(ts)) > 0) {
			/*
			 * Don't drop the lock -- just set the hold count
			 * such that we grant the lock to all readers at once.
			 */
			new = nreaders * RW_READ_LOCK;
			if (ts->ts_waiters > nreaders)
				new |= RW_HAS_WAITERS;
			if (next_writer)
				new |= RW_WRITE_WANTED;
			lp->rw_wwwh = new;
			membar_enter();
			turnstile_wakeup(ts, TS_READER_Q, nreaders, NULL);
		} else {
			/*
			 * Don't drop the lock -- just transfer ownership
			 * directly to next_writer.  Note that there must
			 * be at least one waiting writer, because we get
			 * here only if (A) the lock is read-locked or
			 * (B) there are no waiting readers.  In case (A),
			 * since the lock is read-locked there would be no
			 * reason for other readers to have blocked unless
			 * the RW_WRITE_WANTED bit was set.  In case (B),
			 * since there are waiters but no waiting readers,
			 * they must all be waiting writers.
			 */
			ASSERT(lp->rw_wwwh & RW_WRITE_WANTED);
			new = RW_WRITE_LOCK(next_writer);
			if (ts->ts_waiters > 1)
				new |= RW_HAS_WAITERS;
			if (next_writer->t_link)
				new |= RW_WRITE_WANTED;
			lp->rw_wwwh = new;
			membar_enter();
			turnstile_wakeup(ts, TS_WRITER_Q, 1, next_writer);
		}
		break;
	}

	if (lock_value == RW_READ_LOCK) {
		LOCKSTAT_RECORD(LS_RW_EXIT_RELEASE, lp, RW_READER);
	} else {
		LOCKSTAT_RECORD(LS_RW_EXIT_RELEASE, lp, RW_WRITER);
	}
}

/*
 * Try to acquire the lock without blocking: returns 0 on failure, 1 on
 * success.  A read attempt fails if the lock is write-locked (or, for
 * RW_READER, merely claimed by a writer); a write attempt fails unless
 * the lock word is entirely clear.
 */
int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
	rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;
	uintptr_t old;

	if (rw != RW_WRITER) {
		uint_t backoff = 0;
		int loop_count = 0;
		for (;;) {
			/*
			 * RW_READER honors RW_WRITE_CLAIMED (write-locked
			 * *or* write-wanted); RW_READER_STARVEWRITER fails
			 * only if the lock is actually write-locked.
			 */
			if ((old = lp->rw_wwwh) & (rw == RW_READER ?
			    RW_WRITE_CLAIMED : RW_WRITE_LOCKED)) {
				return (0);
			}
			if (casip(&lp->rw_wwwh, old, old + RW_READ_LOCK) == old)
				break;
			if (rw_lock_delay != NULL) {
				backoff = rw_lock_backoff(backoff);
				rw_lock_delay(backoff);
				if (++loop_count == ncpus_online) {
					backoff = 0;
					loop_count = 0;
				}
			}
		}
		LOCKSTAT_RECORD(LS_RW_TRYENTER_ACQUIRE, lp, rw);
	} else {
		if (casip(&lp->rw_wwwh, 0, RW_WRITE_LOCK(curthread)) != 0)
			return (0);
		LOCKSTAT_RECORD(LS_RW_TRYENTER_ACQUIRE, lp, rw);
	}
	ASSERT(rw_locked(lp, rw));
	membar_enter();
	return (1);
}

/*
 * Convert a held write lock to a read lock without releasing it.
 * Eligible blocked readers (per rw_readers_to_wake()) are granted the
 * lock along with us -- the same wakeup policy as an exiting writer.
 */
void
rw_downgrade(krwlock_t *rwlp)
{
	rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;

	/* Release barrier before giving up exclusive ownership. */
	membar_exit();

	if ((lp->rw_wwwh & RW_OWNER) != (uintptr_t)curthread) {
		rw_panic("rw_downgrade: not owner", lp);
		return;
	}

	/*
	 * Atomically swap our write hold for a single read hold.  If
	 * waiters exist, wake the eligible readers and add their holds to
	 * the count, clearing RW_HAS_WAITERS when they were the only
	 * waiters.
	 */
	if (atomic_add_ip_nv(&lp->rw_wwwh,
	    RW_READ_LOCK - RW_WRITE_LOCK(curthread)) & RW_HAS_WAITERS) {
		turnstile_t *ts = turnstile_lookup(lp);
		int nreaders = rw_readers_to_wake(ts);
		if (nreaders > 0) {
			uintptr_t delta = nreaders * RW_READ_LOCK;
			if (ts->ts_waiters == nreaders)
				delta -= RW_HAS_WAITERS;
			atomic_add_ip(&lp->rw_wwwh, delta);
		}
		turnstile_wakeup(ts, TS_READER_Q, nreaders, NULL);
	}
	ASSERT(rw_locked(lp, RW_READER));
	LOCKSTAT_RECORD0(LS_RW_DOWNGRADE_DOWNGRADE, lp);
}

/*
 * Try to convert our read lock into a write lock.  Succeeds only if we
 * are the sole reader with no writer claim: any RW_WRITE_WANTED bit in
 * the lock word makes the comparison below fail, so the write-wanted
 * bit is honored by construction.  Returns 0 on failure, 1 on success.
 */
int
rw_tryupgrade(krwlock_t *rwlp)
{
	rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;
	uintptr_t old, new;

	ASSERT(rw_locked(lp, RW_READER));

	do {
		if (((old = lp->rw_wwwh) & ~RW_HAS_WAITERS) != RW_READ_LOCK)
			return (0);
		new = old + RW_WRITE_LOCK(curthread) - RW_READ_LOCK;
	} while (casip(&lp->rw_wwwh, old, new) != old);

	membar_enter();
	LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, lp);
	ASSERT(rw_locked(lp, RW_WRITER));
	return (1);
}

/* Return non-zero iff the lock is currently held for read. */
int
rw_read_held(krwlock_t *rwlp)
{
	uintptr_t tmp;

	return (_RW_READ_HELD(rwlp, tmp));
6167c478bd9Sstevel@tonic-gate } 6177c478bd9Sstevel@tonic-gate 6187c478bd9Sstevel@tonic-gate int 6197c478bd9Sstevel@tonic-gate rw_write_held(krwlock_t *rwlp) 6207c478bd9Sstevel@tonic-gate { 6217c478bd9Sstevel@tonic-gate return (_RW_WRITE_HELD(rwlp)); 6227c478bd9Sstevel@tonic-gate } 6237c478bd9Sstevel@tonic-gate 6247c478bd9Sstevel@tonic-gate int 6257c478bd9Sstevel@tonic-gate rw_lock_held(krwlock_t *rwlp) 6267c478bd9Sstevel@tonic-gate { 6277c478bd9Sstevel@tonic-gate return (_RW_LOCK_HELD(rwlp)); 6287c478bd9Sstevel@tonic-gate } 6297c478bd9Sstevel@tonic-gate 6307c478bd9Sstevel@tonic-gate /* 6317c478bd9Sstevel@tonic-gate * Like rw_read_held(), but ASSERTs that the lock is currently held 6327c478bd9Sstevel@tonic-gate */ 6337c478bd9Sstevel@tonic-gate int 6347c478bd9Sstevel@tonic-gate rw_read_locked(krwlock_t *rwlp) 6357c478bd9Sstevel@tonic-gate { 6367c478bd9Sstevel@tonic-gate uintptr_t old = ((rwlock_impl_t *)rwlp)->rw_wwwh; 6377c478bd9Sstevel@tonic-gate 6387c478bd9Sstevel@tonic-gate ASSERT(old & RW_LOCKED); 6397c478bd9Sstevel@tonic-gate return ((old & RW_LOCKED) && !(old & RW_WRITE_LOCKED)); 6407c478bd9Sstevel@tonic-gate } 6417c478bd9Sstevel@tonic-gate 6427c478bd9Sstevel@tonic-gate /* 6437c478bd9Sstevel@tonic-gate * Returns non-zero if the lock is either held or desired by a writer 6447c478bd9Sstevel@tonic-gate */ 6457c478bd9Sstevel@tonic-gate int 6467c478bd9Sstevel@tonic-gate rw_iswriter(krwlock_t *rwlp) 6477c478bd9Sstevel@tonic-gate { 6487c478bd9Sstevel@tonic-gate return (_RW_ISWRITER(rwlp)); 6497c478bd9Sstevel@tonic-gate } 6507c478bd9Sstevel@tonic-gate 6517c478bd9Sstevel@tonic-gate kthread_t * 6527c478bd9Sstevel@tonic-gate rw_owner(krwlock_t *rwlp) 6537c478bd9Sstevel@tonic-gate { 6547c478bd9Sstevel@tonic-gate uintptr_t old = ((rwlock_impl_t *)rwlp)->rw_wwwh; 6557c478bd9Sstevel@tonic-gate 6567c478bd9Sstevel@tonic-gate return ((old & RW_WRITE_LOCKED) ? (kthread_t *)(old & RW_OWNER) : NULL); 6577c478bd9Sstevel@tonic-gate } 658