xref: /titanic_41/usr/src/uts/common/os/rwlock.c (revision 8b6220d73c6a079b62251e38103a523c41ee541a)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5374ae87fSsvemuri  * Common Development and Distribution License (the "License").
6374ae87fSsvemuri  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22374ae87fSsvemuri  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
26cd04b6efSBryan Cantrill /*
27cd04b6efSBryan Cantrill  * Copyright (c) 2013, Joyent, Inc.  All rights reserved.
28cd04b6efSBryan Cantrill  */
29cd04b6efSBryan Cantrill 
307c478bd9Sstevel@tonic-gate #include <sys/param.h>
317c478bd9Sstevel@tonic-gate #include <sys/thread.h>
327c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
337c478bd9Sstevel@tonic-gate #include <sys/debug.h>
347c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
357c478bd9Sstevel@tonic-gate #include <sys/sobject.h>
367c478bd9Sstevel@tonic-gate #include <sys/turnstile.h>
377c478bd9Sstevel@tonic-gate #include <sys/rwlock.h>
387c478bd9Sstevel@tonic-gate #include <sys/rwlock_impl.h>
397c478bd9Sstevel@tonic-gate #include <sys/atomic.h>
407c478bd9Sstevel@tonic-gate #include <sys/lockstat.h>
417c478bd9Sstevel@tonic-gate 
427c478bd9Sstevel@tonic-gate /*
437c478bd9Sstevel@tonic-gate  * Big Theory Statement for readers/writer locking primitives.
447c478bd9Sstevel@tonic-gate  *
457c478bd9Sstevel@tonic-gate  * An rwlock provides exclusive access to a single thread ("writer") or
467c478bd9Sstevel@tonic-gate  * concurrent access to multiple threads ("readers").  See rwlock(9F)
477c478bd9Sstevel@tonic-gate  * for a full description of the interfaces and programming model.
487c478bd9Sstevel@tonic-gate  * The rest of this comment describes the implementation.
497c478bd9Sstevel@tonic-gate  *
507c478bd9Sstevel@tonic-gate  * An rwlock is a single word with the following structure:
517c478bd9Sstevel@tonic-gate  *
527c478bd9Sstevel@tonic-gate  *	---------------------------------------------------------------------
537c478bd9Sstevel@tonic-gate  *	| OWNER (writer) or HOLD COUNT (readers)   | WRLOCK | WRWANT | WAIT |
547c478bd9Sstevel@tonic-gate  *	---------------------------------------------------------------------
557c478bd9Sstevel@tonic-gate  *			63 / 31 .. 3			2	1	0
567c478bd9Sstevel@tonic-gate  *
577c478bd9Sstevel@tonic-gate  * The waiters bit (0) indicates whether any threads are blocked waiting
587c478bd9Sstevel@tonic-gate  * for the lock.  The write-wanted bit (1) indicates whether any threads
597c478bd9Sstevel@tonic-gate  * are blocked waiting for write access.  The write-locked bit (2) indicates
607c478bd9Sstevel@tonic-gate  * whether the lock is held by a writer, which determines whether the upper
617c478bd9Sstevel@tonic-gate  * bits (3..31 in ILP32, 3..63 in LP64) should be interpreted as the owner
627c478bd9Sstevel@tonic-gate  * (thread pointer) or the hold count (number of readers).
637c478bd9Sstevel@tonic-gate  *
647c478bd9Sstevel@tonic-gate  * In the absence of any contention, a writer gets the lock by setting
657c478bd9Sstevel@tonic-gate  * this word to (curthread | RW_WRITE_LOCKED); a reader gets the lock
667c478bd9Sstevel@tonic-gate  * by incrementing the hold count (i.e. adding 8, aka RW_READ_LOCK).
677c478bd9Sstevel@tonic-gate  *
687c478bd9Sstevel@tonic-gate  * A writer will fail to acquire the lock if any other thread owns it.
69cd04b6efSBryan Cantrill  * A reader will fail if the lock is either owned (in the RW_READER and
70cd04b6efSBryan Cantrill  * RW_READER_STARVEWRITER cases) or wanted by a writer (in the RW_READER
71cd04b6efSBryan Cantrill  * case). rw_tryenter() returns 0 in these cases; rw_enter() blocks until
72cd04b6efSBryan Cantrill  * the lock becomes available.
737c478bd9Sstevel@tonic-gate  *
747c478bd9Sstevel@tonic-gate  * When a thread blocks it acquires the rwlock's hashed turnstile lock and
757c478bd9Sstevel@tonic-gate  * attempts to set RW_HAS_WAITERS (and RW_WRITE_WANTED in the writer case)
767c478bd9Sstevel@tonic-gate  * atomically *only if the lock still appears busy*.  A thread must never
777c478bd9Sstevel@tonic-gate  * accidentally block for an available lock since there would be no owner
787c478bd9Sstevel@tonic-gate  * to awaken it.  casip() provides the required atomicity.  Once casip()
797c478bd9Sstevel@tonic-gate  * succeeds, the decision to block becomes final and irreversible.  The
807c478bd9Sstevel@tonic-gate  * thread will not become runnable again until it has been granted ownership
817c478bd9Sstevel@tonic-gate  * of the lock via direct handoff from a former owner as described below.
827c478bd9Sstevel@tonic-gate  *
837c478bd9Sstevel@tonic-gate  * In the absence of any waiters, rw_exit() just clears the lock (if it
847c478bd9Sstevel@tonic-gate  * is write-locked) or decrements the hold count (if it is read-locked).
857c478bd9Sstevel@tonic-gate  * Note that even if waiters are present, decrementing the hold count
867c478bd9Sstevel@tonic-gate  * to a non-zero value requires no special action since the lock is still
877c478bd9Sstevel@tonic-gate  * held by at least one other thread.
887c478bd9Sstevel@tonic-gate  *
897c478bd9Sstevel@tonic-gate  * On the "final exit" (transition to unheld state) of a lock with waiters,
907c478bd9Sstevel@tonic-gate  * rw_exit_wakeup() grabs the turnstile lock and transfers ownership directly
917c478bd9Sstevel@tonic-gate  * to the next writer or set of readers.  There are several advantages to this
927c478bd9Sstevel@tonic-gate  * approach: (1) it closes all windows for priority inversion (when a new
937c478bd9Sstevel@tonic-gate  * writer has grabbed the lock but has not yet inherited from blocked readers);
947c478bd9Sstevel@tonic-gate  * (2) it prevents starvation of equal-priority threads by granting the lock
957c478bd9Sstevel@tonic-gate  * in FIFO order; (3) it eliminates the need for a write-wanted count -- a
967c478bd9Sstevel@tonic-gate  * single bit suffices because the lock remains held until all waiting
977c478bd9Sstevel@tonic-gate  * writers are gone; (4) when we awaken N readers we can perform a single
987c478bd9Sstevel@tonic-gate  * "atomic_add(&x, N)" to set the total hold count rather than having all N
997c478bd9Sstevel@tonic-gate  * threads fight for the cache to perform an "atomic_add(&x, 1)" upon wakeup.
1007c478bd9Sstevel@tonic-gate  *
1017c478bd9Sstevel@tonic-gate  * The most interesting policy decision in rw_exit_wakeup() is which thread
1027c478bd9Sstevel@tonic-gate  * to wake.  Starvation is always possible with priority-based scheduling,
1037c478bd9Sstevel@tonic-gate  * but any sane wakeup policy should at least satisfy these requirements:
1047c478bd9Sstevel@tonic-gate  *
1057c478bd9Sstevel@tonic-gate  * (1) The highest-priority thread in the system should not starve.
1067c478bd9Sstevel@tonic-gate  * (2) The highest-priority writer should not starve.
1077c478bd9Sstevel@tonic-gate  * (3) No writer should starve due to lower-priority threads.
1087c478bd9Sstevel@tonic-gate  * (4) No reader should starve due to lower-priority writers.
1097c478bd9Sstevel@tonic-gate  * (5) If all threads have equal priority, none of them should starve.
1107c478bd9Sstevel@tonic-gate  *
1117c478bd9Sstevel@tonic-gate  * We used to employ a writers-always-win policy, which doesn't even
1127c478bd9Sstevel@tonic-gate  * satisfy (1): a steady stream of low-priority writers can starve out
1137c478bd9Sstevel@tonic-gate  * a real-time reader!  This is clearly a broken policy -- it violates
1147c478bd9Sstevel@tonic-gate  * (1), (4), and (5) -- but it's how rwlocks always used to behave.
1157c478bd9Sstevel@tonic-gate  *
1167c478bd9Sstevel@tonic-gate  * A round-robin policy (exiting readers grant the lock to blocked writers
1177c478bd9Sstevel@tonic-gate  * and vice versa) satisfies all but (3): a single high-priority writer
1187c478bd9Sstevel@tonic-gate  * and many low-priority readers can starve out medium-priority writers.
1197c478bd9Sstevel@tonic-gate  *
1207c478bd9Sstevel@tonic-gate  * A strict priority policy (grant the lock to the highest priority blocked
1217c478bd9Sstevel@tonic-gate  * thread) satisfies everything but (2): a steady stream of high-priority
1227c478bd9Sstevel@tonic-gate  * readers can permanently starve the highest-priority writer.
1237c478bd9Sstevel@tonic-gate  *
1247c478bd9Sstevel@tonic-gate  * The reason we care about (2) is that it's important to process writers
1257c478bd9Sstevel@tonic-gate  * reasonably quickly -- even if they're low priority -- because their very
1267c478bd9Sstevel@tonic-gate  * presence causes all readers to take the slow (blocking) path through this
1277c478bd9Sstevel@tonic-gate  * code.  There is also a general sense that writers deserve some degree of
1287c478bd9Sstevel@tonic-gate  * deference because they're updating the data upon which all readers act.
1297c478bd9Sstevel@tonic-gate  * Presumably this data should not be allowed to become arbitrarily stale
1307c478bd9Sstevel@tonic-gate  * due to writer starvation.  Finally, it seems reasonable to level the
1317c478bd9Sstevel@tonic-gate  * playing field a bit to compensate for the fact that it's so much harder
1327c478bd9Sstevel@tonic-gate  * for a writer to get in when there are already many readers present.
1337c478bd9Sstevel@tonic-gate  *
1347c478bd9Sstevel@tonic-gate  * A hybrid of round-robin and strict priority can be made to satisfy
1357c478bd9Sstevel@tonic-gate  * all five criteria.  In this "writer priority policy" exiting readers
1367c478bd9Sstevel@tonic-gate  * always grant the lock to waiting writers, but exiting writers only
1377c478bd9Sstevel@tonic-gate  * grant the lock to readers of the same or higher priority than the
1387c478bd9Sstevel@tonic-gate  * highest-priority blocked writer.  Thus requirement (2) is satisfied,
1397c478bd9Sstevel@tonic-gate  * necessarily, by a willful act of priority inversion: an exiting reader
1407c478bd9Sstevel@tonic-gate  * will grant the lock to a blocked writer even if there are blocked
1417c478bd9Sstevel@tonic-gate  * readers of higher priority.  The situation is mitigated by the fact
1427c478bd9Sstevel@tonic-gate  * that writers always inherit priority from blocked readers, and the
1437c478bd9Sstevel@tonic-gate  * writer will awaken those readers as soon as it exits the lock.
1447c478bd9Sstevel@tonic-gate  *
145cd04b6efSBryan Cantrill  * Finally, note that this hybrid scheme -- and indeed, any scheme that
146cd04b6efSBryan Cantrill  * satisfies requirement (2) -- has an important consequence:  if a lock is
147cd04b6efSBryan Cantrill  * held as reader and a writer subsequently becomes blocked, any further
148cd04b6efSBryan Cantrill  * readers must be blocked to avoid writer starvation.  This implementation
149cd04b6efSBryan Cantrill  * detail has ramifications for the semantics of rwlocks, as it prohibits
150cd04b6efSBryan Cantrill  * recursively acquiring an rwlock as reader: any writer that wishes to
151cd04b6efSBryan Cantrill  * acquire the lock after the first but before the second acquisition as
152cd04b6efSBryan Cantrill  * reader will block the second acquisition -- resulting in deadlock.  This
153cd04b6efSBryan Cantrill  * itself is not necessarily prohibitive, as it is often straightforward to
154cd04b6efSBryan Cantrill  * prevent a single thread from recursively acquiring an rwlock as reader.
155cd04b6efSBryan Cantrill  * However, a more subtle situation arises when both a traditional mutex and
156cd04b6efSBryan Cantrill  * a reader lock are acquired by two different threads in opposite order.
157cd04b6efSBryan Cantrill  * (That is, one thread first acquires the mutex and then the rwlock as
158cd04b6efSBryan Cantrill  * reader; the other acquires the rwlock as reader and then the mutex.) As
159cd04b6efSBryan Cantrill  * with the single threaded case, this is fine absent a blocked writer: the
160cd04b6efSBryan Cantrill  * thread that acquires the mutex before acquiring the rwlock as reader will
161cd04b6efSBryan Cantrill  * be able to successfully acquire the rwlock -- even as/if the other thread
162cd04b6efSBryan Cantrill  * has the rwlock as reader and is blocked on the held mutex.  However, if
163cd04b6efSBryan Cantrill  * an unrelated writer (that is, a third thread) becomes blocked on the
164cd04b6efSBryan Cantrill  * rwlock after the first thread acquires the rwlock as reader but before
165cd04b6efSBryan Cantrill  * it's able to acquire the mutex, the second thread -- with the mutex held
166cd04b6efSBryan Cantrill  * -- will not be able to acquire the rwlock as reader due to the waiting
167cd04b6efSBryan Cantrill  * writer, deadlocking the three threads.  Unlike the single-threaded
168cd04b6efSBryan Cantrill  * (recursive) rwlock acquisition case, this case can be quite a bit
169cd04b6efSBryan Cantrill  * thornier to fix, especially as there is nothing inherently wrong in the
170cd04b6efSBryan Cantrill  * locking strategy: the deadlock is really induced by requirement (2), not
171cd04b6efSBryan Cantrill  * the consumers of the rwlock.  To permit such consumers, we allow rwlock
172cd04b6efSBryan Cantrill  * acquirers to explicitly opt out of requirement (2) by specifying
173cd04b6efSBryan Cantrill  * RW_READER_STARVEWRITER when acquiring the rwlock.  This (obviously) means
174cd04b6efSBryan Cantrill  * that infinite readers can starve writers, but it also allows for
175cd04b6efSBryan Cantrill  * multiple readers in the presence of other synchronization primitives
176cd04b6efSBryan Cantrill  * without regard for lock-ordering.  And while certainly odd (and perhaps
177cd04b6efSBryan Cantrill  * unwise), RW_READER_STARVEWRITER can be safely used alongside RW_READER on
178cd04b6efSBryan Cantrill  * the same lock -- RW_READER_STARVEWRITER describes only the act of lock
179cd04b6efSBryan Cantrill  * acquisition with respect to waiting writers, not the lock itself.
180cd04b6efSBryan Cantrill  *
1817c478bd9Sstevel@tonic-gate  * rw_downgrade() follows the same wakeup policy as an exiting writer.
1827c478bd9Sstevel@tonic-gate  *
1837c478bd9Sstevel@tonic-gate  * rw_tryupgrade() has the same failure mode as rw_tryenter() for a
1847c478bd9Sstevel@tonic-gate  * write lock.  Both honor the WRITE_WANTED bit by specification.
1857c478bd9Sstevel@tonic-gate  *
1867c478bd9Sstevel@tonic-gate  * The following rules apply to manipulation of rwlock internal state:
1877c478bd9Sstevel@tonic-gate  *
1887c478bd9Sstevel@tonic-gate  * (1) The rwlock is only modified via the atomic primitives casip()
1897c478bd9Sstevel@tonic-gate  *     and atomic_add_ip().
1907c478bd9Sstevel@tonic-gate  *
1917c478bd9Sstevel@tonic-gate  * (2) The waiters bit and write-wanted bit are only modified under
1927c478bd9Sstevel@tonic-gate  *     turnstile_lookup().  This ensures that the turnstile is consistent
1937c478bd9Sstevel@tonic-gate  *     with the rwlock.
1947c478bd9Sstevel@tonic-gate  *
1957c478bd9Sstevel@tonic-gate  * (3) Waiters receive the lock by direct handoff from the previous
1967c478bd9Sstevel@tonic-gate  *     owner.  Therefore, waiters *always* wake up holding the lock.
1977c478bd9Sstevel@tonic-gate  */
1987c478bd9Sstevel@tonic-gate 
1997c478bd9Sstevel@tonic-gate /*
2007c478bd9Sstevel@tonic-gate  * The sobj_ops vector exports a set of functions needed when a thread
2017c478bd9Sstevel@tonic-gate  * is asleep on a synchronization object of a given type.
2027c478bd9Sstevel@tonic-gate  */
2037c478bd9Sstevel@tonic-gate static sobj_ops_t rw_sobj_ops = {
	/*
	 * Member order follows sobj_ops_t: the sync-object type tag, the
	 * owner-lookup function, the stay-asleep policy, and the
	 * priority-change hook (the latter two from the turnstile code).
	 */
2047c478bd9Sstevel@tonic-gate 	SOBJ_RWLOCK, rw_owner, turnstile_stay_asleep, turnstile_change_pri
2057c478bd9Sstevel@tonic-gate };
2067c478bd9Sstevel@tonic-gate 
2077c478bd9Sstevel@tonic-gate /*
2087c478bd9Sstevel@tonic-gate  * If the system panics on an rwlock, save the address of the offending
2097c478bd9Sstevel@tonic-gate  * rwlock in panic_rwlock_addr, and save the contents in panic_rwlock.
2107c478bd9Sstevel@tonic-gate  */
2117c478bd9Sstevel@tonic-gate static rwlock_impl_t panic_rwlock;
2127c478bd9Sstevel@tonic-gate static rwlock_impl_t *panic_rwlock_addr;
2137c478bd9Sstevel@tonic-gate 
2147c478bd9Sstevel@tonic-gate static void
rw_panic(char * msg,rwlock_impl_t * lp)2157c478bd9Sstevel@tonic-gate rw_panic(char *msg, rwlock_impl_t *lp)
2167c478bd9Sstevel@tonic-gate {
2177c478bd9Sstevel@tonic-gate 	if (panicstr)
2187c478bd9Sstevel@tonic-gate 		return;
2197c478bd9Sstevel@tonic-gate 
220*8b6220d7SJosef 'Jeff' Sipek 	if (atomic_cas_ptr(&panic_rwlock_addr, NULL, lp) == NULL)
2217c478bd9Sstevel@tonic-gate 		panic_rwlock = *lp;
2227c478bd9Sstevel@tonic-gate 
2237c478bd9Sstevel@tonic-gate 	panic("%s, lp=%p wwwh=%lx thread=%p",
2248793b36bSNick Todd 	    msg, (void *)lp, panic_rwlock.rw_wwwh, (void *)curthread);
2257c478bd9Sstevel@tonic-gate }
2267c478bd9Sstevel@tonic-gate 
2277c478bd9Sstevel@tonic-gate /* ARGSUSED */
2287c478bd9Sstevel@tonic-gate void
rw_init(krwlock_t * rwlp,char * name,krw_type_t type,void * arg)2297c478bd9Sstevel@tonic-gate rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
2307c478bd9Sstevel@tonic-gate {
2317c478bd9Sstevel@tonic-gate 	((rwlock_impl_t *)rwlp)->rw_wwwh = 0;
2327c478bd9Sstevel@tonic-gate }
2337c478bd9Sstevel@tonic-gate 
2347c478bd9Sstevel@tonic-gate void
rw_destroy(krwlock_t * rwlp)2357c478bd9Sstevel@tonic-gate rw_destroy(krwlock_t *rwlp)
2367c478bd9Sstevel@tonic-gate {
2377c478bd9Sstevel@tonic-gate 	rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;
2387c478bd9Sstevel@tonic-gate 
2397c478bd9Sstevel@tonic-gate 	if (lp->rw_wwwh != 0) {
2407c478bd9Sstevel@tonic-gate 		if ((lp->rw_wwwh & RW_DOUBLE_LOCK) == RW_DOUBLE_LOCK)
2417c478bd9Sstevel@tonic-gate 			rw_panic("rw_destroy: lock already destroyed", lp);
2427c478bd9Sstevel@tonic-gate 		else
2437c478bd9Sstevel@tonic-gate 			rw_panic("rw_destroy: lock still active", lp);
2447c478bd9Sstevel@tonic-gate 	}
2457c478bd9Sstevel@tonic-gate 
2467c478bd9Sstevel@tonic-gate 	lp->rw_wwwh = RW_DOUBLE_LOCK;
2477c478bd9Sstevel@tonic-gate }
2487c478bd9Sstevel@tonic-gate 
2497c478bd9Sstevel@tonic-gate /*
2507c478bd9Sstevel@tonic-gate  * Verify that an rwlock is held correctly.
2517c478bd9Sstevel@tonic-gate  */
2527c478bd9Sstevel@tonic-gate static int
rw_locked(rwlock_impl_t * lp,krw_t rw)2537c478bd9Sstevel@tonic-gate rw_locked(rwlock_impl_t *lp, krw_t rw)
2547c478bd9Sstevel@tonic-gate {
2557c478bd9Sstevel@tonic-gate 	uintptr_t old = lp->rw_wwwh;
2567c478bd9Sstevel@tonic-gate 
257cd04b6efSBryan Cantrill 	if (rw == RW_READER || rw == RW_READER_STARVEWRITER)
2587c478bd9Sstevel@tonic-gate 		return ((old & RW_LOCKED) && !(old & RW_WRITE_LOCKED));
2597c478bd9Sstevel@tonic-gate 
2607c478bd9Sstevel@tonic-gate 	if (rw == RW_WRITER)
2617c478bd9Sstevel@tonic-gate 		return ((old & RW_OWNER) == (uintptr_t)curthread);
2627c478bd9Sstevel@tonic-gate 
2637c478bd9Sstevel@tonic-gate 	return (0);
2647c478bd9Sstevel@tonic-gate }
2657c478bd9Sstevel@tonic-gate 
/*
 * Optional backoff hooks for contended CAS retries: when rw_lock_delay
 * is non-NULL, rw_enter_sleep() (and rw_exit_wakeup()) call
 * rw_lock_backoff()/rw_lock_delay() between failed casip() attempts.
 * NULL by default; presumably overridden by platform code -- confirm.
 */
266374ae87fSsvemuri uint_t (*rw_lock_backoff)(uint_t) = NULL;
267374ae87fSsvemuri void (*rw_lock_delay)(uint_t) = NULL;
268374ae87fSsvemuri 
2697c478bd9Sstevel@tonic-gate /*
2707c478bd9Sstevel@tonic-gate  * Full-service implementation of rw_enter() to handle all the hard cases.
2717c478bd9Sstevel@tonic-gate  * Called from the assembly version if anything complicated is going on.
2727c478bd9Sstevel@tonic-gate  * The only semantic difference between calling rw_enter() and calling
2737c478bd9Sstevel@tonic-gate  * rw_enter_sleep() directly is that we assume the caller has already done
274cd04b6efSBryan Cantrill  * a THREAD_KPRI_REQUEST() in the RW_READER cases.
2757c478bd9Sstevel@tonic-gate  */
2767c478bd9Sstevel@tonic-gate void
rw_enter_sleep(rwlock_impl_t * lp,krw_t rw)2777c478bd9Sstevel@tonic-gate rw_enter_sleep(rwlock_impl_t *lp, krw_t rw)
2787c478bd9Sstevel@tonic-gate {
2797c478bd9Sstevel@tonic-gate 	uintptr_t old, new, lock_value, lock_busy, lock_wait;
2807c478bd9Sstevel@tonic-gate 	hrtime_t sleep_time;
2817c478bd9Sstevel@tonic-gate 	turnstile_t *ts;
282374ae87fSsvemuri 	uint_t  backoff = 0;
283374ae87fSsvemuri 	int loop_count = 0;
2847c478bd9Sstevel@tonic-gate 
2857c478bd9Sstevel@tonic-gate 	if (rw == RW_READER) {
2867c478bd9Sstevel@tonic-gate 		lock_value = RW_READ_LOCK;
2877c478bd9Sstevel@tonic-gate 		lock_busy = RW_WRITE_CLAIMED;
2887c478bd9Sstevel@tonic-gate 		lock_wait = RW_HAS_WAITERS;
289cd04b6efSBryan Cantrill 	} else if (rw == RW_READER_STARVEWRITER) {
290cd04b6efSBryan Cantrill 		lock_value = RW_READ_LOCK;
291cd04b6efSBryan Cantrill 		lock_busy = RW_WRITE_LOCKED;
292cd04b6efSBryan Cantrill 		lock_wait = RW_HAS_WAITERS;
2937c478bd9Sstevel@tonic-gate 	} else {
2947c478bd9Sstevel@tonic-gate 		lock_value = RW_WRITE_LOCK(curthread);
2957c478bd9Sstevel@tonic-gate 		lock_busy = (uintptr_t)RW_LOCKED;
2967c478bd9Sstevel@tonic-gate 		lock_wait = RW_HAS_WAITERS | RW_WRITE_WANTED;
2977c478bd9Sstevel@tonic-gate 	}
2987c478bd9Sstevel@tonic-gate 
2997c478bd9Sstevel@tonic-gate 	for (;;) {
3007c478bd9Sstevel@tonic-gate 		if (((old = lp->rw_wwwh) & lock_busy) == 0) {
301374ae87fSsvemuri 			if (casip(&lp->rw_wwwh, old, old + lock_value) != old) {
302374ae87fSsvemuri 				if (rw_lock_delay != NULL) {
303374ae87fSsvemuri 					backoff = rw_lock_backoff(backoff);
304374ae87fSsvemuri 					rw_lock_delay(backoff);
305374ae87fSsvemuri 					if (++loop_count == ncpus_online) {
306374ae87fSsvemuri 						backoff = 0;
307374ae87fSsvemuri 						loop_count = 0;
308374ae87fSsvemuri 					}
309374ae87fSsvemuri 				}
3107c478bd9Sstevel@tonic-gate 				continue;
311374ae87fSsvemuri 			}
3127c478bd9Sstevel@tonic-gate 			break;
3137c478bd9Sstevel@tonic-gate 		}
3147c478bd9Sstevel@tonic-gate 
3157c478bd9Sstevel@tonic-gate 		if (panicstr)
3167c478bd9Sstevel@tonic-gate 			return;
3177c478bd9Sstevel@tonic-gate 
3187c478bd9Sstevel@tonic-gate 		if ((old & RW_DOUBLE_LOCK) == RW_DOUBLE_LOCK) {
3197c478bd9Sstevel@tonic-gate 			rw_panic("rw_enter: bad rwlock", lp);
3207c478bd9Sstevel@tonic-gate 			return;
3217c478bd9Sstevel@tonic-gate 		}
3227c478bd9Sstevel@tonic-gate 
3237c478bd9Sstevel@tonic-gate 		if ((old & RW_OWNER) == (uintptr_t)curthread) {
3247c478bd9Sstevel@tonic-gate 			rw_panic("recursive rw_enter", lp);
3257c478bd9Sstevel@tonic-gate 			return;
3267c478bd9Sstevel@tonic-gate 		}
3277c478bd9Sstevel@tonic-gate 
3287c478bd9Sstevel@tonic-gate 		ts = turnstile_lookup(lp);
3297c478bd9Sstevel@tonic-gate 
3307c478bd9Sstevel@tonic-gate 		do {
3317c478bd9Sstevel@tonic-gate 			if (((old = lp->rw_wwwh) & lock_busy) == 0)
3327c478bd9Sstevel@tonic-gate 				break;
3337c478bd9Sstevel@tonic-gate 			new = old | lock_wait;
3347c478bd9Sstevel@tonic-gate 		} while (old != new && casip(&lp->rw_wwwh, old, new) != old);
3357c478bd9Sstevel@tonic-gate 
3367c478bd9Sstevel@tonic-gate 		if ((old & lock_busy) == 0) {
3377c478bd9Sstevel@tonic-gate 			/*
3387c478bd9Sstevel@tonic-gate 			 * The lock appears free now; try the dance again
3397c478bd9Sstevel@tonic-gate 			 */
3407c478bd9Sstevel@tonic-gate 			turnstile_exit(lp);
3417c478bd9Sstevel@tonic-gate 			continue;
3427c478bd9Sstevel@tonic-gate 		}
3437c478bd9Sstevel@tonic-gate 
3447c478bd9Sstevel@tonic-gate 		/*
3457c478bd9Sstevel@tonic-gate 		 * We really are going to block.  Bump the stats, and drop
3467c478bd9Sstevel@tonic-gate 		 * kpri if we're a reader.
3477c478bd9Sstevel@tonic-gate 		 */
3487c478bd9Sstevel@tonic-gate 		ASSERT(lp->rw_wwwh & lock_wait);
3497c478bd9Sstevel@tonic-gate 		ASSERT(lp->rw_wwwh & RW_LOCKED);
3507c478bd9Sstevel@tonic-gate 
3517c478bd9Sstevel@tonic-gate 		sleep_time = -gethrtime();
352cd04b6efSBryan Cantrill 		if (rw != RW_WRITER) {
3537c478bd9Sstevel@tonic-gate 			THREAD_KPRI_RELEASE();
3547c478bd9Sstevel@tonic-gate 			CPU_STATS_ADDQ(CPU, sys, rw_rdfails, 1);
3557c478bd9Sstevel@tonic-gate 			(void) turnstile_block(ts, TS_READER_Q, lp,
3567c478bd9Sstevel@tonic-gate 			    &rw_sobj_ops, NULL, NULL);
3577c478bd9Sstevel@tonic-gate 		} else {
3587c478bd9Sstevel@tonic-gate 			CPU_STATS_ADDQ(CPU, sys, rw_wrfails, 1);
3597c478bd9Sstevel@tonic-gate 			(void) turnstile_block(ts, TS_WRITER_Q, lp,
3607c478bd9Sstevel@tonic-gate 			    &rw_sobj_ops, NULL, NULL);
3617c478bd9Sstevel@tonic-gate 		}
3627c478bd9Sstevel@tonic-gate 		sleep_time += gethrtime();
3637c478bd9Sstevel@tonic-gate 
3647c478bd9Sstevel@tonic-gate 		LOCKSTAT_RECORD4(LS_RW_ENTER_BLOCK, lp, sleep_time, rw,
3657c478bd9Sstevel@tonic-gate 		    (old & RW_WRITE_LOCKED) ? 1 : 0,
3667c478bd9Sstevel@tonic-gate 		    old >> RW_HOLD_COUNT_SHIFT);
3677c478bd9Sstevel@tonic-gate 
3687c478bd9Sstevel@tonic-gate 		/*
3697c478bd9Sstevel@tonic-gate 		 * We wake up holding the lock (and having kpri if we're
3707c478bd9Sstevel@tonic-gate 		 * a reader) via direct handoff from the previous owner.
3717c478bd9Sstevel@tonic-gate 		 */
3727c478bd9Sstevel@tonic-gate 		break;
3737c478bd9Sstevel@tonic-gate 	}
3747c478bd9Sstevel@tonic-gate 
3757c478bd9Sstevel@tonic-gate 	ASSERT(rw_locked(lp, rw));
3767c478bd9Sstevel@tonic-gate 
3777c478bd9Sstevel@tonic-gate 	membar_enter();
3787c478bd9Sstevel@tonic-gate 
3797c478bd9Sstevel@tonic-gate 	LOCKSTAT_RECORD(LS_RW_ENTER_ACQUIRE, lp, rw);
3807c478bd9Sstevel@tonic-gate }
3817c478bd9Sstevel@tonic-gate 
3827c478bd9Sstevel@tonic-gate /*
3837c478bd9Sstevel@tonic-gate  * Return the number of readers to wake, or zero if we should wake a writer.
3847c478bd9Sstevel@tonic-gate  * Called only by exiting/downgrading writers (readers don't wake readers).
3857c478bd9Sstevel@tonic-gate  */
3867c478bd9Sstevel@tonic-gate static int
rw_readers_to_wake(turnstile_t * ts)3877c478bd9Sstevel@tonic-gate rw_readers_to_wake(turnstile_t *ts)
3887c478bd9Sstevel@tonic-gate {
3897c478bd9Sstevel@tonic-gate 	kthread_t *next_writer = ts->ts_sleepq[TS_WRITER_Q].sq_first;
3907c478bd9Sstevel@tonic-gate 	kthread_t *next_reader = ts->ts_sleepq[TS_READER_Q].sq_first;
3917c478bd9Sstevel@tonic-gate 	pri_t wpri = (next_writer != NULL) ? DISP_PRIO(next_writer) : -1;
3927c478bd9Sstevel@tonic-gate 	int count = 0;
3937c478bd9Sstevel@tonic-gate 
3947c478bd9Sstevel@tonic-gate 	while (next_reader != NULL) {
3957c478bd9Sstevel@tonic-gate 		if (DISP_PRIO(next_reader) < wpri)
3967c478bd9Sstevel@tonic-gate 			break;
3977c478bd9Sstevel@tonic-gate 		next_reader->t_kpri_req++;
3987c478bd9Sstevel@tonic-gate 		next_reader = next_reader->t_link;
3997c478bd9Sstevel@tonic-gate 		count++;
4007c478bd9Sstevel@tonic-gate 	}
4017c478bd9Sstevel@tonic-gate 	return (count);
4027c478bd9Sstevel@tonic-gate }
4037c478bd9Sstevel@tonic-gate 
4047c478bd9Sstevel@tonic-gate /*
4057c478bd9Sstevel@tonic-gate  * Full-service implementation of rw_exit() to handle all the hard cases.
4067c478bd9Sstevel@tonic-gate  * Called from the assembly version if anything complicated is going on.
4077c478bd9Sstevel@tonic-gate  * There is no semantic difference between calling rw_exit() and calling
4087c478bd9Sstevel@tonic-gate  * rw_exit_wakeup() directly.
4097c478bd9Sstevel@tonic-gate  */
4107c478bd9Sstevel@tonic-gate void
rw_exit_wakeup(rwlock_impl_t * lp)4117c478bd9Sstevel@tonic-gate rw_exit_wakeup(rwlock_impl_t *lp)
4127c478bd9Sstevel@tonic-gate {
4137c478bd9Sstevel@tonic-gate 	turnstile_t *ts;
4147c478bd9Sstevel@tonic-gate 	uintptr_t old, new, lock_value;
4157c478bd9Sstevel@tonic-gate 	kthread_t *next_writer;
4167c478bd9Sstevel@tonic-gate 	int nreaders;
417374ae87fSsvemuri 	uint_t  backoff = 0;
418374ae87fSsvemuri 	int loop_count = 0;
4197c478bd9Sstevel@tonic-gate 
4207c478bd9Sstevel@tonic-gate 	membar_exit();
4217c478bd9Sstevel@tonic-gate 
4227c478bd9Sstevel@tonic-gate 	old = lp->rw_wwwh;
4237c478bd9Sstevel@tonic-gate 	if (old & RW_WRITE_LOCKED) {
4247c478bd9Sstevel@tonic-gate 		if ((old & RW_OWNER) != (uintptr_t)curthread) {
4257c478bd9Sstevel@tonic-gate 			rw_panic("rw_exit: not owner", lp);
4267c478bd9Sstevel@tonic-gate 			lp->rw_wwwh = 0;
4277c478bd9Sstevel@tonic-gate 			return;
4287c478bd9Sstevel@tonic-gate 		}
4297c478bd9Sstevel@tonic-gate 		lock_value = RW_WRITE_LOCK(curthread);
4307c478bd9Sstevel@tonic-gate 	} else {
4317c478bd9Sstevel@tonic-gate 		if ((old & RW_LOCKED) == 0) {
4327c478bd9Sstevel@tonic-gate 			rw_panic("rw_exit: lock not held", lp);
4337c478bd9Sstevel@tonic-gate 			return;
4347c478bd9Sstevel@tonic-gate 		}
4357c478bd9Sstevel@tonic-gate 		lock_value = RW_READ_LOCK;
4367c478bd9Sstevel@tonic-gate 	}
4377c478bd9Sstevel@tonic-gate 
4387c478bd9Sstevel@tonic-gate 	for (;;) {
4397c478bd9Sstevel@tonic-gate 		/*
4407c478bd9Sstevel@tonic-gate 		 * If this is *not* the final exit of a lock with waiters,
4417c478bd9Sstevel@tonic-gate 		 * just drop the lock -- there's nothing tricky going on.
4427c478bd9Sstevel@tonic-gate 		 */
4437c478bd9Sstevel@tonic-gate 		old = lp->rw_wwwh;
4447c478bd9Sstevel@tonic-gate 		new = old - lock_value;
4457c478bd9Sstevel@tonic-gate 		if ((new & (RW_LOCKED | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
446374ae87fSsvemuri 			if (casip(&lp->rw_wwwh, old, new) != old) {
447374ae87fSsvemuri 				if (rw_lock_delay != NULL) {
448374ae87fSsvemuri 					backoff = rw_lock_backoff(backoff);
449374ae87fSsvemuri 					rw_lock_delay(backoff);
450374ae87fSsvemuri 					if (++loop_count == ncpus_online) {
451374ae87fSsvemuri 						backoff = 0;
452374ae87fSsvemuri 						loop_count = 0;
453374ae87fSsvemuri 					}
454374ae87fSsvemuri 				}
4557c478bd9Sstevel@tonic-gate 				continue;
456374ae87fSsvemuri 			}
4577c478bd9Sstevel@tonic-gate 			break;
4587c478bd9Sstevel@tonic-gate 		}
4597c478bd9Sstevel@tonic-gate 
4607c478bd9Sstevel@tonic-gate 		/*
461cd04b6efSBryan Cantrill 		 * This appears to be the final exit of a lock with waiters.
462cd04b6efSBryan Cantrill 		 * If we do not have the lock as writer (that is, if this is
463cd04b6efSBryan Cantrill 		 * the last exit of a reader with waiting writers), we will
464cd04b6efSBryan Cantrill 		 * grab the lock as writer to prevent additional readers.
465cd04b6efSBryan Cantrill 		 * (This is required because a reader that is acquiring the
466cd04b6efSBryan Cantrill 		 * lock via RW_READER_STARVEWRITER will not observe the
467cd04b6efSBryan Cantrill 		 * RW_WRITE_WANTED bit -- and we could therefore be racing
468cd04b6efSBryan Cantrill 		 * with such readers here.)
469cd04b6efSBryan Cantrill 		 */
470cd04b6efSBryan Cantrill 		if (!(old & RW_WRITE_LOCKED)) {
471cd04b6efSBryan Cantrill 			new = RW_WRITE_LOCK(curthread) |
472cd04b6efSBryan Cantrill 			    RW_HAS_WAITERS | RW_WRITE_WANTED;
473cd04b6efSBryan Cantrill 
474cd04b6efSBryan Cantrill 			if (casip(&lp->rw_wwwh, old, new) != old)
475cd04b6efSBryan Cantrill 				continue;
476cd04b6efSBryan Cantrill 		}
477cd04b6efSBryan Cantrill 
478cd04b6efSBryan Cantrill 		/*
4797c478bd9Sstevel@tonic-gate 		 * Perform the final exit of a lock that has waiters.
4807c478bd9Sstevel@tonic-gate 		 */
4817c478bd9Sstevel@tonic-gate 		ts = turnstile_lookup(lp);
4827c478bd9Sstevel@tonic-gate 
4837c478bd9Sstevel@tonic-gate 		next_writer = ts->ts_sleepq[TS_WRITER_Q].sq_first;
4847c478bd9Sstevel@tonic-gate 
4857c478bd9Sstevel@tonic-gate 		if ((old & RW_WRITE_LOCKED) &&
4867c478bd9Sstevel@tonic-gate 		    (nreaders = rw_readers_to_wake(ts)) > 0) {
4877c478bd9Sstevel@tonic-gate 			/*
4887c478bd9Sstevel@tonic-gate 			 * Don't drop the lock -- just set the hold count
4897c478bd9Sstevel@tonic-gate 			 * such that we grant the lock to all readers at once.
4907c478bd9Sstevel@tonic-gate 			 */
4917c478bd9Sstevel@tonic-gate 			new = nreaders * RW_READ_LOCK;
4927c478bd9Sstevel@tonic-gate 			if (ts->ts_waiters > nreaders)
4937c478bd9Sstevel@tonic-gate 				new |= RW_HAS_WAITERS;
4947c478bd9Sstevel@tonic-gate 			if (next_writer)
4957c478bd9Sstevel@tonic-gate 				new |= RW_WRITE_WANTED;
4967c478bd9Sstevel@tonic-gate 			lp->rw_wwwh = new;
4977c478bd9Sstevel@tonic-gate 			membar_enter();
4987c478bd9Sstevel@tonic-gate 			turnstile_wakeup(ts, TS_READER_Q, nreaders, NULL);
4997c478bd9Sstevel@tonic-gate 		} else {
5007c478bd9Sstevel@tonic-gate 			/*
5017c478bd9Sstevel@tonic-gate 			 * Don't drop the lock -- just transfer ownership
5027c478bd9Sstevel@tonic-gate 			 * directly to next_writer.  Note that there must
5037c478bd9Sstevel@tonic-gate 			 * be at least one waiting writer, because we get
5047c478bd9Sstevel@tonic-gate 			 * here only if (A) the lock is read-locked or
5057c478bd9Sstevel@tonic-gate 			 * (B) there are no waiting readers.  In case (A),
5067c478bd9Sstevel@tonic-gate 			 * since the lock is read-locked there would be no
5077c478bd9Sstevel@tonic-gate 			 * reason for other readers to have blocked unless
5087c478bd9Sstevel@tonic-gate 			 * the RW_WRITE_WANTED bit was set.  In case (B),
5097c478bd9Sstevel@tonic-gate 			 * since there are waiters but no waiting readers,
5107c478bd9Sstevel@tonic-gate 			 * they must all be waiting writers.
5117c478bd9Sstevel@tonic-gate 			 */
5127c478bd9Sstevel@tonic-gate 			ASSERT(lp->rw_wwwh & RW_WRITE_WANTED);
5137c478bd9Sstevel@tonic-gate 			new = RW_WRITE_LOCK(next_writer);
5147c478bd9Sstevel@tonic-gate 			if (ts->ts_waiters > 1)
5157c478bd9Sstevel@tonic-gate 				new |= RW_HAS_WAITERS;
5167c478bd9Sstevel@tonic-gate 			if (next_writer->t_link)
5177c478bd9Sstevel@tonic-gate 				new |= RW_WRITE_WANTED;
5187c478bd9Sstevel@tonic-gate 			lp->rw_wwwh = new;
5197c478bd9Sstevel@tonic-gate 			membar_enter();
5207c478bd9Sstevel@tonic-gate 			turnstile_wakeup(ts, TS_WRITER_Q, 1, next_writer);
5217c478bd9Sstevel@tonic-gate 		}
5227c478bd9Sstevel@tonic-gate 		break;
5237c478bd9Sstevel@tonic-gate 	}
5247c478bd9Sstevel@tonic-gate 
5257c478bd9Sstevel@tonic-gate 	if (lock_value == RW_READ_LOCK) {
5267c478bd9Sstevel@tonic-gate 		THREAD_KPRI_RELEASE();
5277c478bd9Sstevel@tonic-gate 		LOCKSTAT_RECORD(LS_RW_EXIT_RELEASE, lp, RW_READER);
5287c478bd9Sstevel@tonic-gate 	} else {
5297c478bd9Sstevel@tonic-gate 		LOCKSTAT_RECORD(LS_RW_EXIT_RELEASE, lp, RW_WRITER);
5307c478bd9Sstevel@tonic-gate 	}
5317c478bd9Sstevel@tonic-gate }
5327c478bd9Sstevel@tonic-gate 
/*
 * Attempt to acquire rwlp in mode rw without blocking.  Returns non-zero
 * on success, zero if the lock could not be acquired immediately.
 */
int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
	rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;
	uintptr_t old;

	if (rw != RW_WRITER) {
		uint_t backoff = 0;
		int loop_count = 0;
		/*
		 * Readers hold kernel priority for as long as they hold the
		 * lock; released on the failure path below, and by rw_exit()
		 * / rw_tryupgrade() on the success path.
		 */
		THREAD_KPRI_REQUEST();
		for (;;) {
			/*
			 * An ordinary RW_READER fails if a writer holds or
			 * wants the lock (RW_WRITE_CLAIMED); the remaining
			 * reader mode (RW_READER_STARVEWRITER -- see the
			 * exit-path comment earlier in this file) fails only
			 * if a writer actually holds it (RW_WRITE_LOCKED),
			 * deliberately ignoring RW_WRITE_WANTED.
			 */
			if ((old = lp->rw_wwwh) & (rw == RW_READER ?
			    RW_WRITE_CLAIMED : RW_WRITE_LOCKED)) {
				THREAD_KPRI_RELEASE();
				return (0);
			}
			/* Bump the hold count by one reader, atomically. */
			if (casip(&lp->rw_wwwh, old, old + RW_READ_LOCK) == old)
				break;
			/*
			 * CAS lost a race with another CPU: back off before
			 * retrying to reduce cache-line contention.  The
			 * backoff is reset after ncpus_online failed rounds
			 * so a thread is never penalized indefinitely.
			 */
			if (rw_lock_delay != NULL) {
				backoff = rw_lock_backoff(backoff);
				rw_lock_delay(backoff);
				if (++loop_count == ncpus_online) {
					backoff = 0;
					loop_count = 0;
				}
			}
		}
		LOCKSTAT_RECORD(LS_RW_TRYENTER_ACQUIRE, lp, rw);
	} else {
		/*
		 * Writer path: a single CAS from the fully-unheld state (0)
		 * to "write-locked by curthread".  Any other state -- held,
		 * wanted, or waited-on -- means immediate failure.
		 */
		if (casip(&lp->rw_wwwh, 0, RW_WRITE_LOCK(curthread)) != 0)
			return (0);
		LOCKSTAT_RECORD(LS_RW_TRYENTER_ACQUIRE, lp, rw);
	}
	ASSERT(rw_locked(lp, rw));
	/* Acquire barrier: no critical-section access may float above this. */
	membar_enter();
	return (1);
}
5707c478bd9Sstevel@tonic-gate 
/*
 * Downgrade rwlp from a write lock held by the caller to a read lock,
 * waking any readers that were blocked behind the writer.  Panics if the
 * caller is not the write owner.
 */
void
rw_downgrade(krwlock_t *rwlp)
{
	rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;

	/*
	 * Take the kernel priority that a reader holds (matching the reader
	 * acquisition paths elsewhere in this file), since after this call
	 * the caller holds the lock as a reader.
	 */
	THREAD_KPRI_REQUEST();
	/* Release barrier: writes made under the write lock must be visible
	 * before the lock word changes. */
	membar_exit();

	if ((lp->rw_wwwh & RW_OWNER) != (uintptr_t)curthread) {
		rw_panic("rw_downgrade: not owner", lp);
		return;
	}

	/*
	 * Atomically swap the writer's ownership word for a single read
	 * hold.  If RW_HAS_WAITERS was set in the resulting value, there are
	 * blocked threads to consider waking.
	 */
	if (atomic_add_ip_nv(&lp->rw_wwwh,
	    RW_READ_LOCK - RW_WRITE_LOCK(curthread)) & RW_HAS_WAITERS) {
		turnstile_t *ts = turnstile_lookup(lp);
		int nreaders = rw_readers_to_wake(ts);
		if (nreaders > 0) {
			/*
			 * Grant all wakeable readers their holds up front;
			 * clear RW_HAS_WAITERS only if no waiters (i.e. no
			 * writers) remain on the turnstile afterward.
			 */
			uintptr_t delta = nreaders * RW_READ_LOCK;
			if (ts->ts_waiters == nreaders)
				delta -= RW_HAS_WAITERS;
			atomic_add_ip(&lp->rw_wwwh, delta);
		}
		/* Called even when nreaders == 0 to drop the turnstile lock
		 * taken by turnstile_lookup(). */
		turnstile_wakeup(ts, TS_READER_Q, nreaders, NULL);
	}
	ASSERT(rw_locked(lp, RW_READER));
	LOCKSTAT_RECORD0(LS_RW_DOWNGRADE_DOWNGRADE, lp);
}
5997c478bd9Sstevel@tonic-gate 
/*
 * Attempt to upgrade the caller's read lock on rwlp to a write lock
 * without blocking.  Succeeds only when the caller is the sole reader;
 * returns non-zero on success, zero otherwise.
 */
int
rw_tryupgrade(krwlock_t *rwlp)
{
	rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;
	uintptr_t old, new;

	ASSERT(rw_locked(lp, RW_READER));

	do {
		/*
		 * The upgrade is only legal if, ignoring RW_HAS_WAITERS, the
		 * lock word is exactly one read hold -- ours.  Any other
		 * reader, or a pending writer bit, fails the attempt.
		 */
		if (((old = lp->rw_wwwh) & ~RW_HAS_WAITERS) != RW_READ_LOCK)
			return (0);
		/* Replace our single read hold with write ownership. */
		new = old + RW_WRITE_LOCK(curthread) - RW_READ_LOCK;
	} while (casip(&lp->rw_wwwh, old, new) != old);

	/* Acquire barrier for the new write critical section. */
	membar_enter();
	/* Drop the kernel priority that was held while we were a reader. */
	THREAD_KPRI_RELEASE();
	LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, lp);
	ASSERT(rw_locked(lp, RW_WRITER));
	return (1);
}
6207c478bd9Sstevel@tonic-gate 
6217c478bd9Sstevel@tonic-gate int
rw_read_held(krwlock_t * rwlp)6227c478bd9Sstevel@tonic-gate rw_read_held(krwlock_t *rwlp)
6237c478bd9Sstevel@tonic-gate {
6247c478bd9Sstevel@tonic-gate 	uintptr_t tmp;
6257c478bd9Sstevel@tonic-gate 
6267c478bd9Sstevel@tonic-gate 	return (_RW_READ_HELD(rwlp, tmp));
6277c478bd9Sstevel@tonic-gate }
6287c478bd9Sstevel@tonic-gate 
6297c478bd9Sstevel@tonic-gate int
rw_write_held(krwlock_t * rwlp)6307c478bd9Sstevel@tonic-gate rw_write_held(krwlock_t *rwlp)
6317c478bd9Sstevel@tonic-gate {
6327c478bd9Sstevel@tonic-gate 	return (_RW_WRITE_HELD(rwlp));
6337c478bd9Sstevel@tonic-gate }
6347c478bd9Sstevel@tonic-gate 
6357c478bd9Sstevel@tonic-gate int
rw_lock_held(krwlock_t * rwlp)6367c478bd9Sstevel@tonic-gate rw_lock_held(krwlock_t *rwlp)
6377c478bd9Sstevel@tonic-gate {
6387c478bd9Sstevel@tonic-gate 	return (_RW_LOCK_HELD(rwlp));
6397c478bd9Sstevel@tonic-gate }
6407c478bd9Sstevel@tonic-gate 
6417c478bd9Sstevel@tonic-gate /*
6427c478bd9Sstevel@tonic-gate  * Like rw_read_held(), but ASSERTs that the lock is currently held
6437c478bd9Sstevel@tonic-gate  */
6447c478bd9Sstevel@tonic-gate int
rw_read_locked(krwlock_t * rwlp)6457c478bd9Sstevel@tonic-gate rw_read_locked(krwlock_t *rwlp)
6467c478bd9Sstevel@tonic-gate {
6477c478bd9Sstevel@tonic-gate 	uintptr_t old = ((rwlock_impl_t *)rwlp)->rw_wwwh;
6487c478bd9Sstevel@tonic-gate 
6497c478bd9Sstevel@tonic-gate 	ASSERT(old & RW_LOCKED);
6507c478bd9Sstevel@tonic-gate 	return ((old & RW_LOCKED) && !(old & RW_WRITE_LOCKED));
6517c478bd9Sstevel@tonic-gate }
6527c478bd9Sstevel@tonic-gate 
6537c478bd9Sstevel@tonic-gate /*
6547c478bd9Sstevel@tonic-gate  * Returns non-zero if the lock is either held or desired by a writer
6557c478bd9Sstevel@tonic-gate  */
6567c478bd9Sstevel@tonic-gate int
rw_iswriter(krwlock_t * rwlp)6577c478bd9Sstevel@tonic-gate rw_iswriter(krwlock_t *rwlp)
6587c478bd9Sstevel@tonic-gate {
6597c478bd9Sstevel@tonic-gate 	return (_RW_ISWRITER(rwlp));
6607c478bd9Sstevel@tonic-gate }
6617c478bd9Sstevel@tonic-gate 
6627c478bd9Sstevel@tonic-gate kthread_t *
rw_owner(krwlock_t * rwlp)6637c478bd9Sstevel@tonic-gate rw_owner(krwlock_t *rwlp)
6647c478bd9Sstevel@tonic-gate {
6657c478bd9Sstevel@tonic-gate 	uintptr_t old = ((rwlock_impl_t *)rwlp)->rw_wwwh;
6667c478bd9Sstevel@tonic-gate 
6677c478bd9Sstevel@tonic-gate 	return ((old & RW_WRITE_LOCKED) ? (kthread_t *)(old & RW_OWNER) : NULL);
6687c478bd9Sstevel@tonic-gate }
669