xref: /freebsd/sys/kern/kern_rwlock.c (revision 830940567b49bb0c08dfaed40418999e76616909)
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */
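
/*
 * Usage sketch of the rw(9) KPI implemented in this file.  This is
 * illustrative consumer code, not part of the implementation; the lock
 * name and the data it protects are assumptions:
 *
 *	static struct rwlock map_lock;
 *
 *	rw_init(&map_lock, "map lock");
 *
 *	rw_rlock(&map_lock);		(shared; many readers may enter)
 *	... read the protected data ...
 *	rw_runlock(&map_lock);
 *
 *	rw_wlock(&map_lock);		(exclusive; a single writer)
 *	... modify the protected data ...
 *	rw_wunlock(&map_lock);
 *
 *	rw_destroy(&map_lock);
 */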

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef ADAPTIVE_RWLOCKS
static int rowner_retries = 10;
static int rowner_loops = 10000;
SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL, "rwlock debugging");
SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0,
    "Number of adaptive spin rounds before blocking");
SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0,
    "Number of spin iterations per round");
#endif
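
/*
 * The knobs above are exported read-write, so they can be inspected and
 * tuned at runtime with sysctl(8); the values below are illustrative:
 *
 *	sysctl debug.rwlock.retry=20
 *	sysctl debug.rwlock.loops=20000
 */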

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif
static void	assert_rw(struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Returns true if the write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the write lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock that should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif

void
assert_rw(struct lock_object *lock, int what)
{

	rw_assert((struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, int how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_wlock(rw);
	else
		rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (0);
	} else {
		rw_wunlock(rw);
		return (1);
	}
}
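
/*
 * Illustrative (hypothetical) consumer of the lock class methods above:
 * code that must drop and reacquire a lock of an arbitrary class can
 * round-trip through lc_unlock()/lc_lock(), using the value returned by
 * unlock_rw() to restore the same read or write state:
 *
 *	int how;
 *
 *	how = LOCK_CLASS(lock)->lc_unlock(lock);
 *	... run without the lock held ...
 *	LOCK_CLASS(lock)->lc_lock(lock, how);
 */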

#ifdef KDTRACE_HOOKS
int
owner_rw(struct lock_object *lock, struct thread **owner)
{
	struct rwlock *rw = (struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ? (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
rw_init_flags(struct rwlock *rw, const char *name, int opts)
{
	int flags;

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE)) == 0);
	ASSERT_ATOMIC_LOAD(rw->rw_lock, ("%s: rw_lock not aligned for %s: %p",
	    __func__, name, &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;

	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
}
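
/*
 * Example initialization (illustrative): a lock that may be recursively
 * write-locked and is exempt from witness order checking:
 *
 *	struct rwlock cache_lock;
 *
 *	rw_init_flags(&cache_lock, "cache lock", RW_RECURSE | RW_NOWITNESS);
 */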

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	KASSERT(rw->rw_recurse == 0, ("rw lock still recursed"));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}

void
rw_sysinit_flags(void *arg)
{
	struct rw_args_flags *args = arg;

	rw_init_flags(args->ra_rw, args->ra_desc, args->ra_flags);
}

int
rw_wowned(struct rwlock *rw)
{

	return (rw_wowner(rw) == curthread);
}

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

int
_rw_try_wlock(struct rwlock *rw, const char *file, int line)
{
	int rval;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}
	return (rval);
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	if (!rw_recursed(rw))
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_WUNLOCK_RELEASE, rw);
	__rw_wunlock(rw, curthread, file, line);
}

/*
 * Determines whether a new reader can acquire a lock.  Succeeds if the
 * reader already owns a read lock and the lock is locked for read, to
 * prevent deadlock from reader recursion.  Also succeeds if the lock
 * is unlocked and has no write waiters or spinners.  Failing otherwise
 * gives writers priority over readers.
 */
#define	RW_CAN_READ(_rw)						\
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
    RW_LOCK_READ)
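
/*
 * Worked examples for RW_CAN_READ() (lock word values are illustrative):
 *
 *	RW_READERS_LOCK(2)		true: read-locked with no write
 *					waiters or spinners.
 *	RW_UNLOCKED			true: an unlocked lock is encoded
 *					as a read lock with zero readers.
 *	RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS
 *					true only if curthread already
 *					holds read locks (recursion case);
 *					otherwise false, deferring to the
 *					waiting writer.
 *	(uintptr_t)owner_td		false: write-locked.
 */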

void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t v;
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

	for (;;) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			continue;
		}
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				while ((struct thread *)RW_OWNER(rw->rw_lock) ==
				    owner && TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			for (i = 0; i < rowner_loops; i++) {
				v = rw->rw_lock;
				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
					break;
				cpu_spinwait();
			}
			if (i != rowner_loops)
				continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present;
		 * acquire the turnstile lock so we can begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons,
	 * however.  Turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_RLOCK_ACQUIRE, rw, contested,
	    waittime, file, line);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
	curthread->td_rw_rlocks++;
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_RW_RLOCK_BLOCK, rw, sleep_time);

	/*
	 * Record only the loops spinning and not sleeping.
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_RW_RLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
#endif
}

int
_rw_try_rlock(struct rwlock *rw, const char *file, int line)
{
	uintptr_t x;

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			curthread->td_locks++;
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}

void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x, v, queue;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);
	curthread->td_locks--;
	curthread->td_rw_rlocks--;
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, x, RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs, with the new thread
		 * "stealing" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_RUNLOCK_RELEASE, rw);
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
	uintptr_t v, x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		v = rw->rw_lock;
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner)) {
				cpu_spinwait();
#ifdef KDTRACE_HOOKS
				spin_cnt++;
#endif
			}
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
		    spintries < rowner_retries) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					continue;
				}
			}
			spintries++;
			for (i = 0; i < rowner_loops; i++) {
				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
					break;
				cpu_spinwait();
			}
#ifdef KDTRACE_HOOKS
			spin_cnt += rowner_loops - i;
#endif
			if (i != rowner_loops)
				continue;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (!(v & RW_LOCK_READ)) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif
		/*
		 * Check the waiters flags on this rwlock.  If the lock was
		 * released without maintaining any pending waiters queue,
		 * simply try to acquire it.  If a pending waiters queue is
		 * present, claim lock ownership and preserve the pending
		 * queue.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			turnstile_cancel(ts);
			continue;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
	}
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, rw, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_RW_WLOCK_BLOCK, rw, sleep_time);

	/*
	 * Record only the loops spinning and not sleeping.
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_RW_WLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
#endif
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the two waiter bits must be set, indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wake up the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, rw);
	}
	return (success);
}
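
/*
 * Typical caller pattern (illustrative): fall back to a blocking upgrade
 * when the non-blocking attempt fails, remembering that dropping the read
 * lock opens a window in which the protected state may change:
 *
 *	if (!rw_try_upgrade(&rw)) {
 *		rw_runlock(&rw);
 *		rw_wlock(&rw);
 *		... re-validate state; it may have changed in between ...
 *	}
 */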

/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving the waiter flags
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(LS_RW_DOWNGRADE_DOWNGRADE, rw);
}
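
/*
 * Typical caller pattern (illustrative): perform an update under the write
 * lock, then downgrade so other readers may proceed while this thread
 * continues reading the now-consistent data:
 *
 *	rw_wlock(&rw);
 *	... modify data ...
 *	rw_downgrade(&rw);
 *	... keep reading; other readers may now enter ...
 *	rw_runlock(&rw);
 */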

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock, fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif