/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef ADAPTIVE_RWLOCKS
static int rowner_retries = 10;
static int rowner_loops = 10000;
SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL, "rwlock debugging");
SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif
static void	assert_rw(struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, int how);
static int	unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))
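
/*
 * Illustrative sketch (comment only, not compiled): how the rw_lock
 * word is interpreted.  An unlocked lock is encoded as a read lock
 * with zero readers, a read-locked lock carries a reader count, and a
 * write-locked lock stores the owning thread pointer, with waiter and
 * spinner state kept in the low flag bits.  Example values:
 *
 *	RW_UNLOCKED			unlocked (== RW_READERS_LOCK(0))
 *	RW_READERS_LOCK(3)		read-locked by three readers
 *	(uintptr_t)td			write-locked by thread td
 *	(uintptr_t)td | RW_LOCK_WRITE_WAITERS
 *					write-locked, writers queued
 */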

/*
 * Returns whether the write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif

void
assert_rw(struct lock_object *lock, int what)
{

	rw_assert((struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, int how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_wlock(rw);
	else
		rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | RA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (0);
	} else {
		rw_wunlock(rw);
		return (1);
	}
}
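
/*
 * Illustrative sketch (comment only, not compiled): lock_rw() and
 * unlock_rw() let generic code such as the sleep queue drop and
 * reacquire an rwlock without knowing the lock's type.  The value
 * returned by lc_unlock() records how the lock was held so that
 * lc_lock() can restore the same mode:
 *
 *	struct lock_class *class = LOCK_CLASS(lock);
 *	int how;
 *
 *	how = class->lc_unlock(lock);	(0 for rlock, 1 for wlock)
 *	... sleep ...
 *	class->lc_lock(lock, how);	(reacquire in the saved mode)
 */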

void
rw_init_flags(struct rwlock *rw, const char *name, int opts)
{
	int flags;

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE)) == 0);

	flags = LO_UPGRADABLE | LO_RECURSABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	flags |= opts & RW_RECURSE;

	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
}
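
/*
 * Illustrative sketch (comment only, not compiled): typical
 * initialization and use, in the style of rwlock(9).  The names
 * "foo_lock" and "foo_data" are hypothetical:
 *
 *	struct rwlock foo_lock;
 *
 *	rw_init_flags(&foo_lock, "foo data lock", RW_RECURSE);
 *
 *	rw_rlock(&foo_lock);		(shared: read foo_data)
 *	rw_runlock(&foo_lock);
 *
 *	rw_wlock(&foo_lock);		(exclusive: modify foo_data)
 *	rw_wunlock(&foo_lock);
 *
 *	rw_destroy(&foo_lock);		(must be unlocked at this point)
 */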

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	KASSERT(rw->rw_recurse == 0, ("rw lock still recursed"));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}

void
rw_sysinit_flags(void *arg)
{
	struct rw_args_flags *args = arg;

	rw_init_flags(args->ra_rw, args->ra_desc, args->ra_flags);
}
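
/*
 * Illustrative sketch (comment only, not compiled): these hooks back
 * the RW_SYSINIT() and RW_SYSINIT_FLAGS() macros, which initialize a
 * global rwlock during boot.  "foo_lock" is a hypothetical name:
 *
 *	static struct rwlock foo_lock;
 *	RW_SYSINIT(foo_lock_init, &foo_lock, "foo lock");
 */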

int
rw_wowned(struct rwlock *rw)
{

	return (rw_wowner(rw) == curthread);
}

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

int
_rw_try_wlock(struct rwlock *rw, const char *file, int line)
{
	int rval;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) && (rw->lock_object.lo_flags & RW_RECURSE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}
	return (rval);
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	if (!rw_recursed(rw))
		lock_profile_release_lock(&rw->lock_object);
	__rw_wunlock(rw, curthread, file, line);
}

/*
 * Determines whether a new reader can acquire a lock.  Succeeds if the
 * reader already owns a read lock and the lock is locked for read, to
 * prevent deadlock from reader recursion.  Also succeeds if the lock
 * is unlocked and has no writer waiters or spinners.  Failing otherwise
 * prioritizes writers before readers.
 */
#define	RW_CAN_READ(_rw)						\
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
    RW_LOCK_READ)
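
/*
 * Illustrative cases for RW_CAN_READ() (sketch only):
 *
 *	RW_UNLOCKED				true (no waiters)
 *	RW_READERS_LOCK(2)			true (read-locked, no
 *						writers queued)
 *	RW_READERS_LOCK(2) | RW_LOCK_WRITE_WAITERS
 *						false, unless curthread
 *						already holds read locks
 *						(td_rw_rlocks != 0), in
 *						which case it is let
 *						through to avoid
 *						deadlocking on itself
 *	(uintptr_t)td (write-locked)		false
 */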

void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t v;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			cpu_spinwait();
			continue;
		}
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				while ((struct thread *)RW_OWNER(rw->rw_lock) ==
				    owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			for (i = 0; i < rowner_loops; i++) {
				v = rw->rw_lock;
				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
					break;
				cpu_spinwait();
			}
			if (i != rowner_loops)
				continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present, so
		 * acquire the turnstile lock and begin the process of
		 * blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, quit the hard path and try to spin.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set, then try
		 * to set it.  If we fail to set it, drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	lock_profile_obtain_lock_success(&rw->lock_object, contested,
	    waittime, file, line);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
	curthread->td_rw_rlocks++;
}

int
_rw_try_rlock(struct rwlock *rw, const char *file, int line)
{
	uintptr_t x;

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			curthread->td_locks++;
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}

void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x, v, queue;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);
	curthread->td_locks--;
	curthread->td_rw_rlocks--;
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, x, RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	lock_profile_release_lock(&rw->lock_object);
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
	uintptr_t v, x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & RW_RECURSE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		v = rw->rw_lock;
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
		    spintries < rowner_retries) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					cpu_spinwait();
					continue;
				}
			}
			spintries++;
			for (i = 0; i < rowner_loops; i++) {
				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
					break;
				cpu_spinwait();
			}
			if (i != rowner_loops)
				continue;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, quit the hard path and try to spin.
		 */
		if (!(v & RW_LOCK_READ)) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
		}
#endif
		/*
		 * Check for the waiter flags on this rwlock.  If the
		 * lock was released without leaving any pending waiter
		 * queue, simply try to acquire it.  If a pending waiter
		 * queue is present, claim lock ownership and preserve
		 * the pending queue.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
	}
	lock_profile_obtain_lock_success(&rw->lock_object, contested, waittime,
	    file, line);
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting, we wake up
	 * the readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If
	 * a new writer comes in before a reader it will claim the lock
	 * up above.  There is probably a potential priority inversion
	 * in there that could be worked around either by waking both
	 * queues of waiters or doing some complicated lock handoff
	 * gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}
	return (success);
}
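
/*
 * Illustrative sketch (comment only, not compiled): the usual pattern
 * around rw_try_upgrade(), since the upgrade can fail whenever other
 * readers are present.  "foo_lock" is a hypothetical name:
 *
 *	rw_rlock(&foo_lock);
 *	... decide that an update is needed ...
 *	if (!rw_try_upgrade(&foo_lock)) {
 *		rw_runlock(&foo_lock);
 *		rw_wlock(&foo_lock);
 *		... the lock was dropped; re-validate prior reads ...
 *	}
 *	... modify the data, then rw_wunlock(&foo_lock) ...
 */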

/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
}
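
/*
 * Illustrative sketch (comment only, not compiled): a typical use of
 * rw_downgrade(), publishing an update exclusively and then continuing
 * to read under a shared lock with no window where the lock is free:
 *
 *	rw_wlock(&foo_lock);
 *	... install the update ...
 *	rw_downgrade(&foo_lock);	(now read-locked, atomically)
 *	... read the data just installed ...
 *	rw_runlock(&foo_lock);
 */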

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
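
/*
 * Illustrative sketch (comment only, not compiled): callers use the
 * rw_assert() wrapper to document and verify locking contracts at
 * function entry.  "foo" is a hypothetical consumer:
 *
 *	static void
 *	foo_modify(struct foo *f)
 *	{
 *
 *		rw_assert(&f->f_lock, RA_WLOCKED);
 *		... f is now known to be exclusively locked ...
 *	}
 */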
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}
#endif