xref: /freebsd/sys/kern/kern_rwlock.c (revision 35ae9291c2621d66ac66ed4a4996761946ac3e2d)
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef ADAPTIVE_RWLOCKS
static int rowner_retries = 10;
static int rowner_loops = 10000;
SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL, "rwlock debugging");
SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
#endif
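
/*
 * Both tunables above are exported read-write under debug.rwlock, so the
 * adaptive-spin behaviour can be inspected and adjusted at runtime with
 * sysctl(8).  An illustrative sketch (the values are examples only, not
 * recommendations):
 *
 *	# sysctl debug.rwlock.retry		spin-retry attempts
 *	# sysctl debug.rwlock.loops=20000	iterations per retry
 */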

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif
static void	assert_rw(struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
};

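/*
 * A minimal usage sketch of the rwlock(9) API implemented below
 * ("example_lock" is hypothetical):
 *
 *	struct rwlock example_lock;
 *
 *	rw_init(&example_lock, "example");
 *	rw_rlock(&example_lock);		read-only access
 *	rw_runlock(&example_lock);
 *	rw_wlock(&example_lock);		exclusive access
 *	rw_wunlock(&example_lock);
 *	rw_destroy(&example_lock);
 */
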
/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return true if the write owner of the lock is recursed.  Write ownership
 * is not assured here and should be checked beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the thread owning this lock, i.e. the thread that
 * should receive any priority lent by threads that block on the lock.
 * Currently this is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif

void
assert_rw(struct lock_object *lock, int what)
{

	rw_assert((struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, int how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_wlock(rw);
	else
		rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (0);
	} else {
		rw_wunlock(rw);
		return (1);
	}
}

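/*
 * The return value of unlock_rw() is designed to round-trip through
 * lock_rw(): generic code that must drop and reacquire a lock it does
 * not know the type of (the sleep queue code, for example) can do, in
 * sketch form:
 *
 *	how = LOCK_CLASS(lock)->lc_unlock(lock);	1 if write-locked
 *	...
 *	LOCK_CLASS(lock)->lc_lock(lock, how);		reacquire same mode
 */
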
#ifdef KDTRACE_HOOKS
int
owner_rw(struct lock_object *lock, struct thread **owner)
{
	struct rwlock *rw = (struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
rw_init_flags(struct rwlock *rw, const char *name, int opts)
{
	int flags;

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;

	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
}

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	KASSERT(rw->rw_recurse == 0, ("rw lock still recursed"));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}

void
rw_sysinit_flags(void *arg)
{
	struct rw_args_flags *args = arg;

	rw_init_flags(args->ra_rw, args->ra_desc, args->ra_flags);
}

int
rw_wowned(struct rwlock *rw)
{

	return (rw_wowner(rw) == curthread);
}

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

int
_rw_try_wlock(struct rwlock *rw, const char *file, int line)
{
	int rval;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}
	return (rval);
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	if (!rw_recursed(rw))
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_WUNLOCK_RELEASE, rw);
	__rw_wunlock(rw, curthread, file, line);
}

/*
 * Determine whether a new reader can acquire a lock.  Succeed if the
 * reader already owns read locks and the lock is read-locked, in order
 * to prevent deadlock from reader recursion.  Also succeed if the lock
 * is read-locked (including unlocked) with no write waiters or spinners
 * present.  Failing otherwise gives writers priority over readers.
 */
#define	RW_CAN_READ(_rw)						\
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
    RW_LOCK_READ)

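/*
 * Illustrative lock-word states for RW_CAN_READ() (a sketch; the flag
 * and RW_READERS_LOCK() encodings live in sys/rwlock.h):
 *
 *	RW_UNLOCKED			accepted: encoded as a read lock
 *					with zero readers and no waiters
 *	RW_READERS_LOCK(2)		accepted: more readers may join
 *	RW_READERS_LOCK(1) |
 *	    RW_LOCK_WRITE_WAITERS	rejected, unless curthread already
 *					holds read locks (reader recursion)
 *	<owner thread pointer>		rejected: write-locked
 */
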
void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t v;
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

	for (;;) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			continue;
		}
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
				    owner && TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			for (i = 0; i < rowner_loops; i++) {
				v = rw->rw_lock;
				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
					break;
				cpu_spinwait();
			}
			if (i != rowner_loops)
				continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present,
		 * acquire the turnstile lock so we can begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set, then try
		 * to set it.  If we fail to set it, drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_RLOCK_ACQUIRE, rw, contested,
	    waittime, file, line);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
	curthread->td_rw_rlocks++;
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_RW_RLOCK_BLOCK, rw, sleep_time);

	/*
	 * Record only the loops spinning and not sleeping.
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_RW_RLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
#endif
}

int
_rw_try_rlock(struct rwlock *rw, const char *file, int line)
{
	uintptr_t x;

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			curthread->td_locks++;
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}

void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x, v, queue;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);
	curthread->td_locks--;
	curthread->td_rw_rlocks--;
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, x, RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs, letting the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_RUNLOCK_RELEASE, rw);
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
	uintptr_t v, x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		v = rw->rw_lock;
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner)) {
				cpu_spinwait();
#ifdef KDTRACE_HOOKS
				spin_cnt++;
#endif
			}
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
		    spintries < rowner_retries) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					continue;
				}
			}
			spintries++;
			for (i = 0; i < rowner_loops; i++) {
				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
					break;
				cpu_spinwait();
			}
#ifdef KDTRACE_HOOKS
			spin_cnt += rowner_loops - i;
#endif
			if (i != rowner_loops)
				continue;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (!(v & RW_LOCK_READ)) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif
		/*
		 * Check the waiter flags for this rwlock.  If the lock was
		 * released without leaving any pending waiters, simply try
		 * to acquire it.  If pending waiters are present, claim
		 * lock ownership and preserve the pending waiters queue.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			turnstile_cancel(ts);
			continue;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
	}
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, rw, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_RW_WLOCK_BLOCK, rw, sleep_time);

	/*
	 * Record only the loops spinning and not sleeping.
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_RW_WLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
#endif
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the two waiter bits must be set, indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags is set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algorithm as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting, we wake up the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, rw);
	}
	return (success);
}

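/*
 * A common caller pattern for the upgrade path above, in sketch form
 * ("example_lock" and the revalidation step are hypothetical):
 *
 *	rw_rlock(&example_lock);
 *	...
 *	if (!rw_try_upgrade(&example_lock)) {
 *		rw_runlock(&example_lock);
 *		rw_wlock(&example_lock);
 *		(revalidate: state may have changed while unlocked)
 *	}
 *	...
 *	rw_wunlock(&example_lock);
 */
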
/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(LS_RW_DOWNGRADE_DOWNGRADE, rw);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock, fail.  We can't reliably check
		 * whether we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif