/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

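/*
 * For orientation, a minimal usage sketch of the rmlock(9) interface
 * implemented here (the "foo" names are hypothetical, not part of this
 * file):
 *
 *	static struct rmlock foo_rm;
 *
 *	rm_init(&foo_rm, "foo");
 *
 *	struct rm_priotracker tracker;	(normally on the caller's stack)
 *	rm_rlock(&foo_rm, &tracker);
 *	... read-mostly data is stable here ...
 *	rm_runlock(&foo_rm, &tracker);
 *
 *	rm_wlock(&foo_rm);
 *	... modify the protected data ...
 *	rm_wunlock(&foo_rm);
 */
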
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#define RMPF_ONQUEUE	1
#define RMPF_SIGNAL	2

#ifndef INVARIANTS
#define	_rm_assert(c, what, file, line)
#endif

static void	assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_rm(const struct lock_object *lock);
#endif
static void	lock_rm(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

static void
assert_rm(const struct lock_object *lock, int what)
{

	rm_assert((const struct rmlock *)lock, what);
}

static void
lock_rm(struct lock_object *lock, uintptr_t how)
{
	struct rmlock *rm;
	struct rm_priotracker *tracker;

	rm = (struct rmlock *)lock;
	if (how == 0)
		rm_wlock(rm);
	else {
		tracker = (struct rm_priotracker *)how;
		rm_rlock(rm, tracker);
	}
}

static uintptr_t
unlock_rm(struct lock_object *lock)
{
	struct thread *td;
	struct pcpu *pc;
	struct rmlock *rm;
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	uintptr_t how;

	rm = (struct rmlock *)lock;
	tracker = NULL;
	how = 0;
	rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
	if (rm_wowned(rm))
		rm_wunlock(rm);
	else {
		/*
		 * Find the right rm_priotracker structure for curthread.
		 * The guarantee about its uniqueness is given by the fact
		 * we already asserted the lock wasn't recursively acquired.
		 */
		critical_enter();
		td = curthread;
		pc = pcpu_find(curcpu);
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tracker = (struct rm_priotracker *)queue;
			if ((tracker->rmp_rmlock == rm) &&
			    (tracker->rmp_thread == td)) {
				how = (uintptr_t)tracker;
				break;
			}
		}
		KASSERT(tracker != NULL,
		    ("rm_priotracker is NULL when lock held in read mode"));
		critical_exit();
		rm_runlock(rm, tracker);
	}
	return (how);
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
	const struct rmlock *rm;
	struct lock_class *lc;

	rm = (const struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);
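
/*
 * rm_spinlock is a single global spin mutex; it protects the
 * rm_activeReaders list of every rmlock and the rmp_flags word of the
 * trackers linked onto those lists.
 */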

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	int count;

	count = 0;
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
			count++;
	}
	return (count);
}

static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}

static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}
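
/*
 * rm_cleanIPI() runs, via the rendezvous issued by _rm_wlock(), on each
 * CPU whose read token is being revoked: every tracker for the lock found
 * on the local per-CPU queue belongs to a reader that is still inside its
 * read section, so any tracker not already flagged is marked RMPF_ONQUEUE
 * and linked onto rm_activeReaders for the writer to wait on.
 */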

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	struct lock_class *lc;
	int liflags, xflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	if (opts & RM_NEW)
		liflags |= LO_NEW;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		xflags = (opts & RM_NEW ? SX_NEW : 0);
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx",
		    xflags | SX_NOWITNESS);
	} else {
		lc = &lock_class_rm;
		xflags = (opts & RM_NEW ? MTX_NEW : 0);
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx",
		    xflags | MTX_NOWITNESS);
	}
	lock_init(&rm->lock_object, lc, name, NULL, liflags);
}
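
/*
 * Example (hypothetical): a recursable, sleepable rmlock whose write side
 * is backed by an sx lock instead of a mutex:
 *
 *	static struct rmlock foo_rm;
 *	rm_init_flags(&foo_rm, "foo", RM_RECURSE | RM_SLEEPABLE);
 */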

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}

void
rm_destroy(struct rmlock *rm)
{

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}

int
rm_wowned(const struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args = arg;

	rm_init(args->ra_rm, args->ra_desc);
}

void
rm_sysinit_flags(void *arg)
{
	struct rm_args_flags *args = arg;

	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}

static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire the lock even if a writer is blocked,
	 * provided the lock is recursive and the reader already holds it.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE)
			sx_xlock(&rm->rm_lock_sx);
		else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}
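
/*
 * Summary of the slow path above: once the fast path fails, the reader
 * falls back to taking the lock's backing write lock (mutex or sx), which
 * serializes it against any writer.  While holding it, the reader clears
 * this CPU's bit in rm_writecpus (re-arming the fast path for later
 * readers on this CPU), re-adds its tracker, pins itself, and only then
 * drops the backing lock again.
 */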

int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags  = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_NO_SLEEPING();

	td->td_critnest++;	/* critical_enter(); */

	__compiler_membar();

	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	__compiler_membar();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return (_rm_rlock_hard(rm, tracker, trylock));
}
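
/*
 * Note on the fast path above: td_critnest is bumped by hand rather than
 * through critical_enter(), and the compiler barriers keep the tracker
 * list insertion inside that window.  In the common case -- this CPU
 * already holds a read token (its bit in rm_writecpus is clear) and no
 * preemption is pending -- acquiring the lock in read mode is just the
 * per-CPU list insertion plus sched_pin().  _rm_rlock_hard() picks up the
 * remaining cases: a deferred preemption and/or a missing read token.
 */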

static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}
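
/*
 * _rm_unlock_hard() is only reached when the read unlock is not trivial:
 * a preemption was deferred while the tracker was unlinked and/or a
 * writer noticed this reader.  In the latter case the tracker is removed
 * from rm_activeReaders, and if the writer is currently sleeping on this
 * reader (RMPF_SIGNAL) it is woken through the lock's turnstile.
 */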

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}

void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendevous_barrier,
		    rm_cleanIPI,
		    smp_no_rendevous_barrier,
		    rm);

#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}
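
/*
 * Write-lock protocol in short: the writer holds the backing mutex/sx for
 * the whole write section (it is released in _rm_wunlock()).  Setting
 * rm_writecpus to all_cpus revokes every CPU's read token; the rendezvous
 * runs rm_cleanIPI() on the CPUs that held one, queueing in-flight
 * readers on rm_activeReaders; the writer then sleeps on the lock's
 * turnstile, lending its priority to each reader it waits on, until the
 * list drains.
 */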

void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}

#if LOCK_DEBUG > 0

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
	curthread->td_locks--;
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	curthread->td_locks--;
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return (_rm_rlock(rm, tracker, trylock));
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rm_assert
#endif

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
	int count;

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case.  Unlike other
		 * primitives, writers can never recurse.
		 */
		if (rm_wowned(rm)) {
			if (what & RA_RLOCKED)
				panic("Lock %s exclusively locked @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			break;
		}

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count == 0)
			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
		if (count > 1) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_WLOCKED:
		if (!rm_wowned(rm))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
		if (rm_wowned(rm))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count != 0)
			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
	struct thread *td;

	td = tr->rmp_thread;
	db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
	if (tr->rmp_flags & RMPF_ONQUEUE) {
		db_printf("ONQUEUE");
		if (tr->rmp_flags & RMPF_SIGNAL)
			db_printf(",SIGNAL");
	} else
		db_printf("0");
	db_printf("}\n");
}

static void
db_show_rm(const struct lock_object *lock)
{
	struct rm_priotracker *tr;
	struct rm_queue *queue;
	const struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (const struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif