/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#define RMPF_ONQUEUE	1
#define RMPF_SIGNAL	2

#ifndef INVARIANTS
#define	_rm_assert(c, what, file, line)
#endif

static void	assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_rm(const struct lock_object *lock);
#endif
static void	lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

static void
assert_rm(const struct lock_object *lock, int what)
{

	rm_assert((const struct rmlock *)lock, what);
}

/*
 * These do not support read locks because it would be hard to make
 * the tracker work correctly with the current lock_class API as you
 * would need to have the tracker pointer available when calling
 * rm_rlock() in lock_rm().
 */
static void
lock_rm(struct lock_object *lock, int how)
{
	struct rmlock *rm;

	rm = (struct rmlock *)lock;
	if (how)
		rm_wlock(rm);
#ifdef INVARIANTS
	else
		panic("lock_rm called in read mode");
#endif
}

static int
unlock_rm(struct lock_object *lock)
{
	struct rmlock *rm;

	rm = (struct rmlock *)lock;
	rm_wunlock(rm);
	return (1);
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
	const struct rmlock *rm;
	struct lock_class *lc;

	rm = (const struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	int count;

	count = 0;
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
			count++;
	}
	return (count);
}

static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}

static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;
	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	struct lock_class *lc;
	int liflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_NOWITNESS);
	} else {
		lc = &lock_class_rm;
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
	}
	lock_init(&rm->lock_object, lc, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}

void
rm_destroy(struct rmlock *rm)
{

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}

int
rm_wowned(const struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args = arg;

	rm_init(args->ra_rm, args->ra_desc);
}

void
rm_sysinit_flags(void *arg)
{
	struct rm_args_flags *args = arg;

	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}
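
/*
 * Illustrative usage sketch, not part of the original file: the names
 * example_lock, example_counter and rmlock_usage_example are hypothetical.
 * It exercises the public KPI from <sys/rmlock.h> that is implemented here:
 * readers pass a caller-provided struct rm_priotracker to rm_rlock() and
 * rm_runlock(), while writers take the heavier rm_wlock()/rm_wunlock() path.
 * Static initialization can equivalently be done with RM_SYSINIT(), which
 * uses rm_sysinit() above.
 */
static struct rmlock example_lock;		/* hypothetical lock */
static int example_counter;			/* hypothetical shared data */

static __unused void
rmlock_usage_example(void)
{
	struct rm_priotracker tracker;	/* must stay valid until runlock */

	rm_init_flags(&example_lock, "example", 0);

	/* Read path: cheap, touches only per-CPU state in the common case. */
	rm_rlock(&example_lock, &tracker);
	rm_assert(&example_lock, RA_RLOCKED);
	(void)example_counter;
	rm_runlock(&example_lock, &tracker);

	/* Write path: revokes all read tokens via the IPI machinery below. */
	rm_wlock(&example_lock);
	example_counter++;
	rm_wunlock(&example_lock);

	rm_destroy(&example_lock);
}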

static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE)
			sx_xlock(&rm->rm_lock_sx);
		else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}

int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags  = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_NO_SLEEPING();

	td->td_critnest++;	/* critical_enter(); */

	__compiler_membar();

	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	__compiler_membar();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return (_rm_rlock_hard(rm, tracker, trylock));
}

static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		/*
		 * Pick up a preemption that was deferred while td_critnest
		 * was bumped by hand in _rm_runlock(): enter a real critical
		 * section and let critical_exit() perform the pending switch.
		 */
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}

void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendevous_barrier,
		    rm_cleanIPI,
		    smp_no_rendevous_barrier,
		    rm);

#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}

void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
	curthread->td_locks--;
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	curthread->td_locks--;
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return (_rm_rlock(rm, tracker, trylock));
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rm_assert
#endif

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
	int count;

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case.  Unlike other
		 * primitives, writers can never recurse.
		 */
		if (rm_wowned(rm)) {
			if (what & RA_RLOCKED)
				panic("Lock %s exclusively locked @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			break;
		}

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count == 0)
			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
		if (count > 1) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_WLOCKED:
		if (!rm_wowned(rm))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
		if (rm_wowned(rm))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count != 0)
			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
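
/*
 * Illustrative sketch, not from the original file: a consumer would
 * typically guard internal helpers with rm_assert(), e.g.
 *
 *	static void
 *	example_modify(struct rmlock *rm)
 *	{
 *
 *		rm_assert(rm, RA_WLOCKED);
 *		...
 *	}
 *
 * The assertion expands to nothing in kernels built without INVARIANTS.
 */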

#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
	struct thread *td;

	td = tr->rmp_thread;
	db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
	if (tr->rmp_flags & RMPF_ONQUEUE) {
		db_printf("ONQUEUE");
		if (tr->rmp_flags & RMPF_SIGNAL)
			db_printf(",SIGNAL");
	} else
		db_printf("0");
	db_printf("}\n");
}

static void
db_show_rm(const struct lock_object *lock)
{
	struct rm_priotracker *tr;
	struct rm_queue *queue;
	const struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (const struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif