xref: /freebsd/sys/kern/kern_mutex.c (revision 94942af266ac119ede0ca836f9aa5a5ac0582938)
1 /*-
2  * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  * 3. Berkeley Software Design Inc's name may not be used to endorse or
13  *    promote products derived from this software without specific prior
14  *    written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30  */
31 
32 /*
33  * Machine independent bits of mutex implementation.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include "opt_adaptive_mutexes.h"
40 #include "opt_ddb.h"
41 #include "opt_global.h"
42 #include "opt_mutex_wake_all.h"
43 #include "opt_sched.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/bus.h>
48 #include <sys/conf.h>
49 #include <sys/kdb.h>
50 #include <sys/kernel.h>
51 #include <sys/ktr.h>
52 #include <sys/lock.h>
53 #include <sys/malloc.h>
54 #include <sys/mutex.h>
55 #include <sys/proc.h>
56 #include <sys/resourcevar.h>
57 #include <sys/sched.h>
58 #include <sys/sbuf.h>
59 #include <sys/sysctl.h>
60 #include <sys/turnstile.h>
61 #include <sys/vmmeter.h>
62 #include <sys/lock_profile.h>
63 
64 #include <machine/atomic.h>
65 #include <machine/bus.h>
66 #include <machine/cpu.h>
67 
68 #include <ddb/ddb.h>
69 
70 #include <fs/devfs/devfs_int.h>
71 
72 #include <vm/vm.h>
73 #include <vm/vm_extern.h>
74 
75 /*
76  * Force MUTEX_WAKE_ALL for now.
77  * Single-thread wakeup needs fixes to avoid race conditions with
78  * priority inheritance.
79  */
80 #ifndef MUTEX_WAKE_ALL
81 #define MUTEX_WAKE_ALL
82 #endif
83 
84 #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
85 #define	ADAPTIVE_MUTEXES
86 #endif
87 
88 /*
89  * Internal utility macros.
90  */
91 #define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
92 
93 #define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)
94 
95 #define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
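
/*
 * The mtx_lock word doubles as the owner field: while held it contains the
 * owning thread pointer, with the low MTX_FLAGMASK bits reused for the
 * MTX_RECURSED and MTX_CONTESTED flags (hence the masking in mtx_owner()).
 * MTX_UNOWNED and MTX_DESTROYED are sentinel values that are never valid
 * thread pointers.
 */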
96 
97 #ifdef DDB
98 static void	db_show_mtx(struct lock_object *lock);
99 #endif
100 static void	lock_mtx(struct lock_object *lock, int how);
101 static void	lock_spin(struct lock_object *lock, int how);
102 static int	unlock_mtx(struct lock_object *lock);
103 static int	unlock_spin(struct lock_object *lock);
104 
105 /*
106  * Lock classes for sleep and spin mutexes.
107  */
108 struct lock_class lock_class_mtx_sleep = {
109 	.lc_name = "sleep mutex",
110 	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
111 #ifdef DDB
112 	.lc_ddb_show = db_show_mtx,
113 #endif
114 	.lc_lock = lock_mtx,
115 	.lc_unlock = unlock_mtx,
116 };
117 struct lock_class lock_class_mtx_spin = {
118 	.lc_name = "spin mutex",
119 	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
120 #ifdef DDB
121 	.lc_ddb_show = db_show_mtx,
122 #endif
123 	.lc_lock = lock_spin,
124 	.lc_unlock = unlock_spin,
125 };
126 
127 /*
128  * System-wide mutexes
129  */
130 struct mtx sched_lock;
131 struct mtx Giant;
132 
133 #ifdef LOCK_PROFILING
134 static inline void lock_profile_init(void)
135 {
136         int i;
137         /* Initialize the mutex profiling locks */
138         for (i = 0; i < LPROF_LOCK_SIZE; i++) {
139                 mtx_init(&lprof_locks[i], "mprof lock",
140                     NULL, MTX_SPIN|MTX_QUIET|MTX_NOPROFILE);
141         }
142 }
143 #else
144 static inline void lock_profile_init(void) {;}
145 #endif
146 
147 void
148 lock_mtx(struct lock_object *lock, int how)
149 {
150 
151 	mtx_lock((struct mtx *)lock);
152 }
153 
154 void
155 lock_spin(struct lock_object *lock, int how)
156 {
157 
158 	panic("spin locks can only use msleep_spin");
159 }
160 
161 int
162 unlock_mtx(struct lock_object *lock)
163 {
164 	struct mtx *m;
165 
166 	m = (struct mtx *)lock;
167 	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
168 	mtx_unlock(m);
169 	return (0);
170 }
171 
172 int
173 unlock_spin(struct lock_object *lock)
174 {
175 
176 	panic("spin locks can only use msleep_spin");
177 }
178 
179 /*
180  * Function versions of the inlined __mtx_* macros.  These are used by
181  * modules and can also be called from assembly language if needed.
182  */
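/*
 * Illustrative sketch only (hypothetical consumer code, not part of this
 * file): callers normally go through the mtx_lock()/mtx_unlock() macros,
 * which resolve to these functions for modules, e.g.:
 *
 *	struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	... access data protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 */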
183 void
184 _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
185 {
186 
187 	MPASS(curthread != NULL);
188 	KASSERT(m->mtx_lock != MTX_DESTROYED,
189 	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
190 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
191 	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
192 	    file, line));
193 	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
194 	    file, line);
195 
196 	_get_sleep_lock(m, curthread, opts, file, line);
197 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
198 	    line);
199 	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
200 	curthread->td_locks++;
201 }
202 
203 void
204 _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
205 {
206 	MPASS(curthread != NULL);
207 	KASSERT(m->mtx_lock != MTX_DESTROYED,
208 	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
209 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
210 	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
211 	    file, line));
212 	curthread->td_locks--;
213 	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
214 	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
215 	    line);
216 	mtx_assert(m, MA_OWNED);
217 
218 	if (m->mtx_recurse == 0)
219 		lock_profile_release_lock(&m->lock_object);
220 	_rel_sleep_lock(m, curthread, opts, file, line);
221 }
222 
223 void
224 _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
225 {
226 
227 	MPASS(curthread != NULL);
228 	KASSERT(m->mtx_lock != MTX_DESTROYED,
229 	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
230 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
231 	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
232 	    m->lock_object.lo_name, file, line));
233 	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
234 	    file, line);
235 	_get_spin_lock(m, curthread, opts, file, line);
236 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
237 	    line);
238 	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
239 }
240 
241 void
242 _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
243 {
244 
245 	MPASS(curthread != NULL);
246 	KASSERT(m->mtx_lock != MTX_DESTROYED,
247 	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
248 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
249 	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
250 	    m->lock_object.lo_name, file, line));
251 	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
252 	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
253 	    line);
254 	mtx_assert(m, MA_OWNED);
255 
256 	_rel_spin_lock(m);
257 }
258 
259 /*
260  * The important part of mtx_trylock{,_flags}().
261  * Tries to acquire lock `m.'  If the mutex is already owned by the caller
262  * and is marked recursable, the lock is acquired recursively.
263  */
264 int
265 _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
266 {
267 	int rval, contested = 0;
268 	uint64_t waittime = 0;
269 
270 	MPASS(curthread != NULL);
271 	KASSERT(m->mtx_lock != MTX_DESTROYED,
272 	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
273 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
274 	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
275 	    file, line));
276 
277 	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
278 		m->mtx_recurse++;
279 		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
280 		rval = 1;
281 	} else
282 		rval = _obtain_lock(m, (uintptr_t)curthread);
283 
284 	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
285 	if (rval) {
286 		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
287 		    file, line);
288 		curthread->td_locks++;
289 		if (m->mtx_recurse == 0)
290 			lock_profile_obtain_lock_success(&m->lock_object, contested,
291 			    waittime, file, line);
292 
293 	}
294 
295 	return (rval);
296 }
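
/*
 * Minimal usage sketch for the above (hypothetical caller): take the lock
 * only if it is immediately available and otherwise defer the work rather
 * than sleeping:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... short critical section ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... requeue or retry later ...
 *	}
 */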
297 
298 /*
299  * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
300  *
301  * We call this if the lock is either contested (i.e. we need to go to
302  * sleep waiting for it), or if we need to recurse on it.
303  */
304 void
305 _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
306     int line)
307 {
308 #ifdef ADAPTIVE_MUTEXES
309 	volatile struct thread *owner;
310 #endif
311 #ifdef KTR
312 	int cont_logged = 0;
313 #endif
314 	int contested = 0;
315 	uint64_t waittime = 0;
316 	uintptr_t v;
317 
318 	if (mtx_owned(m)) {
319 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
320 	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
321 		    m->lock_object.lo_name, file, line));
322 		m->mtx_recurse++;
323 		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
324 		if (LOCK_LOG_TEST(&m->lock_object, opts))
325 			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
326 		return;
327 	}
328 
329 	lock_profile_obtain_lock_failed(&m->lock_object,
330 		    &contested, &waittime);
331 	if (LOCK_LOG_TEST(&m->lock_object, opts))
332 		CTR4(KTR_LOCK,
333 		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
334 		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
335 
336 	while (!_obtain_lock(m, tid)) {
337 		turnstile_lock(&m->lock_object);
338 		v = m->mtx_lock;
339 
340 		/*
341 		 * Check if the lock has been released while spinning for
342 		 * the turnstile chain lock.
343 		 */
344 		if (v == MTX_UNOWNED) {
345 			turnstile_release(&m->lock_object);
346 			cpu_spinwait();
347 			continue;
348 		}
349 
350 #ifdef MUTEX_WAKE_ALL
351 		MPASS(v != MTX_CONTESTED);
352 #else
353 		/*
354 		 * The mutex was marked contested on release. This means that
355 		 * there are other threads blocked on it.  Grab ownership of
356 		 * it and propagate its priority to the current thread if
357 		 * necessary.
358 		 */
359 		if (v == MTX_CONTESTED) {
360 			m->mtx_lock = tid | MTX_CONTESTED;
361 			turnstile_claim(&m->lock_object);
362 			break;
363 		}
364 #endif
365 
366 		/*
367 		 * If the mutex isn't already contested and a failure occurs
368 		 * setting the contested bit, the mutex was either released
369 		 * or the state of the MTX_RECURSED bit changed.
370 		 */
371 		if ((v & MTX_CONTESTED) == 0 &&
372 		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
373 			turnstile_release(&m->lock_object);
374 			cpu_spinwait();
375 			continue;
376 		}
377 
378 #ifdef ADAPTIVE_MUTEXES
379 		/*
380 		 * If the current owner of the lock is executing on another
381 		 * CPU, spin instead of blocking.
382 		 */
383 		owner = (struct thread *)(v & ~MTX_FLAGMASK);
384 #ifdef ADAPTIVE_GIANT
385 		if (TD_IS_RUNNING(owner))
386 #else
387 		if (m != &Giant && TD_IS_RUNNING(owner))
388 #endif
389 		{
390 			turnstile_release(&m->lock_object);
391 			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
392 				cpu_spinwait();
393 			}
394 			continue;
395 		}
396 #endif	/* ADAPTIVE_MUTEXES */
397 
398 		/*
399 		 * We definitely must sleep for this lock.
400 		 */
401 		mtx_assert(m, MA_NOTOWNED);
402 
403 #ifdef KTR
404 		if (!cont_logged) {
405 			CTR6(KTR_CONTENTION,
406 			    "contention: %p at %s:%d wants %s, taken by %s:%d",
407 			    (void *)tid, file, line, m->lock_object.lo_name,
408 			    WITNESS_FILE(&m->lock_object),
409 			    WITNESS_LINE(&m->lock_object));
410 			cont_logged = 1;
411 		}
412 #endif
413 
414 		/*
415 		 * Block on the turnstile.
416 		 */
417 		turnstile_wait(&m->lock_object, mtx_owner(m),
418 		    TS_EXCLUSIVE_QUEUE);
419 	}
420 #ifdef KTR
421 	if (cont_logged) {
422 		CTR4(KTR_CONTENTION,
423 		    "contention end: %s acquired by %p at %s:%d",
424 		    m->lock_object.lo_name, (void *)tid, file, line);
425 	}
426 #endif
427 	lock_profile_obtain_lock_success(&m->lock_object, contested,
428 	    waittime, (file), (line));
429 }
430 
431 #ifdef SMP
432 /*
433  * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
434  *
435  * This is only called if we need to actually spin for the lock. Recursion
436  * is handled inline.
437  */
438 void
439 _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
440     int line)
441 {
442 	int i = 0, contested = 0;
443 	struct thread *td;
444 	uint64_t waittime = 0;
445 
446 	if (LOCK_LOG_TEST(&m->lock_object, opts))
447 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
448 
449 	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
450 	while (!_obtain_lock(m, tid)) {
451 
452 		/* Give interrupts a chance while we spin. */
453 		spinlock_exit();
454 		while (m->mtx_lock != MTX_UNOWNED) {
455 			if (i++ < 10000000) {
456 				cpu_spinwait();
457 				continue;
458 			}
459 			if (i < 60000000 || kdb_active || panicstr != NULL)
460 				DELAY(1);
461 			else {
462 				td = mtx_owner(m);
463 
464 				/* If the mutex is unlocked, try again. */
465 				if (td == NULL)
466 					continue;
467 				printf(
468 			"spin lock %p (%s) held by %p (tid %d) too long\n",
469 				    m, m->lock_object.lo_name, td, td->td_tid);
470 #ifdef WITNESS
471 				witness_display_spinlock(&m->lock_object, td);
472 #endif
473 				panic("spin lock held too long");
474 			}
475 			cpu_spinwait();
476 		}
477 		spinlock_enter();
478 	}
479 
480 	if (LOCK_LOG_TEST(&m->lock_object, opts))
481 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
482 
483 	lock_profile_obtain_lock_success(&m->lock_object, contested,
484 	    waittime, (file), (line));
485 
486 }
487 #endif /* SMP */
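
/*
 * Hypothetical caller of the spin variants (not part of this file); while a
 * spin mutex is held, interrupts on the local CPU remain disabled via
 * spinlock_enter(), so the critical section must stay short:
 *
 *	mtx_lock_spin(&foo_spin_mtx);
 *	... very short critical section ...
 *	mtx_unlock_spin(&foo_spin_mtx);
 */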
488 
489 /*
490  * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
491  *
492  * We are only called here if the lock is recursed or contested (i.e. we
493  * need to wake up a blocked thread).
494  */
495 void
496 _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
497 {
498 	struct turnstile *ts;
499 #ifndef PREEMPTION
500 	struct thread *td, *td1;
501 #endif
502 
503 	if (mtx_recursed(m)) {
504 		if (--(m->mtx_recurse) == 0)
505 			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
506 		if (LOCK_LOG_TEST(&m->lock_object, opts))
507 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
508 		return;
509 	}
510 
511 	turnstile_lock(&m->lock_object);
512 	ts = turnstile_lookup(&m->lock_object);
513 	if (LOCK_LOG_TEST(&m->lock_object, opts))
514 		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
515 
516 #ifdef ADAPTIVE_MUTEXES
517 	if (ts == NULL) {
518 		_release_lock_quick(m);
519 		if (LOCK_LOG_TEST(&m->lock_object, opts))
520 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
521 		turnstile_release(&m->lock_object);
522 		return;
523 	}
524 #else
525 	MPASS(ts != NULL);
526 #endif
527 #ifndef PREEMPTION
528 	/* XXX */
529 	td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
530 #endif
531 #ifdef MUTEX_WAKE_ALL
532 	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
533 	_release_lock_quick(m);
534 #else
535 	if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
536 		_release_lock_quick(m);
537 		if (LOCK_LOG_TEST(&m->lock_object, opts))
538 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
539 	} else {
540 		m->mtx_lock = MTX_CONTESTED;
541 		if (LOCK_LOG_TEST(&m->lock_object, opts))
542 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
543 			    m);
544 	}
545 #endif
546 	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
547 
548 #ifndef PREEMPTION
549 	/*
550 	 * XXX: This is just a hack until preemption is done.  However,
551 	 * once preemption is done we need to either wrap the
552 	 * turnstile_signal() and release of the actual lock in an
553 	 * extra critical section or change the preemption code to
554 	 * always just set a flag and never do instant-preempts.
555 	 */
556 	td = curthread;
557 	if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
558 		return;
559 
560 	mtx_lock_spin(&sched_lock);
561 	if (!TD_IS_RUNNING(td1)) {
562 #ifdef notyet
563 		if (td->td_ithd != NULL) {
564 			struct ithd *it = td->td_ithd;
565 
566 			if (it->it_interrupted) {
567 				if (LOCK_LOG_TEST(&m->lock_object, opts))
568 					CTR2(KTR_LOCK,
569 				    "_mtx_unlock_sleep: %p interrupted %p",
570 					    it, it->it_interrupted);
571 				intr_thd_fixup(it);
572 			}
573 		}
574 #endif
575 		if (LOCK_LOG_TEST(&m->lock_object, opts))
576 			CTR2(KTR_LOCK,
577 			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
578 			    (void *)m->mtx_lock);
579 
580 		mi_switch(SW_INVOL, NULL);
581 		if (LOCK_LOG_TEST(&m->lock_object, opts))
582 			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
583 			    m, (void *)m->mtx_lock);
584 	}
585 	mtx_unlock_spin(&sched_lock);
586 #endif
587 }
588 
589 /*
590  * All the unlocking of MTX_SPIN locks is done inline.
591  * See the _rel_spin_lock() macro for the details.
592  */
593 
594 /*
595  * The backing function for the INVARIANTS-enabled mtx_assert()
596  */
597 #ifdef INVARIANT_SUPPORT
598 void
599 _mtx_assert(struct mtx *m, int what, const char *file, int line)
600 {
601 
602 	if (panicstr != NULL || dumping)
603 		return;
604 	switch (what) {
605 	case MA_OWNED:
606 	case MA_OWNED | MA_RECURSED:
607 	case MA_OWNED | MA_NOTRECURSED:
608 		if (!mtx_owned(m))
609 			panic("mutex %s not owned at %s:%d",
610 			    m->lock_object.lo_name, file, line);
611 		if (mtx_recursed(m)) {
612 			if ((what & MA_NOTRECURSED) != 0)
613 				panic("mutex %s recursed at %s:%d",
614 				    m->lock_object.lo_name, file, line);
615 		} else if ((what & MA_RECURSED) != 0) {
616 			panic("mutex %s unrecursed at %s:%d",
617 			    m->lock_object.lo_name, file, line);
618 		}
619 		break;
620 	case MA_NOTOWNED:
621 		if (mtx_owned(m))
622 			panic("mutex %s owned at %s:%d",
623 			    m->lock_object.lo_name, file, line);
624 		break;
625 	default:
626 		panic("unknown mtx_assert at %s:%d", file, line);
627 	}
628 }
629 #endif
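
/*
 * Typical (hypothetical) use by code that requires its caller to hold a
 * mutex; the check compiles away unless INVARIANTS is enabled:
 *
 *	mtx_assert(&foo_mtx, MA_OWNED);
 *	... manipulate state protected by foo_mtx ...
 */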
630 
631 /*
632  * The MUTEX_DEBUG-enabled mtx_validate()
633  *
634  * Most of these checks have been moved off into the LO_INITIALIZED flag
635  * maintained by the witness code.
636  */
637 #ifdef MUTEX_DEBUG
638 
639 void	mtx_validate(struct mtx *);
640 
641 void
642 mtx_validate(struct mtx *m)
643 {
644 
645 /*
646  * XXX: When kernacc() does not require Giant we can reenable this check
647  */
648 #ifdef notyet
649 	/*
650 	 * Can't call kernacc() from early init386(), especially when
651 	 * initializing Giant mutex, because some stuff in kernacc()
652 	 * requires Giant itself.
653 	 */
654 	if (!cold)
655 		if (!kernacc((caddr_t)m, sizeof(*m),
656 		    VM_PROT_READ | VM_PROT_WRITE))
657 			panic("Can't read and write to mutex %p", m);
658 #endif
659 }
660 #endif
661 
662 /*
663  * General init routine used by the MTX_SYSINIT() macro.
664  */
665 void
666 mtx_sysinit(void *arg)
667 {
668 	struct mtx_args *margs = arg;
669 
670 	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
671 }
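
/*
 * Consumers normally use the macro rather than calling mtx_sysinit()
 * directly; a hypothetical file-scope example:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx_init, &foo_mtx, "foo global", MTX_DEF);
 */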
672 
673 /*
674  * Mutex initialization routine; initialize lock `m' with the lock class and
675  * options given in `opts' and the name `name.'  The optional
676  * lock type `type' is used as a general lock category name for use with
677  * witness.
678  */
679 void
680 mtx_init(struct mtx *m, const char *name, const char *type, int opts)
681 {
682 	struct lock_class *class;
683 	int flags;
684 
685 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
686 		MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
687 
688 #ifdef MUTEX_DEBUG
689 	/* Diagnostic and error correction */
690 	mtx_validate(m);
691 #endif
692 
693 	/* Determine lock class and lock flags. */
694 	if (opts & MTX_SPIN)
695 		class = &lock_class_mtx_spin;
696 	else
697 		class = &lock_class_mtx_sleep;
698 	flags = 0;
699 	if (opts & MTX_QUIET)
700 		flags |= LO_QUIET;
701 	if (opts & MTX_RECURSE)
702 		flags |= LO_RECURSABLE;
703 	if ((opts & MTX_NOWITNESS) == 0)
704 		flags |= LO_WITNESS;
705 	if (opts & MTX_DUPOK)
706 		flags |= LO_DUPOK;
707 	if (opts & MTX_NOPROFILE)
708 		flags |= LO_NOPROFILE;
709 
710 	/* Initialize mutex. */
711 	m->mtx_lock = MTX_UNOWNED;
712 	m->mtx_recurse = 0;
713 
714 	lock_init(&m->lock_object, class, name, type, flags);
715 }
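
/*
 * Hypothetical initializations illustrating the spin class and the witness
 * `type' argument handled above:
 *
 *	mtx_init(&foo_intr_mtx, "foo intr", NULL, MTX_SPIN);
 *	mtx_init(&foo_node_mtx, "foo node", "foo node lock", MTX_DEF | MTX_DUPOK);
 */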
716 
717 /*
718  * Destroy lock `m'.  We don't allow MTX_QUIET to be
719  * passed in as a flag here because if the corresponding mtx_init() was
720  * called with MTX_QUIET set, then it will already be set in the mutex's
721  * flags.
722  */
723 void
724 mtx_destroy(struct mtx *m)
725 {
726 
727 	if (!mtx_owned(m))
728 		MPASS(mtx_unowned(m));
729 	else {
730 		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
731 
732 		/* Perform the non-mtx related part of mtx_unlock_spin(). */
733 		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
734 			spinlock_exit();
735 		else
736 			curthread->td_locks--;
737 
738 		/* Tell witness this isn't locked to make it happy. */
739 		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
740 		    __LINE__);
741 	}
742 
743 	m->mtx_lock = MTX_DESTROYED;
744 	lock_destroy(&m->lock_object);
745 }
746 
747 /*
748  * Initialize the mutex code and system mutexes.  This is called from the MD
749  * startup code prior to mi_startup().  The per-CPU data space needs to be
750  * set up before this is called.
751  */
752 void
753 mutex_init(void)
754 {
755 
756 	/* Setup turnstiles so that sleep mutexes work. */
757 	init_turnstiles();
758 
759 	/*
760 	 * Initialize mutexes.
761 	 */
762 	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
763 	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
764 	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
765 	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
766 	mtx_lock(&Giant);
767 
768 	lock_profile_init();
769 }
770 
771 #ifdef DDB
772 void
773 db_show_mtx(struct lock_object *lock)
774 {
775 	struct thread *td;
776 	struct mtx *m;
777 
778 	m = (struct mtx *)lock;
779 
780 	db_printf(" flags: {");
781 	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
782 		db_printf("SPIN");
783 	else
784 		db_printf("DEF");
785 	if (m->lock_object.lo_flags & LO_RECURSABLE)
786 		db_printf(", RECURSE");
787 	if (m->lock_object.lo_flags & LO_DUPOK)
788 		db_printf(", DUPOK");
789 	db_printf("}\n");
790 	db_printf(" state: {");
791 	if (mtx_unowned(m))
792 		db_printf("UNOWNED");
793 	else if (mtx_destroyed(m))
794 		db_printf("DESTROYED");
795 	else {
796 		db_printf("OWNED");
797 		if (m->mtx_lock & MTX_CONTESTED)
798 			db_printf(", CONTESTED");
799 		if (m->mtx_lock & MTX_RECURSED)
800 			db_printf(", RECURSED");
801 	}
802 	db_printf("}\n");
803 	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
804 		td = mtx_owner(m);
805 		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
806 		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
807 		if (mtx_recursed(m))
808 			db_printf(" recursed: %d\n", m->mtx_recurse);
809 	}
810 }
811 #endif
812