xref: /freebsd/sys/kern/kern_mutex.c (revision 2b743a9e9ddc6736208dc8ca1ce06ce64ad20a19)
1 /*-
2  * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  * 3. Berkeley Software Design Inc's name may not be used to endorse or
13  *    promote products derived from this software without specific prior
14  *    written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30  */
31 
32 /*
33  * Machine independent bits of mutex implementation.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include "opt_adaptive_mutexes.h"
40 #include "opt_ddb.h"
41 #include "opt_global.h"
42 #include "opt_mutex_wake_all.h"
43 #include "opt_sched.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/bus.h>
48 #include <sys/conf.h>
49 #include <sys/kdb.h>
50 #include <sys/kernel.h>
51 #include <sys/ktr.h>
52 #include <sys/lock.h>
53 #include <sys/malloc.h>
54 #include <sys/mutex.h>
55 #include <sys/proc.h>
56 #include <sys/resourcevar.h>
57 #include <sys/sched.h>
58 #include <sys/sbuf.h>
59 #include <sys/sysctl.h>
60 #include <sys/turnstile.h>
61 #include <sys/vmmeter.h>
62 #include <sys/lock_profile.h>
63 
64 #include <machine/atomic.h>
65 #include <machine/bus.h>
66 #include <machine/cpu.h>
67 
68 #include <ddb/ddb.h>
69 
70 #include <fs/devfs/devfs_int.h>
71 
72 #include <vm/vm.h>
73 #include <vm/vm_extern.h>
74 
75 /*
76  * Force MUTEX_WAKE_ALL for now.
77  * Single-thread wakeup needs fixes to avoid race conditions with
78  * priority inheritance.
79  */
80 #ifndef MUTEX_WAKE_ALL
81 #define MUTEX_WAKE_ALL
82 #endif
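/*
 * With MUTEX_WAKE_ALL in effect, _mtx_unlock_sleep() below wakes all
 * waiters via turnstile_broadcast() rather than handing the lock to a
 * single thread with turnstile_signal(), so _mtx_lock_sleep() never sees
 * the MTX_CONTESTED handoff value.
 */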
83 
84 /*
85  * Internal utility macros.
86  */
87 #define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
88 
89 #define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
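/*
 * For illustration: the lock word packs the owning thread pointer with the
 * low MTX_FLAGMASK state bits, so a recursed, contested lock owned by
 * thread "td" holds ((uintptr_t)td | MTX_RECURSED | MTX_CONTESTED);
 * mtx_owner() simply masks those flag bits back off.
 */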
90 
91 #ifdef DDB
92 static void	db_show_mtx(struct lock_object *lock);
93 #endif
94 
95 /*
96  * Lock classes for sleep and spin mutexes.
97  */
98 struct lock_class lock_class_mtx_sleep = {
99 	"sleep mutex",
100 	LC_SLEEPLOCK | LC_RECURSABLE,
101 #ifdef DDB
102 	db_show_mtx
103 #endif
104 };
105 struct lock_class lock_class_mtx_spin = {
106 	"spin mutex",
107 	LC_SPINLOCK | LC_RECURSABLE,
108 #ifdef DDB
109 	db_show_mtx
110 #endif
111 };
112 
113 /*
114  * System-wide mutexes
115  */
116 struct mtx sched_lock;
117 struct mtx Giant;
118 
119 #ifdef LOCK_PROFILING
120 static inline void lock_profile_init(void)
121 {
122         int i;
123         /* Initialize the mutex profiling locks */
124         for (i = 0; i < LPROF_LOCK_SIZE; i++) {
125                 mtx_init(&lprof_locks[i], "mprof lock",
126                     NULL, MTX_SPIN|MTX_QUIET|MTX_NOPROFILE);
127         }
128 }
129 #else
130 static inline void lock_profile_init(void) {;}
131 #endif
132 
133 /*
134  * Function versions of the inlined __mtx_* macros.  These are used by
135  * modules and can also be called from assembly language if needed.
136  */
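/*
 * A minimal usage sketch (hypothetical lock "foo_mtx", not part of this
 * file): callers normally go through the mtx_lock()/mtx_unlock() macros,
 * which in a module resolve to the function versions defined here:
 *
 *	mtx_lock(&foo_mtx);
 *	... access data protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 */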
137 void
138 _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
139 {
140 
141 	MPASS(curthread != NULL);
142 	KASSERT(m->mtx_lock != MTX_DESTROYED,
143 	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
144 	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
145 	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
146 	    file, line));
147 	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
148 	    file, line);
149 
150 	_get_sleep_lock(m, curthread, opts, file, line);
151 	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
152 	    line);
153 	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
154 	curthread->td_locks++;
155 }
156 
157 void
158 _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
159 {
160 #ifdef LOCK_PROFILING
161 	struct lock_object lo;
162 #endif
163 	MPASS(curthread != NULL);
164 	KASSERT(m->mtx_lock != MTX_DESTROYED,
165 	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
166 	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
167 	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
168 	    file, line));
169 	curthread->td_locks--;
170 	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
171 	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
172 	    line);
173 	mtx_assert(m, MA_OWNED);
174 #ifdef LOCK_PROFILING
175 	memcpy(&lo, &m->mtx_object, sizeof(lo));
176 	m->mtx_object.lo_flags &= ~LO_CONTESTED;
177 #endif
178 	_rel_sleep_lock(m, curthread, opts, file, line);
179 #ifdef LOCK_PROFILING
180 	lock_profile_release_lock(&lo);
181 #endif
182 }
183 
184 void
185 _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
186 {
187 
188 	MPASS(curthread != NULL);
189 	KASSERT(m->mtx_lock != MTX_DESTROYED,
190 	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
191 	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
192 	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
193 	    m->mtx_object.lo_name, file, line));
194 	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
195 	    file, line);
196 	_get_spin_lock(m, curthread, opts, file, line);
197 	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
198 	    line);
199 	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
200 }
201 
202 void
203 _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
204 {
205 #ifdef LOCK_PROFILING
206 	struct lock_object lo;
207 #endif
208 	MPASS(curthread != NULL);
209 	KASSERT(m->mtx_lock != MTX_DESTROYED,
210 	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
211 	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
212 	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
213 	    m->mtx_object.lo_name, file, line));
214 	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
215 	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
216 	    line);
217 	mtx_assert(m, MA_OWNED);
218 #ifdef LOCK_PROFILING
219 	memcpy(&lo, &m->mtx_object, sizeof(lo));
220 	m->mtx_object.lo_flags &= ~LO_CONTESTED;
221 #endif
222 	_rel_spin_lock(m);
223 #ifdef LOCK_PROFILING
224 	lock_profile_release_lock(&lo);
225 #endif
226 }
227 
228 /*
229  * The important part of mtx_trylock{,_flags}()
230  * Tries to acquire lock `m.'  If called on a mutex that is already owned
231  * and recursable, it will recursively acquire the lock.
232  */
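/*
 * A typical calling pattern (hypothetical lock, for illustration only):
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... got the lock without blocking, do the work ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... lock was busy, fall back ...
 *	}
 */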
233 int
234 _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
235 {
236 	int rval, contested = 0;
237 	uint64_t waittime = 0;
238 
239 	MPASS(curthread != NULL);
240 	KASSERT(m->mtx_lock != MTX_DESTROYED,
241 	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
242 	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
243 	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
244 	    file, line));
245 
246 	if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
247 		m->mtx_recurse++;
248 		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
249 		rval = 1;
250 	} else
251 		rval = _obtain_lock(m, (uintptr_t)curthread);
252 
253 	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
254 	if (rval) {
255 		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
256 		    file, line);
257 		curthread->td_locks++;
258 		if (m->mtx_recurse == 0)
259 			lock_profile_obtain_lock_success(&m->mtx_object, contested,
260 			    waittime, file, line);
261 
262 	}
263 
264 	return (rval);
265 }
266 
267 /*
268  * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
269  *
270  * We call this if the lock is either contested (i.e. we need to go to
271  * sleep waiting for it), or if we need to recurse on it.
272  */
273 void
274 _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
275     int line)
276 {
277 #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
278 	volatile struct thread *owner;
279 #endif
280 #ifdef KTR
281 	int cont_logged = 0;
282 #endif
283 	uintptr_t v;
284 
285 	if (mtx_owned(m)) {
286 		KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
287 	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
288 		    m->mtx_object.lo_name, file, line));
289 		m->mtx_recurse++;
290 		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
291 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
292 			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
293 		return;
294 	}
295 
296 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
297 		CTR4(KTR_LOCK,
298 		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
299 		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
300 
301 	while (!_obtain_lock(m, tid)) {
302 		turnstile_lock(&m->mtx_object);
303 		v = m->mtx_lock;
304 
305 		/*
306 		 * Check if the lock has been released while spinning for
307 		 * the turnstile chain lock.
308 		 */
309 		if (v == MTX_UNOWNED) {
310 			turnstile_release(&m->mtx_object);
311 			cpu_spinwait();
312 			continue;
313 		}
314 
315 #ifdef MUTEX_WAKE_ALL
316 		MPASS(v != MTX_CONTESTED);
317 #else
318 		/*
319 		 * The mutex was marked contested on release. This means that
320 		 * there are other threads blocked on it.  Grab ownership of
321 		 * it and propagate its priority to the current thread if
322 		 * necessary.
323 		 */
324 		if (v == MTX_CONTESTED) {
325 			m->mtx_lock = tid | MTX_CONTESTED;
326 			turnstile_claim(&m->mtx_object);
327 			break;
328 		}
329 #endif
330 
331 		/*
332 		 * If the mutex isn't already contested and a failure occurs
333 		 * setting the contested bit, the mutex was either released
334 		 * or the state of the MTX_RECURSED bit changed.
335 		 */
336 		if ((v & MTX_CONTESTED) == 0 &&
337 		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
338 			turnstile_release(&m->mtx_object);
339 			cpu_spinwait();
340 			continue;
341 		}
342 
343 #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
344 		/*
345 		 * If the current owner of the lock is executing on another
346 		 * CPU, spin instead of blocking.
347 		 */
348 		owner = (struct thread *)(v & ~MTX_FLAGMASK);
349 #ifdef ADAPTIVE_GIANT
350 		if (TD_IS_RUNNING(owner))
351 #else
352 		if (m != &Giant && TD_IS_RUNNING(owner))
353 #endif
354 		{
355 			turnstile_release(&m->mtx_object);
356 			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
357 				cpu_spinwait();
358 			}
359 			continue;
360 		}
361 #endif	/* SMP && !NO_ADAPTIVE_MUTEXES */
362 
363 		/*
364 		 * We definitely must sleep for this lock.
365 		 */
366 		mtx_assert(m, MA_NOTOWNED);
367 
368 #ifdef KTR
369 		if (!cont_logged) {
370 			CTR6(KTR_CONTENTION,
371 			    "contention: %p at %s:%d wants %s, taken by %s:%d",
372 			    (void *)tid, file, line, m->mtx_object.lo_name,
373 			    WITNESS_FILE(&m->mtx_object),
374 			    WITNESS_LINE(&m->mtx_object));
375 			cont_logged = 1;
376 		}
377 #endif
378 
379 		/*
380 		 * Block on the turnstile.
381 		 */
382 		turnstile_wait(&m->mtx_object, mtx_owner(m),
383 		    TS_EXCLUSIVE_QUEUE);
384 	}
385 #ifdef KTR
386 	if (cont_logged) {
387 		CTR4(KTR_CONTENTION,
388 		    "contention end: %s acquired by %p at %s:%d",
389 		    m->mtx_object.lo_name, (void *)tid, file, line);
390 	}
391 #endif
392 	return;
393 }
394 
395 #ifdef SMP
396 /*
397  * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
398  *
399  * This is only called if we need to actually spin for the lock. Recursion
400  * is handled inline.
401  */
402 void
403 _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
404     int line)
405 {
406 	int i = 0;
407 	struct thread *td;
408 
409 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
410 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
411 
412 	while (!_obtain_lock(m, tid)) {
413 
414 		/* Give interrupts a chance while we spin. */
415 		spinlock_exit();
416 		while (m->mtx_lock != MTX_UNOWNED) {
417 			if (i++ < 10000000) {
418 				cpu_spinwait();
419 				continue;
420 			}
421 			if (i < 60000000 || kdb_active || panicstr != NULL)
422 				DELAY(1);
423 			else {
424 				td = mtx_owner(m);
425 
426 				/* If the mutex is unlocked, try again. */
427 				if (td == NULL)
428 					continue;
429 				printf(
430 			"spin lock %p (%s) held by %p (tid %d) too long\n",
431 				    m, m->mtx_object.lo_name, td, td->td_tid);
432 #ifdef WITNESS
433 				witness_display_spinlock(&m->mtx_object, td);
434 #endif
435 				panic("spin lock held too long");
436 			}
437 			cpu_spinwait();
438 		}
439 		spinlock_enter();
440 	}
441 
442 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
443 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
444 
445 	return;
446 }
447 #endif /* SMP */
448 
449 /*
450  * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
451  *
452  * We are only called here if the lock is recursed or contested (i.e. we
453  * need to wake up a blocked thread).
454  */
455 void
456 _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
457 {
458 	struct turnstile *ts;
459 #ifndef PREEMPTION
460 	struct thread *td, *td1;
461 #endif
462 
463 	if (mtx_recursed(m)) {
464 		if (--(m->mtx_recurse) == 0)
465 			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
466 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
467 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
468 		return;
469 	}
470 
471 	turnstile_lock(&m->mtx_object);
472 	ts = turnstile_lookup(&m->mtx_object);
473 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
474 		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
475 
476 #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
477 	if (ts == NULL) {
478 		_release_lock_quick(m);
479 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
480 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
481 		turnstile_release(&m->mtx_object);
482 		return;
483 	}
484 #else
485 	MPASS(ts != NULL);
486 #endif
487 #ifndef PREEMPTION
488 	/* XXX */
489 	td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
490 #endif
491 #ifdef MUTEX_WAKE_ALL
492 	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
493 	_release_lock_quick(m);
494 #else
495 	if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
496 		_release_lock_quick(m);
497 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
498 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
499 	} else {
500 		m->mtx_lock = MTX_CONTESTED;
501 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
502 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
503 			    m);
504 	}
505 #endif
506 	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
507 
508 #ifndef PREEMPTION
509 	/*
510 	 * XXX: This is just a hack until preemption is done.  However,
511 	 * once preemption is done we need to either wrap the
512 	 * turnstile_signal() and release of the actual lock in an
513 	 * extra critical section or change the preemption code to
514 	 * always just set a flag and never do instant-preempts.
515 	 */
516 	td = curthread;
517 	if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
518 		return;
519 	mtx_lock_spin(&sched_lock);
520 	if (!TD_IS_RUNNING(td1)) {
521 #ifdef notyet
522 		if (td->td_ithd != NULL) {
523 			struct ithd *it = td->td_ithd;
524 
525 			if (it->it_interrupted) {
526 				if (LOCK_LOG_TEST(&m->mtx_object, opts))
527 					CTR2(KTR_LOCK,
528 				    "_mtx_unlock_sleep: %p interrupted %p",
529 					    it, it->it_interrupted);
530 				intr_thd_fixup(it);
531 			}
532 		}
533 #endif
534 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
535 			CTR2(KTR_LOCK,
536 			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
537 			    (void *)m->mtx_lock);
538 
539 		mi_switch(SW_INVOL, NULL);
540 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
541 			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
542 			    m, (void *)m->mtx_lock);
543 	}
544 	mtx_unlock_spin(&sched_lock);
545 #endif
546 
547 	return;
548 }
549 
550 /*
551  * All the unlocking of MTX_SPIN locks is done inline.
552  * See the _rel_spin_lock() macro for the details.
553  */
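/*
 * For illustration (hypothetical lock): spin mutexes pair the spin
 * variants of the lock and unlock operations, e.g.
 *
 *	mtx_lock_spin(&foo_spin_mtx);
 *	... short critical section, interrupts stay disabled on this CPU ...
 *	mtx_unlock_spin(&foo_spin_mtx);
 */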
554 
555 /*
556  * The backing function for the INVARIANTS-enabled mtx_assert()
557  */
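/*
 * For illustration (hypothetical lock): a routine that must be entered with
 * a mutex held would typically assert
 *
 *	mtx_assert(&foo_mtx, MA_OWNED);
 *
 * which compiles away entirely in kernels built without INVARIANTS.
 */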
558 #ifdef INVARIANT_SUPPORT
559 void
560 _mtx_assert(struct mtx *m, int what, const char *file, int line)
561 {
562 
563 	if (panicstr != NULL || dumping)
564 		return;
565 	switch (what) {
566 	case MA_OWNED:
567 	case MA_OWNED | MA_RECURSED:
568 	case MA_OWNED | MA_NOTRECURSED:
569 		if (!mtx_owned(m))
570 			panic("mutex %s not owned at %s:%d",
571 			    m->mtx_object.lo_name, file, line);
572 		if (mtx_recursed(m)) {
573 			if ((what & MA_NOTRECURSED) != 0)
574 				panic("mutex %s recursed at %s:%d",
575 				    m->mtx_object.lo_name, file, line);
576 		} else if ((what & MA_RECURSED) != 0) {
577 			panic("mutex %s unrecursed at %s:%d",
578 			    m->mtx_object.lo_name, file, line);
579 		}
580 		break;
581 	case MA_NOTOWNED:
582 		if (mtx_owned(m))
583 			panic("mutex %s owned at %s:%d",
584 			    m->mtx_object.lo_name, file, line);
585 		break;
586 	default:
587 		panic("unknown mtx_assert at %s:%d", file, line);
588 	}
589 }
590 #endif
591 
592 /*
593  * The MUTEX_DEBUG-enabled mtx_validate()
594  *
595  * Most of these checks have been moved off into the LO_INITIALIZED flag
596  * maintained by the witness code.
597  */
598 #ifdef MUTEX_DEBUG
599 
600 void	mtx_validate(struct mtx *);
601 
602 void
603 mtx_validate(struct mtx *m)
604 {
605 
606 /*
607  * XXX: When kernacc() does not require Giant we can reenable this check
608  */
609 #ifdef notyet
610 	/*
611 	 * Can't call kernacc() from early init386(), especially when
612 	 * initializing Giant mutex, because some stuff in kernacc()
613 	 * requires Giant itself.
614 	 */
615 	if (!cold)
616 		if (!kernacc((caddr_t)m, sizeof(*m),
617 		    VM_PROT_READ | VM_PROT_WRITE))
618 			panic("Can't read and write to mutex %p", m);
619 #endif
620 }
621 #endif
622 
623 /*
624  * General init routine used by the MTX_SYSINIT() macro.
625  */
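/*
 * For illustration (hypothetical names): a subsystem that has no convenient
 * place to call mtx_init() itself would typically declare
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx_init, &foo_mtx, "foo", MTX_DEF);
 *
 * which arranges for mtx_sysinit() below to run during boot.
 */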
626 void
627 mtx_sysinit(void *arg)
628 {
629 	struct mtx_args *margs = arg;
630 
631 	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
632 }
633 
634 /*
635  * Mutex initialization routine; initialize lock `m' with the type and
636  * options contained in `opts' and name `name.'  The optional
637  * lock type `type' is used as a general lock category name for use with
638  * witness.
639  */
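/*
 * For illustration (hypothetical locks): the lock type and options are
 * combined in `opts', e.g.
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF | MTX_RECURSE);
 *	mtx_init(&bar_mtx, "bar intr", "driver spin lock", MTX_SPIN);
 *
 * A lock set up this way is torn down with mtx_destroy() once nothing can
 * reference it any longer.
 */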
640 void
641 mtx_init(struct mtx *m, const char *name, const char *type, int opts)
642 {
643 	struct lock_class *class;
644 	int flags;
645 
646 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
647 		MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
648 
649 #ifdef MUTEX_DEBUG
650 	/* Diagnostic and error correction */
651 	mtx_validate(m);
652 #endif
653 
654 	/* Determine lock class and lock flags. */
655 	if (opts & MTX_SPIN)
656 		class = &lock_class_mtx_spin;
657 	else
658 		class = &lock_class_mtx_sleep;
659 	flags = 0;
660 	if (opts & MTX_QUIET)
661 		flags |= LO_QUIET;
662 	if (opts & MTX_RECURSE)
663 		flags |= LO_RECURSABLE;
664 	if ((opts & MTX_NOWITNESS) == 0)
665 		flags |= LO_WITNESS;
666 	if (opts & MTX_DUPOK)
667 		flags |= LO_DUPOK;
668 	if (opts & MTX_NOPROFILE)
669 		flags |= LO_NOPROFILE;
670 
671 	/* Initialize mutex. */
672 	m->mtx_lock = MTX_UNOWNED;
673 	m->mtx_recurse = 0;
674 
675 	lock_profile_object_init(&m->mtx_object, class, name);
676 	lock_init(&m->mtx_object, class, name, type, flags);
677 }
678 
679 /*
680  * Destroy lock `m'.  We don't allow MTX_QUIET to be
681  * passed in as a flag here because if the corresponding mtx_init() was
682  * called with MTX_QUIET set, then it will already be set in the mutex's
683  * flags.
684  */
685 void
686 mtx_destroy(struct mtx *m)
687 {
688 
689 	if (!mtx_owned(m))
690 		MPASS(mtx_unowned(m));
691 	else {
692 		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
693 
694 		/* Perform the non-mtx related part of mtx_unlock_spin(). */
695 		if (LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin)
696 			spinlock_exit();
697 		else
698 			curthread->td_locks--;
699 
700 		/* Tell witness this isn't locked to make it happy. */
701 		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
702 		    __LINE__);
703 	}
704 
705 	m->mtx_lock = MTX_DESTROYED;
706 	lock_profile_object_destroy(&m->mtx_object);
707 	lock_destroy(&m->mtx_object);
708 }
709 
710 /*
711  * Initialize the mutex code and system mutexes.  This is called from the MD
712  * startup code prior to mi_startup().  The per-CPU data space needs to be
713  * set up before this is called.
714  */
715 void
716 mutex_init(void)
717 {
718 
719 	/* Setup turnstiles so that sleep mutexes work. */
720 	init_turnstiles();
721 
722 	/*
723 	 * Initialize mutexes.
724 	 */
725 	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
726 	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
727 	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
728 	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
729 	mtx_lock(&Giant);
730 
731 	lock_profile_init();
732 }
733 
734 #ifdef DDB
735 void
736 db_show_mtx(struct lock_object *lock)
737 {
738 	struct thread *td;
739 	struct mtx *m;
740 
741 	m = (struct mtx *)lock;
742 
743 	db_printf(" flags: {");
744 	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
745 		db_printf("SPIN");
746 	else
747 		db_printf("DEF");
748 	if (m->mtx_object.lo_flags & LO_RECURSABLE)
749 		db_printf(", RECURSE");
750 	if (m->mtx_object.lo_flags & LO_DUPOK)
751 		db_printf(", DUPOK");
752 	db_printf("}\n");
753 	db_printf(" state: {");
754 	if (mtx_unowned(m))
755 		db_printf("UNOWNED");
756 	else {
757 		db_printf("OWNED");
758 		if (m->mtx_lock & MTX_CONTESTED)
759 			db_printf(", CONTESTED");
760 		if (m->mtx_lock & MTX_RECURSED)
761 			db_printf(", RECURSED");
762 	}
763 	db_printf("}\n");
764 	if (!mtx_unowned(m)) {
765 		td = mtx_owner(m);
766 		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
767 		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
768 		if (mtx_recursed(m))
769 			db_printf(" recursed: %d\n", m->mtx_recurse);
770 	}
771 }
772 #endif
773