xref: /freebsd/sys/kern/kern_mutex.c (revision 6990ffd8a95caaba6858ad44ff1b3157d1efba8f)
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD$
 */

/*
 * Machine independent bits of mutex implementation and implementation of
 * `witness' structure & related debugging routines.
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

#define SET_PRIO(td, pri)	(td)->td_ksegrp->kg_pri.pri_level = (pri)
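
/*
 * Illustrative sketch (not live code) of how the single mtx_lock word
 * carries both the owner and the state flags.  Since struct thread is
 * pointer-aligned, the low bits of its address are always zero and are
 * free to hold MTX_RECURSED and MTX_CONTESTED:
 *
 *	uintptr_t v = (uintptr_t)owner_td | MTX_CONTESTED;
 *	struct thread *owner = (struct thread *)(v & MTX_FLAGMASK);
 *	int has_waiters = (v & MTX_CONTESTED) != 0;
 *
 * mtx_owner() above performs exactly the masking step shown here.
 */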

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	struct ksegrp *kg = td->td_ksegrp;
	int pri = kg->kg_pri.pri_level;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right. Really
			 * ought to bump priority of thread that
			 * next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP,
		    ("sleeping thread owns a mutex"));
		/*
		 * Lower pri_level means higher priority.  If the lock
		 * owner already runs at least as high as the priority
		 * we are propagating, there is nothing left to do.
		 */
		if (td->td_ksegrp->kg_pri.pri_level <= pri)
			return;

		/*
		 * Bump this thread's priority.
		 */
		SET_PRIO(td, pri);

		/*
		 * If the lock holder is actually running, just bump its
		 * priority.
		 */
		/* XXXKSE this test is not sufficient */
		if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
			MPASS(td->td_proc->p_stat == SRUN ||
			    td->td_proc->p_stat == SZOMB ||
			    td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * On UP, check whether td is curthread (this should never
		 * happen, as it would mean we are deadlocked).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If the thread is on a run queue, requeue it so that the
		 * new priority takes effect, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_ksegrp->kg_pri.pri_level <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_ksegrp->kg_pri.pri_level > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}
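
/*
 * Worked example (a sketch; threads and priorities are hypothetical):
 * thread A (pri_level 80) owns M1 but is blocked on M2, which thread B
 * (pri_level 90) owns.  When thread C (pri_level 40) blocks on M1,
 * propagate_priority(C) walks the chain of ownership:
 *
 *	C blocks on M1  ->  owner A is bumped to pri_level 40
 *	A blocks on M2  ->  owner B is bumped to pri_level 40
 *
 * At each hop the bumped thread is also re-sorted into the priority-
 * ordered mtx_blocked queue it sleeps on, so _mtx_unlock_sleep() will
 * wake the highest-priority waiter first.
 */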

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_lock_flags(m, opts, file, line);
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_unlock_flags(m, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_lock_spin_flags(m, opts, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	__mtx_unlock_spin_flags(m, opts, file, line);
}
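
/*
 * Usage sketch for the function versions above (hypothetical module
 * code; foo_mtx is a placeholder name): code that cannot use the
 * inlined macros can take and drop a sleep mutex through these entry
 * points directly:
 *
 *	_mtx_lock_flags(&foo_mtx, 0, __FILE__, __LINE__);
 *	... critical section ...
 *	_mtx_unlock_flags(&foo_mtx, 0, __FILE__, __LINE__);
 *
 * Normal C code would just write mtx_lock(&foo_mtx) and
 * mtx_unlock(&foo_mtx) and let the macros pick the right form.
 */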

/*
 * The important part of mtx_trylock{,_flags}().
 *
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume
 * that if we're called, it's because we know we don't already own this
 * lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	/*
	 * _mtx_trylock does not accept MTX_NOSWITCH option.
	 */
	KASSERT((opts & MTX_NOSWITCH) == 0,
	    ("mtx_trylock() called with invalid option flag(s) %d", opts));

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}
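
/*
 * Usage sketch for the trylock path (foo_mtx is a placeholder name),
 * for callers that must not sleep:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... do the work ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... lock is busy; defer the work or retry later ...
 *	}
 *
 * mtx_trylock() ultimately lands in _mtx_trylock() above with opts of 0.
 */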

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
	struct ksegrp *kg = td->td_ksegrp;

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_ksegrp->kg_pri.pri_level <
			    kg->kg_pri.pri_level)
				SET_PRIO(td, td1->td_ksegrp->kg_pri.pri_level);
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = (struct thread *)(m->mtx_lock & MTX_FLAGMASK);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_ksegrp->kg_pri.pri_level >
				    kg->kg_pri.pri_level)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			    td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}
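
/*
 * Sketch of the lock word transitions driven by the loop above
 * (T and T' are owning thread pointers):
 *
 *	MTX_UNOWNED       -> T                    uncontested acquire
 *	T                 -> T | MTX_CONTESTED    waiter announces itself
 *	T | MTX_CONTESTED -> MTX_CONTESTED        released, waiters remain
 *	MTX_CONTESTED     -> T' | MTX_CONTESTED   woken waiter takes over
 *	T | MTX_CONTESTED -> MTX_UNOWNED          released after the last
 *	                                          waiter was dequeued
 */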

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
	       int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit(mtx_crit);
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 1000000)
				continue;
			if (i++ < 6000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
		}
		mtx_crit = critical_enter();
	}

	m->mtx_savecrit = mtx_crit;
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}
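
/*
 * Usage sketch for spin mutexes (intr_mtx is a hypothetical name):
 * they protect short critical sections that may be entered from
 * interrupt context, and acquiring one also enters a critical section:
 *
 *	struct mtx intr_mtx;
 *
 *	mtx_init(&intr_mtx, "intr table", MTX_SPIN);
 *	mtx_lock_spin(&intr_mtx);
 *	... touch the shared state briefly ...
 *	mtx_unlock_spin(&intr_mtx);
 */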

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_ksegrp->kg_pri.pri_level;

		if (cp < pri)
			pri = cp;
	}

	if (pri > kg->kg_pri.pri_native)
		pri = kg->kg_pri.pri_native;
	SET_PRIO(td, pri);

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	if ((opts & MTX_NOSWITCH) == 0 &&
	    td1->td_ksegrp->kg_pri.pri_level < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
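
/*
 * Usage sketch for mtx_assert() (foo_mtx and foo_modify() are
 * hypothetical): a function whose contract requires the caller to hold
 * a lock can verify that under INVARIANTS at no cost in normal kernels:
 *
 *	static void
 *	foo_modify(void)
 *	{
 *
 *		mtx_assert(&foo_mtx, MA_OWNED | MA_NOTRECURSED);
 *		... modify data protected by foo_mtx ...
 *	}
 */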

/*
 * The MUTEX_DEBUG-enabled mtx_validate().
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate __P((struct mtx *));

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * option flags contained in `opts,' and name it `description.'
 */
void
mtx_init(struct mtx *m, const char *description, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	bzero(m, sizeof(*m));
	lock = &m->mtx_object;
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = description;
	if (opts & MTX_QUIET)
		lock->lo_flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
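
/*
 * Lifecycle sketch (softc_mtx is a hypothetical consumer lock): a
 * typical user pairs mtx_init() with mtx_destroy() and chooses option
 * flags up front.  With MTX_RECURSE the owner may relock:
 *
 *	struct mtx softc_mtx;
 *
 *	mtx_init(&softc_mtx, "foo softc", MTX_DEF | MTX_RECURSE);
 *	mtx_lock(&softc_mtx);
 *	mtx_lock(&softc_mtx);		(recursion allowed by MTX_RECURSE)
 *	mtx_unlock(&softc_mtx);
 *	mtx_unlock(&softc_mtx);
 *	mtx_destroy(&softc_mtx);
 */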

/*
 * Destroy lock `m.'  We don't pass MTX_QUIET as a flag here (note the 0
 * passed to LOCK_LOG_DESTROY) because if the corresponding mtx_init()
 * was called with MTX_QUIET set, then it will already be set in the
 * mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED | MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE | LOP_NOSWITCH,
		    __FILE__, __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}
700