/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD$
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
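
/*
 * Illustrative sketch (an editorial example, not from the original
 * source): the lock word packs the owning thread pointer and the low
 * flag bits into a single word, which is what the macros above rely on.
 * With a hypothetical owner thread at address 0xc1234560 whose lock is
 * contested:
 *
 *	m->mtx_lock == 0xc1234560 | MTX_CONTESTED
 *	mtx_owner(m) == (struct thread *)0xc1234560
 *
 * Thread pointers are suitably aligned, so the low bits are free to
 * hold MTX_RECURSED and MTX_CONTESTED.
 */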

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	struct ksegrp *kg = td->td_ksegrp;
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right.  We really
			 * ought to bump the priority of the thread
			 * that next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}
		kg = td->td_ksegrp;

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP,
		    ("sleeping thread owns a mutex"));
		if (td->td_priority <= pri) /* lower is higher priority */
			return;

		/*
		 * Bump this thread's priority.
		 */
		td->td_priority = pri;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		/* XXXKSE this test is not sufficient */
		if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
			MPASS(td->td_proc->p_stat == SRUN ||
			    td->td_proc->p_stat == SZOMB ||
			    td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check that td is not curthread (this should
		 * never happen, however, as it would mean that we are in
		 * a deadlock).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX,
		    ("process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}
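
/*
 * Illustrative sketch (an editorial example; threads A, B, C and their
 * priorities are hypothetical): if A (priority 40) blocks on a mutex
 * owned by B (priority 80), and B is itself blocked on a mutex owned by
 * C (priority 90), the loop above walks the chain and sets both B's and
 * C's priority to 40 (lower values being better), so that the eventual
 * owner runs soon enough to release the lock.
 */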

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_spin_lock(m);
}
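
/*
 * Illustrative sketch (an editorial example; the lock and its name are
 * hypothetical): modules reach the function versions above through the
 * usual mutex macros, e.g.:
 *
 *	static struct mtx foo_lock;
 *
 *	mtx_init(&foo_lock, "foo", MTX_DEF);
 *	mtx_lock(&foo_lock);	(ends up in _mtx_lock_flags())
 *	...
 *	mtx_unlock(&foo_lock);	(ends up in _mtx_unlock_flags())
 *
 * MTX_DEF selects a sleep mutex; a mutex initialized with MTX_SPIN is
 * routed to the spin variants instead.
 */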

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume
 * that if we're called, it's because we know we don't already own this
 * lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}
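
/*
 * Illustrative sketch (an editorial example; foo_lock is hypothetical):
 * since recursion is not handled here, the usual caller pattern is to
 * try the lock and fall back rather than block:
 *
 *	if (mtx_trylock(&foo_lock)) {
 *		(short critical section)
 *		mtx_unlock(&foo_lock);
 *	} else {
 *		(defer the work or retry later)
 *	}
 */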

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
	struct ksegrp *kg = td->td_ksegrp;

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			    td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000)
				continue;
			if (i++ < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}
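
/*
 * Illustrative sketch (an editorial example; foo_spin_lock is
 * hypothetical and must have been initialized with MTX_SPIN): spin
 * mutexes are for code that may not sleep, and the holder runs in a
 * critical section:
 *
 *	mtx_lock_spin(&foo_spin_lock);
 *	(may not sleep or acquire sleep mutexes here)
 *	mtx_unlock_spin(&foo_spin_lock);
 */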

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
					    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
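
/*
 * Illustrative sketch (an editorial example; struct foo and f_lock are
 * hypothetical): callers use mtx_assert() to document and enforce a
 * locking protocol, e.g.:
 *
 *	static void
 *	foo_modify(struct foo *f)
 *	{
 *		mtx_assert(&f->f_lock, MA_OWNED);
 *		...
 *	}
 *
 * In kernels without INVARIANTS the assertion compiles away to nothing.
 */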

/*
 * The MUTEX_DEBUG-enabled mtx_validate().
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate __P((struct mtx *));

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * Mutex initialization routine; initialize lock `m' of the type and with
 * the options contained in `opts', and with description `description.'
 */
void
mtx_init(struct mtx *m, const char *description, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", description, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = description;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
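
/*
 * Illustrative sketch (an editorial example; the locks and their names
 * are hypothetical): typical initializations with the options accepted
 * above:
 *
 *	mtx_init(&foo_lock, "foo", MTX_DEF);			(sleep mutex)
 *	mtx_init(&foo_spin, "foo spin", MTX_SPIN);		(spin mutex)
 *	mtx_init(&foo_rec, "foo rec", MTX_DEF | MTX_RECURSE);	(recursable)
 */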

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a flag
 * here because if the corresponding mtx_init() was called with MTX_QUIET
 * set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED | MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL,
    "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return (1);
	}
	return (0);
}

void
mtx_unlock_giant(int s)
{
	if (s)
		mtx_unlock(&Giant);
}
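
/*
 * Illustrative sketch (an editorial example) of the protocol described
 * above:
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_proc);
 *	(code that may or may not still need Giant)
 *	mtx_unlock_giant(s);
 *
 * The return value must be carried to the matching mtx_unlock_giant()
 * call so that the unlock matches whatever the sysctl said at lock time.
 */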
734