/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD$
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
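 *
 * The lock word (mtx_lock) encodes the owning thread pointer with the
 * MTX_RECURSED and MTX_CONTESTED flag bits OR'd into its low bits, so
 * mtx_owner() masks with MTX_FLAGMASK to strip the flags and recover the
 * thread pointer.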
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

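/*
 * Walk the chain of threads blocked on contested mutexes: bump the priority
 * of each lock owner that is weaker than the blocked thread, and, when an
 * owner is itself blocked, re-sort it on the queue of threads blocked on
 * its own mutex.
 */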
static void
propagate_priority(struct thread *td)
{
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right; we really ought
			 * to bump the priority of the thread that next
			 * acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP, ("sleeping thread owns a mutex"));
		if (td->td_priority <= pri) /* lower is higher priority */
			return;

		/*
		 * Bump this thread's priority.
		 */
		td->td_priority = pri;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		/* XXXKSE this test is not sufficient */
		if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
			MPASS(td->td_proc->p_stat == SRUN
			|| td->td_proc->p_stat == SZOMB
			|| td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this should
		 * never happen, however, as it would mean we are deadlocked).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If the thread is on a run queue, move it to the new run
		 * queue and quit.
		 * XXXKSE this gets a lot more complicated under threads,
		 * but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_spin_lock(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
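		 * The list is kept sorted by priority, so the head is always
		 * the highest priority waiter and is the thread that
		 * _mtx_unlock_sleep() will wake up first.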
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			  "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			  td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
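			/*
			 * Spin without delaying at first; once the iteration
			 * count grows large, back off with 1us delays and
			 * finally panic (unless the debugger is active) on an
			 * apparently stuck lock.
			 */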
			if (i++ < 10000000)
				continue;
			if (i++ < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
			panic("spin lock %s held by %p for > 5 seconds",
			    m->mtx_object.lo_name, (void *)m->mtx_lock);
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;

	td = curthread;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

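	/*
	 * Recompute our priority: take the highest priority (lowest value)
	 * of any thread still blocked on a mutex we hold, but never fall
	 * to a weaker priority than our own base priority.
	 */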
	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * Mutex initialization routine; initialize lock `m' of the type and with
 * the options contained in `opts', and with description `description.'
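 *
 * For example (purely illustrative; the "foo" names are hypothetical), a
 * default sleep mutex could be set up with:
 *
 *	mtx_init(&foo_mtx, "foo", MTX_DEF);
 *
 * and a spin mutex by passing MTX_SPIN in `opts' instead.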
 */
void
mtx_init(struct mtx *m, const char *description, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", description, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = description;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working with regard to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
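 *
 * A typical caller (illustrative only; kern_giant_proc is one of the
 * sysctl variables declared below) looks like:
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_proc);
 *	...			(code that may still need Giant)
 *	mtx_unlock_giant(s);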
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return(1);
	}
	return(0);
}

void
mtx_unlock_giant(int s)
{
	if (s)
		mtx_unlock(&Giant);
}