xref: /freebsd/sys/kern/subr_turnstile.c (revision a220d00e74dd245b4fca59c5eca0c53963686325)
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD$
 */

/*
 * Machine independent bits of mutex implementation and implementation of
 * `witness' structure & related debugging routines.
 */

/*
 *	Main Entry: witness
 *	Pronunciation: 'wit-n&s
 *	Function: noun
 *	Etymology: Middle English witnesse, from Old English witnes knowledge,
 *	    testimony, witness, from 2wit
 *	Date: before 12th century
 *	1 : attestation of a fact or event : TESTIMONY
 *	2 : one that gives evidence; specifically : one who testifies in
 *	    a cause or before a judicial tribunal
 *	3 : one asked to be present at a transaction so as to be able to
 *	    testify to its having taken place
 *	4 : one who has personal knowledge of something
 *	5 a : something serving as evidence or proof : SIGN
 *	  b : public affirmation by word or example of usually
 *	      religious faith or conviction <the heroic witness to divine
 *	      life -- Pilot>
 *	6 capitalized : a member of the Jehovah's Witnesses
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

#define SET_PRIO(td, pri)	(td)->td_ksegrp->kg_pri.pri_level = (pri)

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

static void
propagate_priority(struct thread *td)
{
	struct ksegrp *kg = td->td_ksegrp;
	int pri = kg->kg_pri.pri_level;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This isn't quite right; we really ought to bump
			 * the priority of the thread that next acquires
			 * the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}
		kg = td->td_ksegrp;

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP,
		    ("sleeping thread owns a mutex"));
		if (kg->kg_pri.pri_level <= pri) /* lower is higher priority */
			return;

		/*
		 * Bump this thread's priority.
		 */
		SET_PRIO(td, pri);

		/*
		 * If the lock holder is actually running, just bump the
		 * priority.
		 */
		/* XXXKSE this test is not sufficient */
		if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
			MPASS(td->td_proc->p_stat == SRUN
			|| td->td_proc->p_stat == SZOMB
			|| td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * For UP, check whether td is curthread (this should never
		 * happen, as it would mean we are deadlocked).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If the thread is on a run queue, move it to the new run
		 * queue and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_ksegrp->kg_pri.pri_level <= pri) {
			continue;
		}

		/*
		 * Remove the thread from the blocked chain and determine
		 * where it should be moved up to.  Since we know that td1
		 * has a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that td1
		 * will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_ksegrp->kg_pri.pri_level > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT((opts & MTX_NOSWITCH) == 0,
	    ("MTX_NOSWITCH used at %s:%d", file, line));
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_spin_lock(m);
}

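/*
 * Illustrative sketch (not part of this file): a kernel module would
 * typically reach the function versions above through the mtx_lock() and
 * mtx_unlock() macros from <sys/mutex.h>.  The mutex and counter below
 * are hypothetical.
 *
 *	static struct mtx foo_mtx;
 *	static int foo_count;
 *
 *	static void
 *	foo_bump(void)
 *	{
 *		mtx_lock(&foo_mtx);	(ultimately _mtx_lock_flags())
 *		foo_count++;
 *		mtx_unlock(&foo_mtx);	(ultimately _mtx_unlock_flags())
 *	}
 */
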
/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume
 * that if we're called, it's because we know we don't already own this
 * lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	/*
	 * _mtx_trylock does not accept the MTX_NOSWITCH option.
	 */
	KASSERT((opts & MTX_NOSWITCH) == 0,
	    ("mtx_trylock() called with invalid option flag(s) %d", opts));

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}

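/*
 * Illustrative sketch (hypothetical caller): mtx_trylock() suits the case
 * where a lock must be taken out of the usual order and blocking could
 * deadlock.  The names below are made up.
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		foo_count++;
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		(back off, drop conflicting locks, retry in order)
 *	}
 */
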
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
	struct ksegrp *kg = td->td_ksegrp;

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * The mutex was marked contested on release.  This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_ksegrp->kg_pri.pri_level < kg->kg_pri.pri_level)
				SET_PRIO(td, td1->td_ksegrp->kg_pri.pri_level);
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = (struct thread *)(m->mtx_lock & MTX_FLAGMASK);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_ksegrp->kg_pri.pri_level > kg->kg_pri.pri_level)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			  "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			  td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit, const char *file,
	       int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit(mtx_crit);
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 1000000)
				continue;
			if (i++ < 6000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
			panic("spin lock %s held by %p for > 5 seconds",
			    m->mtx_object.lo_name, (void *)m->mtx_lock);
		}
		mtx_crit = critical_enter();
	}

	m->mtx_savecrit = mtx_crit;
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_ksegrp->kg_pri.pri_level;
		if (cp < pri)
			pri = cp;
	}

	if (pri > kg->kg_pri.pri_native)
		pri = kg->kg_pri.pri_native;
	SET_PRIO(td, pri);

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	if ((opts & MTX_NOSWITCH) == 0 && td1->td_ksegrp->kg_pri.pri_level < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

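/*
 * Illustrative sketch (hypothetical): a subsystem can document its locking
 * assumptions by asserting lock state on entry to internal routines.  The
 * foo_softc structure below is made up.
 *
 *	static void
 *	foo_modify_locked(struct foo_softc *sc)
 *	{
 *		mtx_assert(&sc->sc_mtx, MA_OWNED);
 *		...
 *	}
 *
 * With INVARIANTS disabled, mtx_assert() expands to nothing, so such
 * assertions cost nothing in production kernels.
 */
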
/*
 * The MUTEX_DEBUG-enabled mtx_validate().
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate __P((struct mtx *));

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and the description `description.'
 */
void
mtx_init(struct mtx *m, const char *description, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", description, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = description;
	if (opts & MTX_QUIET)
		lock->lo_flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE | LOP_NOSWITCH,
		    __FILE__, __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

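/*
 * Illustrative sketch (hypothetical driver): the usual mutex life cycle
 * around the routines above.  The softc and names are made up.
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		...
 *	};
 *
 *	mtx_init(&sc->sc_mtx, "foo driver", MTX_DEF);
 *	...guard sc state with mtx_lock()/mtx_unlock() on &sc->sc_mtx...
 *	mtx_destroy(&sc->sc_mtx);
 *
 * mtx_destroy() must only be called once no thread can still use the
 * mutex; with witness enabled it also tears down the lock's witness state.
 */
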
/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return (1);
	}
	return (0);
}

void
mtx_unlock_giant(int s)
{
	if (s)
		mtx_unlock(&Giant);
}
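
/*
 * Illustrative sketch (hypothetical caller): wrapping a subsystem call in
 * an optional Giant acquisition.  foo_file_operation() is made up.
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_file);
 *	error = foo_file_operation(...);
 *	mtx_unlock_giant(s);
 *
 * The return value of mtx_lock_giant() records whether Giant was actually
 * taken, so mtx_unlock_giant() releases it only in that case.
 */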
765