xref: /freebsd/sys/kern/kern_mutex.c (revision edf8578117e8844e02c0121147f45e4609b30680)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Berkeley Software Design Inc's name may not be used to endorse or
15  *    promote products derived from this software without specific prior
16  *    written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
31  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
32  */
33 
34 /*
35  * Machine-independent bits of the mutex implementation.
36  */
37 
38 #include <sys/cdefs.h>
39 #include "opt_adaptive_mutexes.h"
40 #include "opt_ddb.h"
41 #include "opt_hwpmc_hooks.h"
42 #include "opt_sched.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/bus.h>
47 #include <sys/conf.h>
48 #include <sys/kdb.h>
49 #include <sys/kernel.h>
50 #include <sys/ktr.h>
51 #include <sys/lock.h>
52 #include <sys/malloc.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/resourcevar.h>
56 #include <sys/sched.h>
57 #include <sys/sbuf.h>
58 #include <sys/smp.h>
59 #include <sys/sysctl.h>
60 #include <sys/turnstile.h>
61 #include <sys/vmmeter.h>
62 #include <sys/lock_profile.h>
63 
64 #include <machine/atomic.h>
65 #include <machine/bus.h>
66 #include <machine/cpu.h>
67 
68 #include <ddb/ddb.h>
69 
70 #include <fs/devfs/devfs_int.h>
71 
72 #include <vm/vm.h>
73 #include <vm/vm_extern.h>
74 
75 #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
76 #define	ADAPTIVE_MUTEXES
77 #endif
78 
79 #ifdef HWPMC_HOOKS
80 #include <sys/pmckern.h>
81 PMC_SOFT_DEFINE( , , lock, failed);
82 #endif
83 
84 /*
85  * Return the mutex address when the lock cookie address is provided.
86  * This functionality assumes that struct mtx has a member named mtx_lock.
87  */
88 #define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
89 
90 /*
91  * Internal utility macros.
92  */
93 #define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
94 
95 #define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)
96 
97 static void	assert_mtx(const struct lock_object *lock, int what);
98 #ifdef DDB
99 static void	db_show_mtx(const struct lock_object *lock);
100 #endif
101 static void	lock_mtx(struct lock_object *lock, uintptr_t how);
102 static void	lock_spin(struct lock_object *lock, uintptr_t how);
103 #ifdef KDTRACE_HOOKS
104 static int	owner_mtx(const struct lock_object *lock,
105 		    struct thread **owner);
106 #endif
107 static uintptr_t unlock_mtx(struct lock_object *lock);
108 static uintptr_t unlock_spin(struct lock_object *lock);
109 
110 /*
111  * Lock classes for sleep and spin mutexes.
112  */
113 struct lock_class lock_class_mtx_sleep = {
114 	.lc_name = "sleep mutex",
115 	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
116 	.lc_assert = assert_mtx,
117 #ifdef DDB
118 	.lc_ddb_show = db_show_mtx,
119 #endif
120 	.lc_lock = lock_mtx,
121 	.lc_unlock = unlock_mtx,
122 #ifdef KDTRACE_HOOKS
123 	.lc_owner = owner_mtx,
124 #endif
125 };
126 struct lock_class lock_class_mtx_spin = {
127 	.lc_name = "spin mutex",
128 	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
129 	.lc_assert = assert_mtx,
130 #ifdef DDB
131 	.lc_ddb_show = db_show_mtx,
132 #endif
133 	.lc_lock = lock_spin,
134 	.lc_unlock = unlock_spin,
135 #ifdef KDTRACE_HOOKS
136 	.lc_owner = owner_mtx,
137 #endif
138 };
139 
140 #ifdef ADAPTIVE_MUTEXES
141 #ifdef MUTEX_CUSTOM_BACKOFF
142 static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
143     "mtx debugging");
144 
145 static struct lock_delay_config __read_frequently mtx_delay;
146 
147 SYSCTL_U16(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
148     0, "");
149 SYSCTL_U16(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
150     0, "");
151 
152 LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
153 #else
154 #define mtx_delay	locks_delay
155 #endif
156 #endif
157 
158 #ifdef MUTEX_SPIN_CUSTOM_BACKOFF
159 static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin,
160     CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
161     "mtx spin debugging");
162 
163 static struct lock_delay_config __read_frequently mtx_spin_delay;
164 
165 SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
166     &mtx_spin_delay.base, 0, "");
167 SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
168     &mtx_spin_delay.max, 0, "");
169 
170 LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);
171 #else
172 #define mtx_spin_delay	locks_delay
173 #endif
174 
175 /*
176  * System-wide mutexes
177  */
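/*
 * blocked_lock is never acquired directly; it is a permanently "owned"
 * placeholder that is installed in td_lock while a thread's lock pointer is
 * being switched (see thread_lock_block()/thread_lock_unblock() below), so
 * that concurrent thread_lock() callers spin until the switch completes.
 */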
178 struct mtx blocked_lock;
179 struct mtx __exclusive_cache_line Giant;
180 
181 static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *);
182 
183 void
184 assert_mtx(const struct lock_object *lock, int what)
185 {
186 
187 	/*
188 	 * Treat LA_LOCKED as if LA_XLOCKED was asserted.
189 	 *
190 	 * Some callers of lc_assert use LA_LOCKED to indicate that either
191 	 * a shared lock or a write lock is held, while other callers use
192 	 * the stricter LA_XLOCKED (used as MA_OWNED).
193 	 *
194 	 * Mutexes are the only lock class that cannot be shared, so we can
195 	 * reasonably assume that a caller asserting LA_LOCKED on a mutex
196 	 * object really intends to assert LA_XLOCKED.
197 	 */
198 	if (what & LA_LOCKED) {
199 		what &= ~LA_LOCKED;
200 		what |= LA_XLOCKED;
201 	}
202 	mtx_assert((const struct mtx *)lock, what);
203 }
204 
205 void
206 lock_mtx(struct lock_object *lock, uintptr_t how)
207 {
208 
209 	mtx_lock((struct mtx *)lock);
210 }
211 
212 void
213 lock_spin(struct lock_object *lock, uintptr_t how)
214 {
215 
216 	mtx_lock_spin((struct mtx *)lock);
217 }
218 
219 uintptr_t
220 unlock_mtx(struct lock_object *lock)
221 {
222 	struct mtx *m;
223 
224 	m = (struct mtx *)lock;
225 	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
226 	mtx_unlock(m);
227 	return (0);
228 }
229 
230 uintptr_t
231 unlock_spin(struct lock_object *lock)
232 {
233 	struct mtx *m;
234 
235 	m = (struct mtx *)lock;
236 	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
237 	mtx_unlock_spin(m);
238 	return (0);
239 }
240 
241 #ifdef KDTRACE_HOOKS
242 int
243 owner_mtx(const struct lock_object *lock, struct thread **owner)
244 {
245 	const struct mtx *m;
246 	uintptr_t x;
247 
248 	m = (const struct mtx *)lock;
249 	x = m->mtx_lock;
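	/* Mask off the flag bits; what remains is the owning thread, if any. */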
250 	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
251 	return (*owner != NULL);
252 }
253 #endif
254 
255 /*
256  * Function versions of the inlined __mtx_* macros.  These are used by
257  * modules and can also be called from assembly language if needed.
258  */
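/*
 * Informal usage sketch (not part of the original source; "foo_mtx" and
 * "foo" are made-up names): a typical consumer does something like
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	... access the data protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 *	mtx_destroy(&foo_mtx);
 *
 * In kernel modules these calls typically resolve to the function versions
 * below rather than the inlined fast paths.
 */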
259 void
260 __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
261 {
262 	struct mtx *m;
263 	uintptr_t tid, v;
264 
265 	m = mtxlock2mtx(c);
266 
267 	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
268 	    !TD_IS_IDLETHREAD(curthread),
269 	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
270 	    curthread, m->lock_object.lo_name, file, line));
271 	KASSERT(m->mtx_lock != MTX_DESTROYED,
272 	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
273 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
274 	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
275 	    file, line));
276 	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
277 	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
278 
279 	tid = (uintptr_t)curthread;
280 	v = MTX_UNOWNED;
281 	if (!_mtx_obtain_lock_fetch(m, &v, tid))
282 		_mtx_lock_sleep(m, v, opts, file, line);
283 	else
284 		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
285 		    m, 0, 0, file, line);
286 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
287 	    line);
288 	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
289 	    file, line);
290 	TD_LOCKS_INC(curthread);
291 }
292 
293 void
294 __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
295 {
296 	struct mtx *m;
297 
298 	m = mtxlock2mtx(c);
299 
300 	KASSERT(m->mtx_lock != MTX_DESTROYED,
301 	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
302 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
303 	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
304 	    file, line));
305 	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
306 	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
307 	    line);
308 	mtx_assert(m, MA_OWNED);
309 
310 #ifdef LOCK_PROFILING
311 	__mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line);
312 #else
313 	__mtx_unlock(m, curthread, opts, file, line);
314 #endif
315 	TD_LOCKS_DEC(curthread);
316 }
317 
318 void
319 __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
320     int line)
321 {
322 	struct mtx *m;
323 #ifdef SMP
324 	uintptr_t tid, v;
325 #endif
326 
327 	m = mtxlock2mtx(c);
328 
329 	KASSERT(m->mtx_lock != MTX_DESTROYED,
330 	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
331 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
332 	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
333 	    m->lock_object.lo_name, file, line));
334 	if (mtx_owned(m))
335 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
336 		    (opts & MTX_RECURSE) != 0,
337 	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
338 		    m->lock_object.lo_name, file, line));
339 	opts &= ~MTX_RECURSE;
340 	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
341 	    file, line, NULL);
342 #ifdef SMP
343 	spinlock_enter();
344 	tid = (uintptr_t)curthread;
345 	v = MTX_UNOWNED;
346 	if (!_mtx_obtain_lock_fetch(m, &v, tid))
347 		_mtx_lock_spin(m, v, opts, file, line);
348 	else
349 		LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire,
350 		    m, 0, 0, file, line);
351 #else
352 	__mtx_lock_spin(m, curthread, opts, file, line);
353 #endif
354 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
355 	    line);
356 	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
357 }
358 
359 int
360 __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
361     int line)
362 {
363 	struct mtx *m;
364 
365 	if (SCHEDULER_STOPPED())
366 		return (1);
367 
368 	m = mtxlock2mtx(c);
369 
370 	KASSERT(m->mtx_lock != MTX_DESTROYED,
371 	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
372 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
373 	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
374 	    m->lock_object.lo_name, file, line));
375 	KASSERT((opts & MTX_RECURSE) == 0,
376 	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
377 	    m->lock_object.lo_name, file, line));
378 	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
379 		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
380 		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
381 		return (1);
382 	}
383 	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
384 	return (0);
385 }
386 
387 void
388 __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
389     int line)
390 {
391 	struct mtx *m;
392 
393 	m = mtxlock2mtx(c);
394 
395 	KASSERT(m->mtx_lock != MTX_DESTROYED,
396 	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
397 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
398 	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
399 	    m->lock_object.lo_name, file, line));
400 	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
401 	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
402 	    line);
403 	mtx_assert(m, MA_OWNED);
404 
405 	__mtx_unlock_spin(m);
406 }
407 
408 /*
409  * The important part of mtx_trylock{,_flags}(): try to acquire lock `m.'
410  * If this function is called on a mutex that is already owned, it will
411  * recursively acquire the lock.
412  */
413 int
414 _mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
415 {
416 	struct thread *td;
417 	uintptr_t tid, v;
418 #ifdef LOCK_PROFILING
419 	uint64_t waittime = 0;
420 	int contested = 0;
421 #endif
422 	int rval;
423 	bool recursed;
424 
425 	td = curthread;
426 	tid = (uintptr_t)td;
427 	if (SCHEDULER_STOPPED_TD(td))
428 		return (1);
429 
430 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
431 	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
432 	    curthread, m->lock_object.lo_name, file, line));
433 	KASSERT(m->mtx_lock != MTX_DESTROYED,
434 	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
435 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
436 	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
437 	    file, line));
438 
439 	rval = 1;
440 	recursed = false;
441 	v = MTX_UNOWNED;
442 	for (;;) {
443 		if (_mtx_obtain_lock_fetch(m, &v, tid))
444 			break;
445 		if (v == MTX_UNOWNED)
446 			continue;
447 		if (v == tid &&
448 		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
449 		    (opts & MTX_RECURSE) != 0)) {
450 			m->mtx_recurse++;
451 			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
452 			recursed = true;
453 			break;
454 		}
455 		rval = 0;
456 		break;
457 	}
458 
459 	opts &= ~MTX_RECURSE;
460 
461 	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
462 	if (rval) {
463 		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
464 		    file, line);
465 		TD_LOCKS_INC(curthread);
466 		if (!recursed)
467 			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
468 			    m, contested, waittime, file, line);
469 	}
470 
471 	return (rval);
472 }
473 
474 int
475 _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
476 {
477 	struct mtx *m;
478 
479 	m = mtxlock2mtx(c);
480 	return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
481 }
482 
483 /*
484  * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
485  *
486  * We call this if the lock is either contested (i.e. we need to go to
487  * sleep waiting for it), or if we need to recurse on it.
488  */
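/*
 * Rough outline of the slow path below: with ADAPTIVE_MUTEXES, spin (with
 * lock_delay() backoff) while the current owner is running on another CPU;
 * once the owner is descheduled, set MTX_CONTESTED and block on the mutex's
 * turnstile until the releasing thread wakes us up.
 */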
489 #if LOCK_DEBUG > 0
490 void
491 __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
492     int line)
493 #else
494 void
495 __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
496 #endif
497 {
498 	struct thread *td;
499 	struct mtx *m;
500 	struct turnstile *ts;
501 	uintptr_t tid;
502 	struct thread *owner;
503 #ifdef LOCK_PROFILING
504 	int contested = 0;
505 	uint64_t waittime = 0;
506 #endif
507 #if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
508 	struct lock_delay_arg lda;
509 #endif
510 #ifdef KDTRACE_HOOKS
511 	u_int sleep_cnt = 0;
512 	int64_t sleep_time = 0;
513 	int64_t all_time = 0;
514 #endif
515 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
516 	int doing_lockprof = 0;
517 #endif
518 
519 	td = curthread;
520 	tid = (uintptr_t)td;
521 	m = mtxlock2mtx(c);
522 
523 #ifdef KDTRACE_HOOKS
524 	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
525 		while (v == MTX_UNOWNED) {
526 			if (_mtx_obtain_lock_fetch(m, &v, tid))
527 				goto out_lockstat;
528 		}
529 		doing_lockprof = 1;
530 		all_time -= lockstat_nsecs(&m->lock_object);
531 	}
532 #endif
533 #ifdef LOCK_PROFILING
534 	doing_lockprof = 1;
535 #endif
536 
537 	if (SCHEDULER_STOPPED_TD(td))
538 		return;
539 
540 	if (__predict_false(v == MTX_UNOWNED))
541 		v = MTX_READ_VALUE(m);
542 
543 	if (__predict_false(lv_mtx_owner(v) == td)) {
544 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
545 		    (opts & MTX_RECURSE) != 0,
546 	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
547 		    m->lock_object.lo_name, file, line));
548 #if LOCK_DEBUG > 0
549 		opts &= ~MTX_RECURSE;
550 #endif
551 		m->mtx_recurse++;
552 		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
553 		if (LOCK_LOG_TEST(&m->lock_object, opts))
554 			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
555 		return;
556 	}
557 #if LOCK_DEBUG > 0
558 	opts &= ~MTX_RECURSE;
559 #endif
560 
561 #if defined(ADAPTIVE_MUTEXES)
562 	lock_delay_arg_init(&lda, &mtx_delay);
563 #elif defined(KDTRACE_HOOKS)
564 	lock_delay_arg_init_noadapt(&lda);
565 #endif
566 
567 #ifdef HWPMC_HOOKS
568 	PMC_SOFT_CALL( , , lock, failed);
569 #endif
570 	lock_profile_obtain_lock_failed(&m->lock_object, false,
571 		    &contested, &waittime);
572 	if (LOCK_LOG_TEST(&m->lock_object, opts))
573 		CTR4(KTR_LOCK,
574 		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
575 		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
576 
577 	for (;;) {
578 		if (v == MTX_UNOWNED) {
579 			if (_mtx_obtain_lock_fetch(m, &v, tid))
580 				break;
581 			continue;
582 		}
583 #ifdef KDTRACE_HOOKS
584 		lda.spin_cnt++;
585 #endif
586 #ifdef ADAPTIVE_MUTEXES
587 		/*
588 		 * If the owner is running on another CPU, spin until the
589 		 * owner stops running or the state of the lock changes.
590 		 */
591 		owner = lv_mtx_owner(v);
592 		if (TD_IS_RUNNING(owner)) {
593 			if (LOCK_LOG_TEST(&m->lock_object, 0))
594 				CTR3(KTR_LOCK,
595 				    "%s: spinning on %p held by %p",
596 				    __func__, m, owner);
597 			KTR_STATE1(KTR_SCHED, "thread",
598 			    sched_tdname((struct thread *)tid),
599 			    "spinning", "lockname:\"%s\"",
600 			    m->lock_object.lo_name);
601 			do {
602 				lock_delay(&lda);
603 				v = MTX_READ_VALUE(m);
604 				owner = lv_mtx_owner(v);
605 			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
606 			KTR_STATE0(KTR_SCHED, "thread",
607 			    sched_tdname((struct thread *)tid),
608 			    "running");
609 			continue;
610 		}
611 #endif
612 
613 		ts = turnstile_trywait(&m->lock_object);
614 		v = MTX_READ_VALUE(m);
615 retry_turnstile:
616 
617 		/*
618 		 * Check if the lock has been released while spinning for
619 		 * the turnstile chain lock.
620 		 */
621 		if (v == MTX_UNOWNED) {
622 			turnstile_cancel(ts);
623 			continue;
624 		}
625 
626 #ifdef ADAPTIVE_MUTEXES
627 		/*
628 		 * The current lock owner might have started executing
629 		 * on another CPU (or the lock could have changed
630 		 * owners) while we were waiting on the turnstile
631 		 * chain lock.  If so, drop the turnstile lock and try
632 		 * again.
633 		 */
634 		owner = lv_mtx_owner(v);
635 		if (TD_IS_RUNNING(owner)) {
636 			turnstile_cancel(ts);
637 			continue;
638 		}
639 #endif
640 
641 		/*
642 		 * If the mutex isn't already contested and a failure occurs
643 		 * setting the contested bit, the mutex was either released
644 		 * or the state of the MTX_RECURSED bit changed.
645 		 */
646 		if ((v & MTX_CONTESTED) == 0 &&
647 		    !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
648 			goto retry_turnstile;
649 		}
650 
651 		/*
652 		 * We definitely must sleep for this lock.
653 		 */
654 		mtx_assert(m, MA_NOTOWNED);
655 
656 		/*
657 		 * Block on the turnstile.
658 		 */
659 #ifdef KDTRACE_HOOKS
660 		sleep_time -= lockstat_nsecs(&m->lock_object);
661 #endif
662 #ifndef ADAPTIVE_MUTEXES
663 		owner = mtx_owner(m);
664 #endif
665 		MPASS(owner == mtx_owner(m));
666 		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
667 #ifdef KDTRACE_HOOKS
668 		sleep_time += lockstat_nsecs(&m->lock_object);
669 		sleep_cnt++;
670 #endif
671 		v = MTX_READ_VALUE(m);
672 	}
673 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
674 	if (__predict_true(!doing_lockprof))
675 		return;
676 #endif
677 #ifdef KDTRACE_HOOKS
678 	all_time += lockstat_nsecs(&m->lock_object);
679 	if (sleep_time)
680 		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);
681 
682 	/*
683 	 * Only record the loops spinning and not sleeping.
684 	 */
685 	if (lda.spin_cnt > sleep_cnt)
686 		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
687 out_lockstat:
688 #endif
689 	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
690 	    waittime, file, line);
691 }
692 
693 #ifdef SMP
694 /*
695  * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
696  *
697  * This is only called if we need to actually spin for the lock. Recursion
698  * is handled inline.
699  */
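/*
 * Note on the loop below: while waiting for the lock to become free,
 * spinlock_exit() is called so that interrupts get a chance to run on this
 * CPU; after a large number of iterations _mtx_lock_indefinite_check()
 * takes over and will eventually panic if the lock never comes free.
 */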
700 #if LOCK_DEBUG > 0
701 void
702 _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
703     const char *file, int line)
704 #else
705 void
706 _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
707 #endif
708 {
709 	struct mtx *m;
710 	struct lock_delay_arg lda;
711 	uintptr_t tid;
712 #ifdef LOCK_PROFILING
713 	int contested = 0;
714 	uint64_t waittime = 0;
715 #endif
716 #ifdef KDTRACE_HOOKS
717 	int64_t spin_time = 0;
718 #endif
719 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
720 	int doing_lockprof = 0;
721 #endif
722 
723 	tid = (uintptr_t)curthread;
724 	m = mtxlock2mtx(c);
725 
726 #ifdef KDTRACE_HOOKS
727 	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
728 		while (v == MTX_UNOWNED) {
729 			if (_mtx_obtain_lock_fetch(m, &v, tid))
730 				goto out_lockstat;
731 		}
732 		doing_lockprof = 1;
733 		spin_time -= lockstat_nsecs(&m->lock_object);
734 	}
735 #endif
736 #ifdef LOCK_PROFILING
737 	doing_lockprof = 1;
738 #endif
739 
740 	if (__predict_false(v == MTX_UNOWNED))
741 		v = MTX_READ_VALUE(m);
742 
743 	if (__predict_false(v == tid)) {
744 		m->mtx_recurse++;
745 		return;
746 	}
747 
748 	if (SCHEDULER_STOPPED())
749 		return;
750 
751 	if (LOCK_LOG_TEST(&m->lock_object, opts))
752 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
753 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
754 	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);
755 
756 	lock_delay_arg_init(&lda, &mtx_spin_delay);
757 
758 #ifdef HWPMC_HOOKS
759 	PMC_SOFT_CALL( , , lock, failed);
760 #endif
761 	lock_profile_obtain_lock_failed(&m->lock_object, true, &contested, &waittime);
762 
763 	for (;;) {
764 		if (v == MTX_UNOWNED) {
765 			if (_mtx_obtain_lock_fetch(m, &v, tid))
766 				break;
767 			continue;
768 		}
769 		/* Give interrupts a chance while we spin. */
770 		spinlock_exit();
771 		do {
772 			if (__predict_true(lda.spin_cnt < 10000000)) {
773 				lock_delay(&lda);
774 			} else {
775 				_mtx_lock_indefinite_check(m, &lda);
776 			}
777 			v = MTX_READ_VALUE(m);
778 		} while (v != MTX_UNOWNED);
779 		spinlock_enter();
780 	}
781 
782 	if (LOCK_LOG_TEST(&m->lock_object, opts))
783 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
784 	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
785 	    "running");
786 
787 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
788 	if (__predict_true(!doing_lockprof))
789 		return;
790 #endif
791 #ifdef KDTRACE_HOOKS
792 	spin_time += lockstat_nsecs(&m->lock_object);
793 	if (lda.spin_cnt != 0)
794 		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
795 out_lockstat:
796 #endif
797 	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m,
798 	    contested, waittime, file, line);
799 }
800 #endif /* SMP */
801 
802 #ifdef INVARIANTS
803 static void
804 thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
805 {
806 
807 	KASSERT(m->mtx_lock != MTX_DESTROYED,
808 	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
809 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
810 	    ("thread_lock() of sleep mutex %s @ %s:%d",
811 	    m->lock_object.lo_name, file, line));
812 	KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0,
813 	    ("thread_lock: got a recursive mutex %s @ %s:%d\n",
814 	    m->lock_object.lo_name, file, line));
815 	WITNESS_CHECKORDER(&m->lock_object,
816 	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
817 }
818 #else
819 #define thread_lock_validate(m, opts, file, line) do { } while (0)
820 #endif
821 
822 #ifndef LOCK_PROFILING
823 #if LOCK_DEBUG > 0
824 void
825 _thread_lock(struct thread *td, int opts, const char *file, int line)
826 #else
827 void
828 _thread_lock(struct thread *td)
829 #endif
830 {
831 	struct mtx *m;
832 	uintptr_t tid;
833 
834 	tid = (uintptr_t)curthread;
835 
836 	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
837 		goto slowpath_noirq;
838 	spinlock_enter();
839 	m = td->td_lock;
840 	thread_lock_validate(m, 0, file, line);
841 	if (__predict_false(m == &blocked_lock))
842 		goto slowpath_unlocked;
843 	if (__predict_false(!_mtx_obtain_lock(m, tid)))
844 		goto slowpath_unlocked;
845 	if (__predict_true(m == td->td_lock)) {
846 		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
847 		return;
848 	}
849 	_mtx_release_lock_quick(m);
850 slowpath_unlocked:
851 	spinlock_exit();
852 slowpath_noirq:
853 #if LOCK_DEBUG > 0
854 	thread_lock_flags_(td, opts, file, line);
855 #else
856 	thread_lock_flags_(td, 0, 0, 0);
857 #endif
858 }
859 #endif
860 
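/*
 * Slow path for thread_lock().  td->td_lock names the spin mutex that
 * currently protects the thread and may change underneath us (for example
 * it points at blocked_lock while the thread is being handed off), so after
 * acquiring the mutex we re-check that it is still td->td_lock and retry if
 * it has moved.
 */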
861 void
862 thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
863 {
864 	struct mtx *m;
865 	uintptr_t tid, v;
866 	struct lock_delay_arg lda;
867 #ifdef LOCK_PROFILING
868 	int contested = 0;
869 	uint64_t waittime = 0;
870 #endif
871 #ifdef KDTRACE_HOOKS
872 	int64_t spin_time = 0;
873 #endif
874 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
875 	int doing_lockprof = 1;
876 #endif
877 
878 	tid = (uintptr_t)curthread;
879 
880 	if (SCHEDULER_STOPPED()) {
881 		/*
882 		 * Ensure that spinlock sections are balanced even when the
883 		 * scheduler is stopped, since we may otherwise inadvertently
884 		 * re-enable interrupts while dumping core.
885 		 */
886 		spinlock_enter();
887 		return;
888 	}
889 
890 	lock_delay_arg_init(&lda, &mtx_spin_delay);
891 
892 #ifdef HWPMC_HOOKS
893 	PMC_SOFT_CALL( , , lock, failed);
894 #endif
895 
896 #ifdef LOCK_PROFILING
897 	doing_lockprof = 1;
898 #elif defined(KDTRACE_HOOKS)
899 	doing_lockprof = lockstat_enabled;
900 #endif
901 #ifdef KDTRACE_HOOKS
902 	if (__predict_false(doing_lockprof))
903 		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
904 #endif
905 	spinlock_enter();
906 
907 	for (;;) {
908 retry:
909 		m = td->td_lock;
910 		thread_lock_validate(m, opts, file, line);
911 		v = MTX_READ_VALUE(m);
912 		for (;;) {
913 			if (v == MTX_UNOWNED) {
914 				if (_mtx_obtain_lock_fetch(m, &v, tid))
915 					break;
916 				continue;
917 			}
918 			MPASS(v != tid);
919 			lock_profile_obtain_lock_failed(&m->lock_object, true,
920 			    &contested, &waittime);
921 			/* Give interrupts a chance while we spin. */
922 			spinlock_exit();
923 			do {
924 				if (__predict_true(lda.spin_cnt < 10000000)) {
925 					lock_delay(&lda);
926 				} else {
927 					_mtx_lock_indefinite_check(m, &lda);
928 				}
929 				if (m != td->td_lock) {
930 					spinlock_enter();
931 					goto retry;
932 				}
933 				v = MTX_READ_VALUE(m);
934 			} while (v != MTX_UNOWNED);
935 			spinlock_enter();
936 		}
937 		if (m == td->td_lock)
938 			break;
939 		_mtx_release_lock_quick(m);
940 	}
941 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
942 	    line);
943 	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
944 
945 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
946 	if (__predict_true(!doing_lockprof))
947 		return;
948 #endif
949 #ifdef KDTRACE_HOOKS
950 	spin_time += lockstat_nsecs(&m->lock_object);
951 #endif
952 	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, contested,
953 	    waittime, file, line);
954 #ifdef KDTRACE_HOOKS
955 	if (lda.spin_cnt != 0)
956 		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
957 #endif
958 }
959 
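/*
 * Detach the thread from its current lock: point td_lock at blocked_lock
 * (so that concurrent thread_lock() callers spin) and return the old lock,
 * which the caller still owns and remains responsible for releasing.
 */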
960 struct mtx *
961 thread_lock_block(struct thread *td)
962 {
963 	struct mtx *lock;
964 
965 	lock = td->td_lock;
966 	mtx_assert(lock, MA_OWNED);
967 	td->td_lock = &blocked_lock;
968 
969 	return (lock);
970 }
971 
972 void
973 thread_lock_unblock(struct thread *td, struct mtx *new)
974 {
975 
976 	mtx_assert(new, MA_OWNED);
977 	KASSERT(td->td_lock == &blocked_lock,
978 	    ("thread %p lock %p not blocked_lock %p",
979 	    td, td->td_lock, &blocked_lock));
980 	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
981 }
982 
983 void
984 thread_lock_block_wait(struct thread *td)
985 {
986 
987 	while (td->td_lock == &blocked_lock)
988 		cpu_spinwait();
989 
990 	/* Acquire fence to be certain that all thread state is visible. */
991 	atomic_thread_fence_acq();
992 }
993 
994 void
995 thread_lock_set(struct thread *td, struct mtx *new)
996 {
997 	struct mtx *lock;
998 
999 	mtx_assert(new, MA_OWNED);
1000 	lock = td->td_lock;
1001 	mtx_assert(lock, MA_OWNED);
1002 	td->td_lock = new;
1003 	mtx_unlock_spin(lock);
1004 }
1005 
1006 /*
1007  * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
1008  *
1009  * We are only called here if the lock is recursed, contested (i.e. we
1010  * need to wake up a blocked thread), or a lockstat probe is active.
1011  */
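/*
 * For the contested case below: the turnstile chain lock is taken first so
 * that the turnstile can be removed from its hash chain if it becomes
 * empty, then the lock word is cleared and all waiters on the exclusive
 * queue are awakened via turnstile_broadcast() and turnstile_unpend().
 */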
1012 #if LOCK_DEBUG > 0
1013 void
1014 __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
1015     const char *file, int line)
1016 #else
1017 void
1018 __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
1019 #endif
1020 {
1021 	struct mtx *m;
1022 	struct turnstile *ts;
1023 	uintptr_t tid;
1024 
1025 	if (SCHEDULER_STOPPED())
1026 		return;
1027 
1028 	tid = (uintptr_t)curthread;
1029 	m = mtxlock2mtx(c);
1030 
1031 	if (__predict_false(v == tid))
1032 		v = MTX_READ_VALUE(m);
1033 
1034 	if (__predict_false(v & MTX_RECURSED)) {
1035 		if (--(m->mtx_recurse) == 0)
1036 			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
1037 		if (LOCK_LOG_TEST(&m->lock_object, opts))
1038 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
1039 		return;
1040 	}
1041 
1042 	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
1043 	if (v == tid && _mtx_release_lock(m, tid))
1044 		return;
1045 
1046 	/*
1047 	 * We have to lock the chain before the turnstile so this turnstile
1048 	 * can be removed from the hash list if it is empty.
1049 	 */
1050 	turnstile_chain_lock(&m->lock_object);
1051 	_mtx_release_lock_quick(m);
1052 	ts = turnstile_lookup(&m->lock_object);
1053 	MPASS(ts != NULL);
1054 	if (LOCK_LOG_TEST(&m->lock_object, opts))
1055 		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
1056 	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
1057 
1058 	/*
1059 	 * This turnstile is no longer associated with the mutex.  We can
1060 	 * unlock the chain lock so a new turnstile may take its place.
1061 	 */
1062 	turnstile_unpend(ts);
1063 	turnstile_chain_unlock(&m->lock_object);
1064 }
1065 
1066 /*
1067  * All the unlocking of MTX_SPIN locks is done inline.
1068  * See the __mtx_unlock_spin() macro for the details.
1069  */
1070 
1071 /*
1072  * The backing function for the INVARIANTS-enabled mtx_assert()
1073  */
1074 #ifdef INVARIANT_SUPPORT
1075 void
1076 __mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
1077 {
1078 	const struct mtx *m;
1079 
1080 	if (KERNEL_PANICKED() || dumping || SCHEDULER_STOPPED())
1081 		return;
1082 
1083 	m = mtxlock2mtx(c);
1084 
1085 	switch (what) {
1086 	case MA_OWNED:
1087 	case MA_OWNED | MA_RECURSED:
1088 	case MA_OWNED | MA_NOTRECURSED:
1089 		if (!mtx_owned(m))
1090 			panic("mutex %s not owned at %s:%d",
1091 			    m->lock_object.lo_name, file, line);
1092 		if (mtx_recursed(m)) {
1093 			if ((what & MA_NOTRECURSED) != 0)
1094 				panic("mutex %s recursed at %s:%d",
1095 				    m->lock_object.lo_name, file, line);
1096 		} else if ((what & MA_RECURSED) != 0) {
1097 			panic("mutex %s unrecursed at %s:%d",
1098 			    m->lock_object.lo_name, file, line);
1099 		}
1100 		break;
1101 	case MA_NOTOWNED:
1102 		if (mtx_owned(m))
1103 			panic("mutex %s owned at %s:%d",
1104 			    m->lock_object.lo_name, file, line);
1105 		break;
1106 	default:
1107 		panic("unknown mtx_assert at %s:%d", file, line);
1108 	}
1109 }
1110 #endif
1111 
1112 /*
1113  * General init routine used by the MTX_SYSINIT() macro.
1114  */
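/*
 * Informal example (the names are made up, not taken from this file): a
 * statically declared mutex can be initialized at boot with
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo lock", MTX_DEF);
 *
 * which arranges for mtx_sysinit() below to invoke mtx_init() during
 * SYSINIT processing.
 */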
1115 void
1116 mtx_sysinit(void *arg)
1117 {
1118 	struct mtx_args *margs = arg;
1119 
1120 	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
1121 	    margs->ma_opts);
1122 }
1123 
1124 /*
1125  * Mutex initialization routine; initialize lock `m' with the type and
1126  * options contained in `opts' and name `name.'  The optional
1127  * lock type `type' is used as a general lock category name for use with
1128  * witness.
1129  */
1130 void
1131 _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
1132 {
1133 	struct mtx *m;
1134 	struct lock_class *class;
1135 	int flags;
1136 
1137 	m = mtxlock2mtx(c);
1138 
1139 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
1140 	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
1141 	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
1142 	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
1143 	    &m->mtx_lock));
1144 
1145 	/* Determine lock class and lock flags. */
1146 	if (opts & MTX_SPIN)
1147 		class = &lock_class_mtx_spin;
1148 	else
1149 		class = &lock_class_mtx_sleep;
1150 	flags = 0;
1151 	if (opts & MTX_QUIET)
1152 		flags |= LO_QUIET;
1153 	if (opts & MTX_RECURSE)
1154 		flags |= LO_RECURSABLE;
1155 	if ((opts & MTX_NOWITNESS) == 0)
1156 		flags |= LO_WITNESS;
1157 	if (opts & MTX_DUPOK)
1158 		flags |= LO_DUPOK;
1159 	if (opts & MTX_NOPROFILE)
1160 		flags |= LO_NOPROFILE;
1161 	if (opts & MTX_NEW)
1162 		flags |= LO_NEW;
1163 
1164 	/* Initialize mutex. */
1165 	lock_init(&m->lock_object, class, name, type, flags);
1166 
1167 	m->mtx_lock = MTX_UNOWNED;
1168 	m->mtx_recurse = 0;
1169 }
1170 
1171 /*
1172  * Destroy lock `m'.  We don't allow MTX_QUIET to be
1173  * passed in as a flag here because if the corresponding mtx_init() was
1174  * called with MTX_QUIET set, then it will already be set in the mutex's
1175  * flags.
1176  */
1177 void
1178 _mtx_destroy(volatile uintptr_t *c)
1179 {
1180 	struct mtx *m;
1181 
1182 	m = mtxlock2mtx(c);
1183 
1184 	if (!mtx_owned(m))
1185 		MPASS(mtx_unowned(m));
1186 	else {
1187 		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
1188 
1189 		/* Perform the non-mtx related part of mtx_unlock_spin(). */
1190 		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) {
1191 			lock_profile_release_lock(&m->lock_object, true);
1192 			spinlock_exit();
1193 		} else {
1194 			TD_LOCKS_DEC(curthread);
1195 			lock_profile_release_lock(&m->lock_object, false);
1196 		}
1197 
1198 		/* Tell witness this isn't locked to make it happy. */
1199 		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
1200 		    __LINE__);
1201 	}
1202 
1203 	m->mtx_lock = MTX_DESTROYED;
1204 	lock_destroy(&m->lock_object);
1205 }
1206 
1207 /*
1208  * Initialize the mutex code and system mutexes.  This is called from the MD
1209  * startup code prior to mi_startup().  The per-CPU data space needs to be
1210  * set up before this is called.
1211  */
1212 void
1213 mutex_init(void)
1214 {
1215 
1216 	/* Set up turnstiles so that sleep mutexes work. */
1217 	init_turnstiles();
1218 
1219 	/*
1220 	 * Initialize mutexes.
1221 	 */
1222 	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
1223 	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
1224 	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
1225 	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
1226 	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
1227 	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
1228 	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
1229 	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
1230 	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
1231 	mtx_lock(&Giant);
1232 }
1233 
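/*
 * Watchdog for the spin loops in this file: keep delaying (via
 * cpu_lock_delay()) up to a generous iteration threshold, then report the
 * apparently wedged owner and panic, unless the debugger is active or the
 * kernel has already panicked.
 */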
1234 static void __noinline
1235 _mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
1236 {
1237 	struct thread *td;
1238 
1239 	ldap->spin_cnt++;
1240 	if (ldap->spin_cnt < 60000000 || kdb_active || KERNEL_PANICKED())
1241 		cpu_lock_delay();
1242 	else {
1243 		td = mtx_owner(m);
1244 
1245 		/* If the mutex is unlocked, try again. */
1246 		if (td == NULL)
1247 			return;
1248 
1249 		printf( "spin lock %p (%s) held by %p (tid %d) too long\n",
1250 		    m, m->lock_object.lo_name, td, td->td_tid);
1251 #ifdef WITNESS
1252 		witness_display_spinlock(&m->lock_object, td, printf);
1253 #endif
1254 		panic("spin lock held too long");
1255 	}
1256 	cpu_spinwait();
1257 }
1258 
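/*
 * Spin until the given spin mutex is observed unowned, without ever
 * acquiring it.
 */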
1259 void
1260 mtx_spin_wait_unlocked(struct mtx *m)
1261 {
1262 	struct lock_delay_arg lda;
1263 
1264 	KASSERT(m->mtx_lock != MTX_DESTROYED,
1265 	    ("%s() of destroyed mutex %p", __func__, m));
1266 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
1267 	    ("%s() of sleep mutex %p (%s)", __func__, m,
1268 	    m->lock_object.lo_name));
1269 	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
1270 	    m->lock_object.lo_name));
1271 
1272 	lda.spin_cnt = 0;
1273 
1274 	while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
1275 		if (__predict_true(lda.spin_cnt < 10000000)) {
1276 			cpu_spinwait();
1277 			lda.spin_cnt++;
1278 		} else {
1279 			_mtx_lock_indefinite_check(m, &lda);
1280 		}
1281 	}
1282 }
1283 
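/*
 * Wait for the given sleep mutex to be released.  If the owner is not
 * currently running, briefly lock and unlock the mutex so that we block on
 * its turnstile instead of spinning.
 */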
1284 void
1285 mtx_wait_unlocked(struct mtx *m)
1286 {
1287 	struct thread *owner;
1288 	uintptr_t v;
1289 
1290 	KASSERT(m->mtx_lock != MTX_DESTROYED,
1291 	    ("%s() of destroyed mutex %p", __func__, m));
1292 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
1293 	    ("%s() not a sleep mutex %p (%s)", __func__, m,
1294 	    m->lock_object.lo_name));
1295 	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
1296 	    m->lock_object.lo_name));
1297 
1298 	for (;;) {
1299 		v = atomic_load_acq_ptr(&m->mtx_lock);
1300 		if (v == MTX_UNOWNED) {
1301 			break;
1302 		}
1303 		owner = lv_mtx_owner(v);
1304 		if (!TD_IS_RUNNING(owner)) {
1305 			mtx_lock(m);
1306 			mtx_unlock(m);
1307 			break;
1308 		}
1309 		cpu_spinwait();
1310 	}
1311 }
1312 
1313 #ifdef DDB
1314 void
1315 db_show_mtx(const struct lock_object *lock)
1316 {
1317 	struct thread *td;
1318 	const struct mtx *m;
1319 
1320 	m = (const struct mtx *)lock;
1321 
1322 	db_printf(" flags: {");
1323 	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
1324 		db_printf("SPIN");
1325 	else
1326 		db_printf("DEF");
1327 	if (m->lock_object.lo_flags & LO_RECURSABLE)
1328 		db_printf(", RECURSE");
1329 	if (m->lock_object.lo_flags & LO_DUPOK)
1330 		db_printf(", DUPOK");
1331 	db_printf("}\n");
1332 	db_printf(" state: {");
1333 	if (mtx_unowned(m))
1334 		db_printf("UNOWNED");
1335 	else if (mtx_destroyed(m))
1336 		db_printf("DESTROYED");
1337 	else {
1338 		db_printf("OWNED");
1339 		if (m->mtx_lock & MTX_CONTESTED)
1340 			db_printf(", CONTESTED");
1341 		if (m->mtx_lock & MTX_RECURSED)
1342 			db_printf(", RECURSED");
1343 	}
1344 	db_printf("}\n");
1345 	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
1346 		td = mtx_owner(m);
1347 		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
1348 		    td->td_tid, td->td_proc->p_pid, td->td_name);
1349 		if (mtx_recursed(m))
1350 			db_printf(" recursed: %d\n", m->mtx_recurse);
1351 	}
1352 }
1353 #endif
1354