xref: /freebsd/sys/kern/kern_mutex.c (revision f157ca4696f5922275d5d451736005b9332eb136)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif
/*
 * Return the mutex address when the lock cookie address is provided.
 * This assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
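
/*
 * Illustration (a sketch, not compiled into the kernel): the address of the
 * mtx_lock member is what gets handed around as the lock cookie, so given
 *
 *	struct mtx m;
 *	volatile uintptr_t *c = &m.mtx_lock;
 *
 * mtxlock2mtx(c) recovers &m via __containerof().
 */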

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

#ifdef ADAPTIVE_MUTEXES
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");

static struct lock_delay_config __read_frequently mtx_delay;

SYSCTL_INT(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
#endif

static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_frequently mtx_spin_delay;

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx __exclusive_cache_line Giant;

static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *);

void
assert_mtx(const struct lock_object *lock, int what)
{

	/*
	 * Treat LA_LOCKED as if LA_XLOCKED was asserted.
	 *
	 * Some callers of lc_assert use LA_LOCKED to indicate that either
	 * a shared lock or a write lock is held, while other callers use
	 * the stricter LA_XLOCKED (used as MA_OWNED).
	 *
	 * Mutexes are the only lock class that cannot be shared; as a
	 * result, we can reasonably assume that a caller asserting
	 * LA_LOCKED on a mutex object really intends LA_XLOCKED.
	 */
	if (what & LA_LOCKED) {
		what &= ~LA_LOCKED;
		what |= LA_XLOCKED;
	}
	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}

uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (*owner != NULL);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}
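
/*
 * Example consumer (a sketch; "foo_softc" and its fields are hypothetical):
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		int		sc_count;
 *	};
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_count++;
 *	mtx_unlock(&sc->sc_mtx);
 *
 * In modules (and anywhere the inline versions are not used), mtx_lock()
 * and mtx_unlock() resolve to the __mtx_lock_flags()/__mtx_unlock_flags()
 * functions here.
 */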

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

#ifdef LOCK_PROFILING
	__mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line);
#else
	__mtx_unlock(m, curthread, opts, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}

void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;
#ifdef SMP
	uintptr_t tid, v;
#endif

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
#ifdef SMP
	spinlock_enter();
	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_spin(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,
		    m, 0, 0, file, line);
#else
	__mtx_lock_spin(m, curthread, opts, file, line);
#endif
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
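
/*
 * Example (a sketch; names hypothetical).  Spin mutexes disable interrupts
 * on the local CPU via spinlock_enter(), so their critical sections must be
 * short and must never sleep:
 *
 *	mtx_lock_spin(&sc->sc_intr_mtx);
 *	sc->sc_pending++;
 *	mtx_unlock_spin(&sc->sc_intr_mtx);
 */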

int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock (provided the
 * mutex is recursable).
 */
int
_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}

int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);
	return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
}
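
/*
 * Example (a sketch; names hypothetical): a typical mtx_trylock() pattern,
 * doing the work immediately when the lock is free and deferring it
 * otherwise instead of blocking:
 *
 *	if (mtx_trylock(&sc->sc_mtx)) {
 *		foo_process(sc);
 *		mtx_unlock(&sc->sc_mtx);
 *	} else
 *		foo_defer(sc);
 */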

/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
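/*
 * In outline, the slow path below loops over three cases:
 *
 *	1. If the lock was released, retry the atomic acquire.
 *	2. (ADAPTIVE_MUTEXES) If the owner is running on another CPU, spin
 *	   with lock_delay() until the owner changes or stops running.
 *	3. Otherwise take the turnstile chain lock, re-check the lock state,
 *	   set MTX_CONTESTED and block on the turnstile.
 */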
#if LOCK_DEBUG > 0
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
    int line)
#else
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct thread *td;
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;
	struct thread *owner;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif

	td = curthread;
	tid = (uintptr_t)td;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (SCHEDULER_STOPPED_TD(td))
		return;

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(lv_mtx_owner(v) == td)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
#if LOCK_DEBUG > 0
		opts &= ~MTX_RECURSE;
#endif
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
#if LOCK_DEBUG > 0
	opts &= ~MTX_RECURSE;
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK,
				    "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "spinning", "lockname:\"%s\"",
			    m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "running");
			continue;
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);
retry_turnstile:

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
			goto retry_turnstile;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
#ifndef ADAPTIVE_MUTEXES
		owner = mtx_owner(m);
#endif
		MPASS(owner == mtx_owner(m));
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
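/*
 * In outline: while the lock is owned, spin with interrupts re-enabled
 * (the spinlock_exit()/spinlock_enter() pair below), using lock_delay()
 * for backoff and falling into _mtx_lock_indefinite_check() after enough
 * iterations to diagnose a stuck owner.
 */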
#if LOCK_DEBUG > 0
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(spin__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		spin_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (SCHEDULER_STOPPED())
		return;

	lock_delay_arg_init(&lda, &mtx_spin_delay);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (__predict_true(lda.spin_cnt < 10000000)) {
				lock_delay(&lda);
			} else {
				_mtx_lock_indefinite_check(m, &lda);
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
}
#endif /* SMP */

#ifdef INVARIANTS
static void
thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
{

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("thread_lock() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0,
	    ("thread_lock: got a recursive mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object,
	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
}
#else
#define thread_lock_validate(m, opts, file, line) do { } while (0)
#endif

#ifndef LOCK_PROFILING
#if LOCK_DEBUG > 0
void
_thread_lock(struct thread *td, int opts, const char *file, int line)
#else
void
_thread_lock(struct thread *td)
#endif
{
	struct mtx *m;
	uintptr_t tid;

	tid = (uintptr_t)curthread;

	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
		goto slowpath_noirq;
	spinlock_enter();
	m = td->td_lock;
	thread_lock_validate(m, 0, file, line);
	if (__predict_false(m == &blocked_lock))
		goto slowpath_unlocked;
	if (__predict_false(!_mtx_obtain_lock(m, tid)))
		goto slowpath_unlocked;
	if (__predict_true(m == td->td_lock)) {
		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
		return;
	}
	_mtx_release_lock_quick(m);
slowpath_unlocked:
	spinlock_exit();
slowpath_noirq:
#if LOCK_DEBUG > 0
	thread_lock_flags_(td, opts, file, line);
#else
	thread_lock_flags_(td, 0, 0, 0);
#endif
}
#endif

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 1;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	spinlock_enter();

	for (;;) {
retry:
		m = td->td_lock;
		thread_lock_validate(m, opts, file, line);
		v = MTX_READ_VALUE(m);
		for (;;) {
			if (v == MTX_UNOWNED) {
				if (_mtx_obtain_lock_fetch(m, &v, tid))
					break;
				continue;
			}
			MPASS(v != tid);
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (__predict_true(lda.spin_cnt < 10000000)) {
					lock_delay(&lda);
				} else {
					_mtx_lock_indefinite_check(m, &lda);
				}
				if (m != td->td_lock) {
					spinlock_enter();
					goto retry;
				}
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		_mtx_release_lock_quick(m);
	}
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}
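
/*
 * A thread's td_lock pointer names the spin mutex that currently protects
 * it (e.g. a run-queue or sleep-queue lock); the scheduler re-points it as
 * the thread changes state.  While a thread is being moved between queues,
 * td_lock is pointed at blocked_lock, which is never released, so that
 * thread_lock() callers keep spinning in thread_lock_flags_() above until
 * the hand-off completes.  The helpers below implement that hand-off.
 */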

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	lock = td->td_lock;
	mtx_assert(lock, MA_OWNED);
	td->td_lock = &blocked_lock;

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{

	mtx_assert(new, MA_OWNED);
	KASSERT(td->td_lock == &blocked_lock,
	    ("thread %p lock %p not blocked_lock %p",
	    td, td->td_lock, &blocked_lock));
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_block_wait(struct thread *td)
{

	while (td->td_lock == &blocked_lock)
		cpu_spinwait();

	/* Acquire fence to be certain that all thread state is visible. */
	atomic_thread_fence_acq();
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	lock = td->td_lock;
	mtx_assert(lock, MA_OWNED);
	td->td_lock = new;
	mtx_unlock_spin(lock);
}

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed, contested (i.e. we
 * need to wake up a blocked thread), or a lockstat probe is active.
 */
#if LOCK_DEBUG > 0
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

	if (__predict_false(v == tid))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v & MTX_RECURSED)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
	if (v == tid && _mtx_release_lock(m, tid))
		return;

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	_mtx_release_lock_quick(m);
	ts = turnstile_lookup(&m->lock_object);
	MPASS(ts != NULL);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}
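
/*
 * Example (a sketch; the "foo" names are hypothetical): a statically
 * allocated mutex initialized at boot through mtx_sysinit():
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx_init, &foo_mtx, "foo mutex", MTX_DEF);
 */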

/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name.'  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}
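
/*
 * Lifecycle sketch (hypothetical consumer): each mtx_init() should be
 * paired with an eventual mtx_destroy() once nothing can still reference
 * the mutex:
 *
 *	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
 *	...
 *	mtx_destroy(&sc->sc_mtx);
 */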

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a
 * flag here because if the corresponding mtx_init() was called with
 * MTX_QUIET set, then it will already be set in the mutex's flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			TD_LOCKS_DEC(curthread);

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

static void __noinline
_mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
{
	struct thread *td;

	ldap->spin_cnt++;
	if (ldap->spin_cnt < 60000000 || kdb_active || panicstr != NULL)
		cpu_lock_delay();
	else {
		td = mtx_owner(m);

		/* If the mutex is unlocked, try again. */
		if (td == NULL)
			return;

		printf("spin lock %p (%s) held by %p (tid %d) too long\n",
		    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
		witness_display_spinlock(&m->lock_object, td, printf);
#endif
		panic("spin lock held too long");
	}
	cpu_spinwait();
}

void
mtx_spin_wait_unlocked(struct mtx *m)
{
	struct lock_delay_arg lda;

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("%s() of destroyed mutex %p", __func__, m));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("%s() of sleep mutex %p (%s)", __func__, m,
	    m->lock_object.lo_name));
	KASSERT(!mtx_owned(m),
	    ("%s() waiting on myself on lock %p (%s)", __func__, m,
	    m->lock_object.lo_name));

	lda.spin_cnt = 0;

	while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
		if (__predict_true(lda.spin_cnt < 10000000)) {
			cpu_spinwait();
			lda.spin_cnt++;
		} else {
			_mtx_lock_indefinite_check(m, &lda);
		}
	}
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif