xref: /freebsd/sys/kern/kern_mutex.c (revision fcf596178b5f2be36424ecbc1b6a3224b29c91d2)
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
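
/*
 * Illustrative sketch (not part of the build): the lock cookie handed to
 * the _mtx_*() functions is the address of the mtx_lock word, e.g. from
 * mtx_lock(m) expanding to __mtx_lock_flags(&(m)->mtx_lock, ...), so
 * mtxlock2mtx() simply recovers the enclosing mutex:
 *
 *	struct mtx m;
 *	volatile uintptr_t *c = &m.mtx_lock;
 *	struct mtx *mp = mtxlock2mtx(c);	(now mp == &m)
 */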

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
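
/*
 * Hedged sketch of how these classes are consumed: generic sleep
 * primitives (see kern_synch.c) drop and reacquire an arbitrary interlock
 * through its class rather than knowing its concrete type:
 *
 *	class = LOCK_CLASS(lock);
 *	how = class->lc_unlock(lock);
 *	...block...
 *	class->lc_lock(lock, how);
 */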

#ifdef ADAPTIVE_MUTEXES
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");

static struct lock_delay_config __read_mostly mtx_delay = {
	.initial	= 1000,
	.step		= 500,
	.min		= 100,
	.max		= 5000,
};

SYSCTL_INT(_debug_mtx, OID_AUTO, delay_initial, CTLFLAG_RW, &mtx_delay.initial,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_step, CTLFLAG_RW, &mtx_delay.step,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_min, CTLFLAG_RW, &mtx_delay.min,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

static void
mtx_delay_sysinit(void *dummy)
{

	mtx_delay.initial = mp_ncpus * 25;
	mtx_delay.step = (mp_ncpus * 25) / 2;
	mtx_delay.min = mp_ncpus * 5;
	mtx_delay.max = mp_ncpus * 25 * 10;
}
LOCK_DELAY_SYSINIT(mtx_delay_sysinit);
#endif

static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_mostly mtx_spin_delay = {
	.initial        = 1000,
	.step           = 500,
	.min            = 100,
	.max            = 5000,
};

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_initial, CTLFLAG_RW,
    &mtx_spin_delay.initial, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_step, CTLFLAG_RW, &mtx_spin_delay.step,
    0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_min, CTLFLAG_RW, &mtx_spin_delay.min,
    0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_spin_delay.max,
    0, "");

static void
mtx_spin_delay_sysinit(void *dummy)
{

	mtx_spin_delay.initial = mp_ncpus * 25;
	mtx_spin_delay.step = (mp_ncpus * 25) / 2;
	mtx_spin_delay.min = mp_ncpus * 5;
	mtx_spin_delay.max = mp_ncpus * 25 * 10;
}
LOCK_DELAY_SYSINIT(mtx_spin_delay_sysinit);
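
/*
 * Illustrative runtime tuning of the knobs above (shell sketch; the
 * debug.mtx node only exists with ADAPTIVE_MUTEXES, and the boot-time
 * sysinits rescale the defaults by mp_ncpus before these are consulted):
 *
 *	# sysctl debug.mtx.delay_initial
 *	# sysctl debug.mtx_spin.delay_max=10000
 */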

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}

uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (x != MTX_UNOWNED);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, tid, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_sleep(c, opts, file, line);
	TD_LOCKS_DEC(curthread);
}
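
/*
 * Canonical consumer pattern for the pair above (sketch; `sc' and its
 * members are hypothetical):
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_flags |= FOO_BUSY;
 *	mtx_unlock(&sc->sc_mtx);
 */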

void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}
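
/*
 * Spin mutex consumer sketch (hypothetical lock).  The critical section
 * must be short and must never sleep: mtx_lock_spin() enters a spinlock
 * section, which on most platforms disables interrupts for its duration.
 *
 *	mtx_lock_spin(&foo_spin_mtx);
 *	...brief critical section...
 *	mtx_unlock_spin(&foo_spin_mtx);
 */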

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
	    (opts & MTX_RECURSE) != 0)) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
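
/*
 * The usual mtx_trylock() idiom (sketch; names hypothetical):
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		...critical section...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		...lock is busy: defer or fall back...
 *	}
 */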

/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
    const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	m = mtxlock2mtx(c);

	if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		opts &= ~MTX_RECURSE;
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
	opts &= ~MTX_RECURSE;

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
		    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&m->lock_object);
#endif

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK,
				    "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "spinning", "lockname:\"%s\"",
			    m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "running");
			continue;
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			v = MTX_READ_VALUE(m);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
#endif
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
    int opts, const char *file, int line)
{
	struct mtx *m;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	lock_delay_arg_init(&lda, &mtx_spin_delay);
	m = mtxlock2mtx(c);

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&m->lock_object);
#endif
	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (lda.spin_cnt < 10000000) {
				lock_delay(&lda);
			} else {
				lda.spin_cnt++;
				if (lda.spin_cnt < 60000000 || kdb_active ||
				    panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#ifdef KDTRACE_HOOKS
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
	if (spin_time != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
#endif
}
#endif /* SMP */

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		v = MTX_UNOWNED;
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		for (;;) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			if (v == MTX_UNOWNED)
				continue;
			if (v == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (lda.spin_cnt < 10000000) {
					lock_delay(&lda);
				} else {
					lda.spin_cnt++;
					if (lda.spin_cnt < 60000000 ||
					    kdb_active || panicstr != NULL)
						DELAY(1);
					else
						_mtx_lock_spin_failed(m);
					cpu_spinwait();
				}
				if (m != td->td_lock)
					goto retry;
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
		    contested, waittime, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef KDTRACE_HOOKS
	if (spin_time != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
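
/*
 * Hedged sketch of the hand-off the three helpers above implement (the
 * schedulers are the real consumers; details vary):
 *
 *	lock = thread_lock_block(td);	(td now parked on blocked_lock)
 *	...migrate td to another run queue / container lock...
 *	thread_lock_unblock(td, new_lock);
 */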

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	if (!mtx_recursed(m)) {
		LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
		if (_mtx_release_lock(m, (uintptr_t)curthread))
			return;
	} else {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
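
/*
 * Typical assertion at the head of a function whose caller must hold a
 * lock (sketch; `sc' is hypothetical):
 *
 *	mtx_assert(&sc->sc_mtx, MA_OWNED);
 */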

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}
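
/*
 * Illustrative MTX_SYSINIT() use (sketch; the lock and names are
 * hypothetical):
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo global lock", MTX_DEF);
 */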

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and name `name.'  The optional lock type
 * `type' is used as a general lock category name for use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}
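
/*
 * Typical lifecycle (sketch; `sc' is a hypothetical driver softc):
 *
 *	mtx_init(&sc->sc_mtx, "foo driver", NULL, MTX_DEF);
 *	...use mtx_lock()/mtx_unlock() on &sc->sc_mtx...
 *	mtx_destroy(&sc->sc_mtx);
 */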

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a flag
 * here because if the corresponding mtx_init() was called with MTX_QUIET
 * set, then it will already be set in the mutex's flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			TD_LOCKS_DEC(curthread);

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Set up turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif