xref: /freebsd/sys/kern/kern_mutex.c (revision 43a15a22c62345091fc4a2ea2bec529acda7c61f)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Berkeley Software Design Inc's name may not be used to endorse or
15  *    promote products derived from this software without specific prior
16  *    written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
31  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
32  */
33 
34 /*
35  * Machine independent bits of mutex implementation.
36  */
37 
38 #include <sys/cdefs.h>
39 #include "opt_adaptive_mutexes.h"
40 #include "opt_ddb.h"
41 #include "opt_hwpmc_hooks.h"
42 #include "opt_sched.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/bus.h>
47 #include <sys/conf.h>
48 #include <sys/kdb.h>
49 #include <sys/kernel.h>
50 #include <sys/ktr.h>
51 #include <sys/lock.h>
52 #include <sys/malloc.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/resourcevar.h>
56 #include <sys/sched.h>
57 #include <sys/sbuf.h>
58 #include <sys/smp.h>
59 #include <sys/sysctl.h>
60 #include <sys/turnstile.h>
61 #include <sys/vmmeter.h>
62 #include <sys/lock_profile.h>
63 
64 #include <machine/atomic.h>
65 #include <machine/bus.h>
66 #include <machine/cpu.h>
67 
68 #include <ddb/ddb.h>
69 
70 #include <fs/devfs/devfs_int.h>
71 
72 #include <vm/vm.h>
73 #include <vm/vm_extern.h>
74 
75 #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
76 #define	ADAPTIVE_MUTEXES
77 #endif
78 
79 #ifdef HWPMC_HOOKS
80 #include <sys/pmckern.h>
81 PMC_SOFT_DEFINE( , , lock, failed);
82 #endif
83 
84 /*
85  * Return the mutex address when the lock cookie address is provided.
86  * This functionality assumes that struct mtx has a member named mtx_lock.
87  */
88 #define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
89 
90 /*
91  * Internal utility macros.
92  */
93 #define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
94 
95 #define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)
96 
97 static void	assert_mtx(const struct lock_object *lock, int what);
98 #ifdef DDB
99 static void	db_show_mtx(const struct lock_object *lock);
100 #endif
101 static void	lock_mtx(struct lock_object *lock, uintptr_t how);
102 static void	lock_spin(struct lock_object *lock, uintptr_t how);
103 static int	trylock_mtx(struct lock_object *lock, uintptr_t how);
104 static int	trylock_spin(struct lock_object *lock, uintptr_t how);
105 #ifdef KDTRACE_HOOKS
106 static int	owner_mtx(const struct lock_object *lock,
107 		    struct thread **owner);
108 #endif
109 static uintptr_t unlock_mtx(struct lock_object *lock);
110 static uintptr_t unlock_spin(struct lock_object *lock);
111 
112 /*
113  * Lock classes for sleep and spin mutexes.
114  */
115 struct lock_class lock_class_mtx_sleep = {
116 	.lc_name = "sleep mutex",
117 	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
118 	.lc_assert = assert_mtx,
119 #ifdef DDB
120 	.lc_ddb_show = db_show_mtx,
121 #endif
122 	.lc_lock = lock_mtx,
123 	.lc_trylock = trylock_mtx,
124 	.lc_unlock = unlock_mtx,
125 #ifdef KDTRACE_HOOKS
126 	.lc_owner = owner_mtx,
127 #endif
128 };
129 struct lock_class lock_class_mtx_spin = {
130 	.lc_name = "spin mutex",
131 	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
132 	.lc_assert = assert_mtx,
133 #ifdef DDB
134 	.lc_ddb_show = db_show_mtx,
135 #endif
136 	.lc_lock = lock_spin,
137 	.lc_trylock = trylock_spin,
138 	.lc_unlock = unlock_spin,
139 #ifdef KDTRACE_HOOKS
140 	.lc_owner = owner_mtx,
141 #endif
142 };
143 
144 #ifdef ADAPTIVE_MUTEXES
145 #ifdef MUTEX_CUSTOM_BACKOFF
146 static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
147     "mtx debugging");
148 
149 static struct lock_delay_config __read_frequently mtx_delay;
150 
151 SYSCTL_U16(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
152     0, "");
153 SYSCTL_U16(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
154     0, "");
155 
156 LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
157 #else
158 #define mtx_delay	locks_delay
159 #endif
160 #endif
161 
162 #ifdef MUTEX_SPIN_CUSTOM_BACKOFF
163 static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin,
164     CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
165     "mtx spin debugging");
166 
167 static struct lock_delay_config __read_frequently mtx_spin_delay;
168 
169 SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
170     &mtx_spin_delay.base, 0, "");
171 SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
172     &mtx_spin_delay.max, 0, "");
173 
174 LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);
175 #else
176 #define mtx_spin_delay	locks_delay
177 #endif
178 
179 /*
180  * System-wide mutexes
181  */
182 struct mtx blocked_lock;
183 struct mtx __exclusive_cache_line Giant;
184 
185 static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *);
186 
187 static void
188 assert_mtx(const struct lock_object *lock, int what)
189 {
190 
191 	/*
192 	 * Treat LA_LOCKED as if LA_XLOCKED was asserted.
193 	 *
194  * Some callers of lc_assert use LA_LOCKED to indicate that either
195  * a shared lock or a write lock is held, while other callers use
196  * the more strict LA_XLOCKED (used as MA_OWNED).
197  *
198  * Mutex is the only lock class that cannot be shared; as a result,
199  * we can reasonably consider that the caller really intends to assert
200  * LA_XLOCKED when asserting LA_LOCKED on a mutex object.
201 	 */
202 	if (what & LA_LOCKED) {
203 		what &= ~LA_LOCKED;
204 		what |= LA_XLOCKED;
205 	}
206 	mtx_assert((const struct mtx *)lock, what);
207 }
208 
209 static void
210 lock_mtx(struct lock_object *lock, uintptr_t how)
211 {
212 
213 	mtx_lock((struct mtx *)lock);
214 }
215 
216 static void
217 lock_spin(struct lock_object *lock, uintptr_t how)
218 {
219 
220 	mtx_lock_spin((struct mtx *)lock);
221 }
222 
223 static int
224 trylock_mtx(struct lock_object *lock, uintptr_t how)
225 {
226 
227 	return (mtx_trylock((struct mtx *)lock));
228 }
229 
230 static int
231 trylock_spin(struct lock_object *lock, uintptr_t how)
232 {
233 
234 	return (mtx_trylock_spin((struct mtx *)lock));
235 }
236 
237 static uintptr_t
238 unlock_mtx(struct lock_object *lock)
239 {
240 	struct mtx *m;
241 
242 	m = (struct mtx *)lock;
243 	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
244 	mtx_unlock(m);
245 	return (0);
246 }
247 
248 static uintptr_t
249 unlock_spin(struct lock_object *lock)
250 {
251 	struct mtx *m;
252 
253 	m = (struct mtx *)lock;
254 	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
255 	mtx_unlock_spin(m);
256 	return (0);
257 }
258 
259 #ifdef KDTRACE_HOOKS
260 static int
261 owner_mtx(const struct lock_object *lock, struct thread **owner)
262 {
263 	const struct mtx *m;
264 	uintptr_t x;
265 
266 	m = (const struct mtx *)lock;
267 	x = m->mtx_lock;
268 	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
269 	return (*owner != NULL);
270 }
271 #endif
272 
273 /*
274  * Function versions of the inlined __mtx_* macros.  These are used by
275  * modules and can also be called from assembly language if needed.
276  */
277 void
278 __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
279 {
280 	struct mtx *m;
281 	uintptr_t tid, v;
282 
283 	m = mtxlock2mtx(c);
284 
285 	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
286 	    !TD_IS_IDLETHREAD(curthread),
287 	    ("mtx_lock() by idle thread %p on mutex %p @ %s:%d",
288 	    curthread, m, file, line));
289 	KASSERT(m->mtx_lock != MTX_DESTROYED,
290 	    ("mtx_lock() of destroyed mutex %p @ %s:%d", m, file, line));
291 	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
292 	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
293 	    file, line));
294 	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
295 	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
296 
297 	tid = (uintptr_t)curthread;
298 	v = MTX_UNOWNED;
299 	if (!_mtx_obtain_lock_fetch(m, &v, tid))
300 		_mtx_lock_sleep(m, v, opts, file, line);
301 	else
302 		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
303 		    m, 0, 0, file, line);
304 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
305 	    line);
306 	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
307 	    file, line);
308 	TD_LOCKS_INC(curthread);
309 }
310 
311 void
312 __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
313 {
314 	struct mtx *m;
315 
316 	m = mtxlock2mtx(c);
317 
318 	KASSERT(m->mtx_lock != MTX_DESTROYED,
319 	    ("mtx_unlock() of destroyed mutex %p @ %s:%d", m, file, line));
320 	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
321 	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
322 	    file, line));
323 	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
324 	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
325 	    line);
326 	mtx_assert(m, MA_OWNED);
327 
328 #ifdef LOCK_PROFILING
329 	__mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line);
330 #else
331 	__mtx_unlock(m, curthread, opts, file, line);
332 #endif
333 	TD_LOCKS_DEC(curthread);
334 }
335 
336 void
337 __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
338     int line)
339 {
340 	struct mtx *m;
341 #ifdef SMP
342 	uintptr_t tid, v;
343 #endif
344 
345 	m = mtxlock2mtx(c);
346 
347 	KASSERT(m->mtx_lock != MTX_DESTROYED,
348 	    ("mtx_lock_spin() of destroyed mutex %p @ %s:%d", m, file, line));
349 	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
350 	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
351 	    m->lock_object.lo_name, file, line));
352 	if (mtx_owned(m))
353 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
354 		    (opts & MTX_RECURSE) != 0,
355 	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
356 		    m->lock_object.lo_name, file, line));
357 	opts &= ~MTX_RECURSE;
358 	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
359 	    file, line, NULL);
360 #ifdef SMP
361 	spinlock_enter();
362 	tid = (uintptr_t)curthread;
363 	v = MTX_UNOWNED;
364 	if (!_mtx_obtain_lock_fetch(m, &v, tid))
365 		_mtx_lock_spin(m, v, opts, file, line);
366 	else
367 		LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire,
368 		    m, 0, 0, file, line);
369 #else
370 	__mtx_lock_spin(m, curthread, opts, file, line);
371 #endif
372 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
373 	    line);
374 	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
375 }
376 
377 int
378 __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
379     int line)
380 {
381 	struct mtx *m;
382 
383 	if (SCHEDULER_STOPPED())
384 		return (1);
385 
386 	m = mtxlock2mtx(c);
387 
388 	KASSERT(m->mtx_lock != MTX_DESTROYED,
389 	    ("mtx_trylock_spin() of destroyed mutex %p @ %s:%d", m, file,
390 	    line));
391 	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
392 	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
393 	    m->lock_object.lo_name, file, line));
394 	KASSERT((opts & MTX_RECURSE) == 0,
395 	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
396 	    m->lock_object.lo_name, file, line));
397 	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
398 		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
399 		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
400 		return (1);
401 	}
402 	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
403 	return (0);
404 }
405 
406 void
407 __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
408     int line)
409 {
410 	struct mtx *m;
411 
412 	m = mtxlock2mtx(c);
413 
414 	KASSERT(m->mtx_lock != MTX_DESTROYED,
415 	    ("mtx_unlock_spin() of destroyed mutex %p @ %s:%d", m, file,
416 	    line));
417 	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
418 	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
419 	    m->lock_object.lo_name, file, line));
420 	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
421 	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
422 	    line);
423 	mtx_assert(m, MA_OWNED);
424 
425 	__mtx_unlock_spin(m);
426 }
427 
428 /*
429  * The important part of mtx_trylock{,_flags}()
430  * Tries to acquire lock `m.'  If this function is called on a mutex that
431  * is already owned, it will recursively acquire the lock.
432  */
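/*
 * Usage sketch (foo_mtx is a hypothetical MTX_DEF mutex, not part of
 * this file):
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... critical section ...
 *		mtx_unlock(&foo_mtx);
 *	}
 */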
433 int
434 _mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
435 {
436 	struct thread *td;
437 	uintptr_t tid, v;
438 #ifdef LOCK_PROFILING
439 	uint64_t waittime = 0;
440 	int contested = 0;
441 #endif
442 	int rval;
443 	bool recursed;
444 
445 	td = curthread;
446 	tid = (uintptr_t)td;
447 	if (SCHEDULER_STOPPED())
448 		return (1);
449 
450 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
451 	    ("mtx_trylock() by idle thread %p on mutex %p @ %s:%d",
452 	    curthread, m, file, line));
453 	KASSERT(m->mtx_lock != MTX_DESTROYED,
454 	    ("mtx_trylock() of destroyed mutex %p @ %s:%d", m, file, line));
455 	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
456 	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
457 	    file, line));
458 
459 	rval = 1;
460 	recursed = false;
461 	v = MTX_UNOWNED;
462 	for (;;) {
463 		if (_mtx_obtain_lock_fetch(m, &v, tid))
464 			break;
465 		if (v == MTX_UNOWNED)
466 			continue;
467 		if (v == tid &&
468 		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
469 		    (opts & MTX_RECURSE) != 0)) {
470 			m->mtx_recurse++;
471 			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
472 			recursed = true;
473 			break;
474 		}
475 		rval = 0;
476 		break;
477 	}
478 
479 	opts &= ~MTX_RECURSE;
480 
481 	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
482 	if (rval) {
483 		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
484 		    file, line);
485 		TD_LOCKS_INC(curthread);
486 		if (!recursed)
487 			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
488 			    m, contested, waittime, file, line);
489 	}
490 
491 	return (rval);
492 }
493 
494 int
495 _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
496 {
497 	struct mtx *m;
498 
499 	m = mtxlock2mtx(c);
500 	return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
501 }
502 
503 /*
504  * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
505  *
506  * We call this if the lock is either contested (i.e. we need to go to
507  * sleep waiting for it), or if we need to recurse on it.
508  */
509 #if LOCK_DEBUG > 0
510 void
511 __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
512     int line)
513 #else
514 void
515 __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
516 #endif
517 {
518 	struct thread *td;
519 	struct mtx *m;
520 	struct turnstile *ts;
521 	uintptr_t tid;
522 	struct thread *owner;
523 #ifdef LOCK_PROFILING
524 	int contested = 0;
525 	uint64_t waittime = 0;
526 #endif
527 #if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
528 	struct lock_delay_arg lda;
529 #endif
530 #ifdef KDTRACE_HOOKS
531 	u_int sleep_cnt = 0;
532 	int64_t sleep_time = 0;
533 	int64_t all_time = 0;
534 #endif
535 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
536 	int doing_lockprof = 0;
537 #endif
538 
539 	td = curthread;
540 	tid = (uintptr_t)td;
541 	m = mtxlock2mtx(c);
542 
543 #ifdef KDTRACE_HOOKS
544 	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
545 		while (v == MTX_UNOWNED) {
546 			if (_mtx_obtain_lock_fetch(m, &v, tid))
547 				goto out_lockstat;
548 		}
549 		doing_lockprof = 1;
550 		all_time -= lockstat_nsecs(&m->lock_object);
551 	}
552 #endif
553 #ifdef LOCK_PROFILING
554 	doing_lockprof = 1;
555 #endif
556 
557 	if (SCHEDULER_STOPPED())
558 		return;
559 
560 	if (__predict_false(v == MTX_UNOWNED))
561 		v = MTX_READ_VALUE(m);
562 
563 	if (__predict_false(lv_mtx_owner(v) == td)) {
564 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
565 		    (opts & MTX_RECURSE) != 0,
566 	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
567 		    m->lock_object.lo_name, file, line));
568 #if LOCK_DEBUG > 0
569 		opts &= ~MTX_RECURSE;
570 #endif
571 		m->mtx_recurse++;
572 		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
573 		if (LOCK_LOG_TEST(&m->lock_object, opts))
574 			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
575 		return;
576 	}
577 #if LOCK_DEBUG > 0
578 	opts &= ~MTX_RECURSE;
579 #endif
580 
581 #if defined(ADAPTIVE_MUTEXES)
582 	lock_delay_arg_init(&lda, &mtx_delay);
583 #elif defined(KDTRACE_HOOKS)
584 	lock_delay_arg_init_noadapt(&lda);
585 #endif
586 
587 #ifdef HWPMC_HOOKS
588 	PMC_SOFT_CALL( , , lock, failed);
589 #endif
590 	lock_profile_obtain_lock_failed(&m->lock_object, false,
591 		    &contested, &waittime);
592 	if (LOCK_LOG_TEST(&m->lock_object, opts))
593 		CTR4(KTR_LOCK,
594 		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
595 		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
596 
597 	THREAD_CONTENDS_ON_LOCK(&m->lock_object);
598 
599 	for (;;) {
600 		if (v == MTX_UNOWNED) {
601 			if (_mtx_obtain_lock_fetch(m, &v, tid))
602 				break;
603 			continue;
604 		}
605 #ifdef KDTRACE_HOOKS
606 		lda.spin_cnt++;
607 #endif
608 #ifdef ADAPTIVE_MUTEXES
609 		/*
610 		 * If the owner is running on another CPU, spin until the
611 		 * owner stops running or the state of the lock changes.
612 		 */
613 		owner = lv_mtx_owner(v);
614 		if (TD_IS_RUNNING(owner)) {
615 			if (LOCK_LOG_TEST(&m->lock_object, 0))
616 				CTR3(KTR_LOCK,
617 				    "%s: spinning on %p held by %p",
618 				    __func__, m, owner);
619 			KTR_STATE1(KTR_SCHED, "thread",
620 			    sched_tdname((struct thread *)tid),
621 			    "spinning", "lockname:\"%s\"",
622 			    m->lock_object.lo_name);
623 			do {
624 				lock_delay(&lda);
625 				v = MTX_READ_VALUE(m);
626 				owner = lv_mtx_owner(v);
627 			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
628 			KTR_STATE0(KTR_SCHED, "thread",
629 			    sched_tdname((struct thread *)tid),
630 			    "running");
631 			continue;
632 		}
633 #endif
634 
635 		ts = turnstile_trywait(&m->lock_object);
636 		v = MTX_READ_VALUE(m);
637 retry_turnstile:
638 
639 		/*
640 		 * Check if the lock has been released while spinning for
641 		 * the turnstile chain lock.
642 		 */
643 		if (v == MTX_UNOWNED) {
644 			turnstile_cancel(ts);
645 			continue;
646 		}
647 
648 #ifdef ADAPTIVE_MUTEXES
649 		/*
650 		 * The current lock owner might have started executing
651 		 * on another CPU (or the lock could have changed
652 		 * owners) while we were waiting on the turnstile
653 		 * chain lock.  If so, drop the turnstile lock and try
654 		 * again.
655 		 */
656 		owner = lv_mtx_owner(v);
657 		if (TD_IS_RUNNING(owner)) {
658 			turnstile_cancel(ts);
659 			continue;
660 		}
661 #endif
662 
663 		/*
664 		 * If the mutex isn't already contested and a failure occurs
665 		 * setting the contested bit, the mutex was either released
666 		 * or the state of the MTX_RECURSED bit changed.
667 		 */
668 		if ((v & MTX_CONTESTED) == 0 &&
669 		    !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
670 			goto retry_turnstile;
671 		}
672 
673 		/*
674 		 * We definitely must sleep for this lock.
675 		 */
676 		mtx_assert(m, MA_NOTOWNED);
677 
678 		/*
679 		 * Block on the turnstile.
680 		 */
681 #ifdef KDTRACE_HOOKS
682 		sleep_time -= lockstat_nsecs(&m->lock_object);
683 #endif
684 #ifndef ADAPTIVE_MUTEXES
685 		owner = mtx_owner(m);
686 #endif
687 		MPASS(owner == mtx_owner(m));
688 		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
689 #ifdef KDTRACE_HOOKS
690 		sleep_time += lockstat_nsecs(&m->lock_object);
691 		sleep_cnt++;
692 #endif
693 		v = MTX_READ_VALUE(m);
694 	}
695 	THREAD_CONTENTION_DONE(&m->lock_object);
696 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
697 	if (__predict_true(!doing_lockprof))
698 		return;
699 #endif
700 #ifdef KDTRACE_HOOKS
701 	all_time += lockstat_nsecs(&m->lock_object);
702 	if (sleep_time)
703 		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);
704 
705 	/*
706 	 * Only record the loops spinning and not sleeping.
707 	 */
708 	if (lda.spin_cnt > sleep_cnt)
709 		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
710 out_lockstat:
711 #endif
712 	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
713 	    waittime, file, line);
714 }
715 
716 #ifdef SMP
717 /*
718  * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
719  *
720  * This is only called if we need to actually spin for the lock. Recursion
721  * is handled inline.
722  */
723 #if LOCK_DEBUG > 0
724 void
725 _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
726     const char *file, int line)
727 #else
728 void
729 _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
730 #endif
731 {
732 	struct mtx *m;
733 	struct lock_delay_arg lda;
734 	uintptr_t tid;
735 #ifdef LOCK_PROFILING
736 	int contested = 0;
737 	uint64_t waittime = 0;
738 #endif
739 #ifdef KDTRACE_HOOKS
740 	int64_t spin_time = 0;
741 #endif
742 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
743 	int doing_lockprof = 0;
744 #endif
745 
746 	tid = (uintptr_t)curthread;
747 	m = mtxlock2mtx(c);
748 
749 #ifdef KDTRACE_HOOKS
750 	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
751 		while (v == MTX_UNOWNED) {
752 			if (_mtx_obtain_lock_fetch(m, &v, tid))
753 				goto out_lockstat;
754 		}
755 		doing_lockprof = 1;
756 		spin_time -= lockstat_nsecs(&m->lock_object);
757 	}
758 #endif
759 #ifdef LOCK_PROFILING
760 	doing_lockprof = 1;
761 #endif
762 
763 	if (__predict_false(v == MTX_UNOWNED))
764 		v = MTX_READ_VALUE(m);
765 
766 	if (__predict_false(v == tid)) {
767 		m->mtx_recurse++;
768 		return;
769 	}
770 
771 	if (SCHEDULER_STOPPED())
772 		return;
773 
774 	if (LOCK_LOG_TEST(&m->lock_object, opts))
775 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
776 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
777 	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);
778 
779 	lock_delay_arg_init(&lda, &mtx_spin_delay);
780 
781 #ifdef HWPMC_HOOKS
782 	PMC_SOFT_CALL( , , lock, failed);
783 #endif
784 	lock_profile_obtain_lock_failed(&m->lock_object, true, &contested, &waittime);
785 
786 	for (;;) {
787 		if (v == MTX_UNOWNED) {
788 			if (_mtx_obtain_lock_fetch(m, &v, tid))
789 				break;
790 			continue;
791 		}
792 		/* Give interrupts a chance while we spin. */
793 		spinlock_exit();
794 		do {
795 			if (__predict_true(lda.spin_cnt < 10000000)) {
796 				lock_delay(&lda);
797 			} else {
798 				_mtx_lock_indefinite_check(m, &lda);
799 			}
800 			v = MTX_READ_VALUE(m);
801 		} while (v != MTX_UNOWNED);
802 		spinlock_enter();
803 	}
804 
805 	if (LOCK_LOG_TEST(&m->lock_object, opts))
806 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
807 	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
808 	    "running");
809 
810 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
811 	if (__predict_true(!doing_lockprof))
812 		return;
813 #endif
814 #ifdef KDTRACE_HOOKS
815 	spin_time += lockstat_nsecs(&m->lock_object);
816 	if (lda.spin_cnt != 0)
817 		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
818 out_lockstat:
819 #endif
820 	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m,
821 	    contested, waittime, file, line);
822 }
823 #endif /* SMP */
824 
825 #ifdef INVARIANTS
826 static void
827 thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
828 {
829 
830 	KASSERT(m->mtx_lock != MTX_DESTROYED,
831 	    ("thread_lock() of destroyed mutex %p @ %s:%d", m, file, line));
832 	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
833 	    ("thread_lock() of sleep mutex %s @ %s:%d",
834 	    m->lock_object.lo_name, file, line));
835 	KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0,
836 	    ("thread_lock: got a recursive mutex %s @ %s:%d\n",
837 	    m->lock_object.lo_name, file, line));
838 	WITNESS_CHECKORDER(&m->lock_object,
839 	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
840 }
841 #else
842 #define thread_lock_validate(m, opts, file, line) do { } while (0)
843 #endif
844 
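/*
 * Fast path for thread_lock(): try to take the thread's current td_lock
 * with a single atomic op, punting to thread_lock_flags_() if lockstat is
 * enabled, td_lock is blocked_lock, the acquire fails, or td_lock changed
 * while it was being acquired.
 */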
845 #ifndef LOCK_PROFILING
846 #if LOCK_DEBUG > 0
847 void
848 _thread_lock(struct thread *td, int opts, const char *file, int line)
849 #else
850 void
851 _thread_lock(struct thread *td)
852 #endif
853 {
854 	struct mtx *m;
855 	uintptr_t tid;
856 
857 	tid = (uintptr_t)curthread;
858 
859 	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
860 		goto slowpath_noirq;
861 	spinlock_enter();
862 	m = td->td_lock;
863 	thread_lock_validate(m, 0, file, line);
864 	if (__predict_false(m == &blocked_lock))
865 		goto slowpath_unlocked;
866 	if (__predict_false(!_mtx_obtain_lock(m, tid)))
867 		goto slowpath_unlocked;
868 	if (__predict_true(m == td->td_lock)) {
869 		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
870 		return;
871 	}
872 	_mtx_release_lock_quick(m);
873 slowpath_unlocked:
874 	spinlock_exit();
875 slowpath_noirq:
876 #if LOCK_DEBUG > 0
877 	thread_lock_flags_(td, opts, file, line);
878 #else
879 	thread_lock_flags_(td, 0, 0, 0);
880 #endif
881 }
882 #endif
883 
884 void
885 thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
886 {
887 	struct mtx *m;
888 	uintptr_t tid, v;
889 	struct lock_delay_arg lda;
890 #ifdef LOCK_PROFILING
891 	int contested = 0;
892 	uint64_t waittime = 0;
893 #endif
894 #ifdef KDTRACE_HOOKS
895 	int64_t spin_time = 0;
896 #endif
897 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
898 	int doing_lockprof = 1;
899 #endif
900 
901 	tid = (uintptr_t)curthread;
902 
903 	if (SCHEDULER_STOPPED()) {
904 		/*
905 		 * Ensure that spinlock sections are balanced even when the
906 		 * scheduler is stopped, since we may otherwise inadvertently
907 		 * re-enable interrupts while dumping core.
908 		 */
909 		spinlock_enter();
910 		return;
911 	}
912 
913 	lock_delay_arg_init(&lda, &mtx_spin_delay);
914 
915 #ifdef HWPMC_HOOKS
916 	PMC_SOFT_CALL( , , lock, failed);
917 #endif
918 
919 #ifdef LOCK_PROFILING
920 	doing_lockprof = 1;
921 #elif defined(KDTRACE_HOOKS)
922 	doing_lockprof = lockstat_enabled;
923 #endif
924 #ifdef KDTRACE_HOOKS
925 	if (__predict_false(doing_lockprof))
926 		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
927 #endif
928 	spinlock_enter();
929 
930 	for (;;) {
931 retry:
932 		m = td->td_lock;
933 		thread_lock_validate(m, opts, file, line);
934 		v = MTX_READ_VALUE(m);
935 		for (;;) {
936 			if (v == MTX_UNOWNED) {
937 				if (_mtx_obtain_lock_fetch(m, &v, tid))
938 					break;
939 				continue;
940 			}
941 			MPASS(v != tid);
942 			lock_profile_obtain_lock_failed(&m->lock_object, true,
943 			    &contested, &waittime);
944 			/* Give interrupts a chance while we spin. */
945 			spinlock_exit();
946 			do {
947 				if (__predict_true(lda.spin_cnt < 10000000)) {
948 					lock_delay(&lda);
949 				} else {
950 					_mtx_lock_indefinite_check(m, &lda);
951 				}
952 				if (m != td->td_lock) {
953 					spinlock_enter();
954 					goto retry;
955 				}
956 				v = MTX_READ_VALUE(m);
957 			} while (v != MTX_UNOWNED);
958 			spinlock_enter();
959 		}
960 		if (m == td->td_lock)
961 			break;
962 		_mtx_release_lock_quick(m);
963 	}
964 	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
965 	    line);
966 	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
967 
968 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
969 	if (__predict_true(!doing_lockprof))
970 		return;
971 #endif
972 #ifdef KDTRACE_HOOKS
973 	spin_time += lockstat_nsecs(&m->lock_object);
974 #endif
975 	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, contested,
976 	    waittime, file, line);
977 #ifdef KDTRACE_HOOKS
978 	if (lda.spin_cnt != 0)
979 		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
980 #endif
981 }
982 
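/*
 * Detach the thread from its lock by pointing td_lock at blocked_lock,
 * which can never be acquired.  The caller retains ownership of the
 * returned mutex and later installs a real lock with thread_lock_unblock()
 * or thread_lock_set().
 */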
983 struct mtx *
984 thread_lock_block(struct thread *td)
985 {
986 	struct mtx *lock;
987 
988 	lock = td->td_lock;
989 	mtx_assert(lock, MA_OWNED);
990 	td->td_lock = &blocked_lock;
991 
992 	return (lock);
993 }
994 
995 void
996 thread_lock_unblock(struct thread *td, struct mtx *new)
997 {
998 
999 	mtx_assert(new, MA_OWNED);
1000 	KASSERT(td->td_lock == &blocked_lock,
1001 	    ("thread %p lock %p not blocked_lock %p",
1002 	    td, td->td_lock, &blocked_lock));
1003 	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
1004 }
1005 
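/*
 * Spin until td_lock no longer points at blocked_lock, i.e. until a
 * concurrent thread_lock_unblock() has installed the thread's real lock.
 */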
1006 void
1007 thread_lock_block_wait(struct thread *td)
1008 {
1009 
1010 	while (td->td_lock == &blocked_lock)
1011 		cpu_spinwait();
1012 
1013 	/* Acquire fence to be certain that all thread state is visible. */
1014 	atomic_thread_fence_acq();
1015 }
1016 
1017 void
1018 thread_lock_set(struct thread *td, struct mtx *new)
1019 {
1020 	struct mtx *lock;
1021 
1022 	mtx_assert(new, MA_OWNED);
1023 	lock = td->td_lock;
1024 	mtx_assert(lock, MA_OWNED);
1025 	td->td_lock = new;
1026 	mtx_unlock_spin(lock);
1027 }
1028 
1029 /*
1030  * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
1031  *
1032  * We are only called here if the lock is recursed, contested (i.e. we
1033  * need to wake up a blocked thread) or a lockstat probe is active.
1034  */
1035 #if LOCK_DEBUG > 0
1036 void
1037 __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
1038     const char *file, int line)
1039 #else
1040 void
1041 __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
1042 #endif
1043 {
1044 	struct mtx *m;
1045 	struct turnstile *ts;
1046 	uintptr_t tid;
1047 
1048 	if (SCHEDULER_STOPPED())
1049 		return;
1050 
1051 	tid = (uintptr_t)curthread;
1052 	m = mtxlock2mtx(c);
1053 
1054 	if (__predict_false(v == tid))
1055 		v = MTX_READ_VALUE(m);
1056 
1057 	if (__predict_false(v & MTX_RECURSED)) {
1058 		if (--(m->mtx_recurse) == 0)
1059 			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
1060 		if (LOCK_LOG_TEST(&m->lock_object, opts))
1061 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
1062 		return;
1063 	}
1064 
1065 	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
1066 	if (v == tid && _mtx_release_lock(m, tid))
1067 		return;
1068 
1069 	/*
1070 	 * We have to lock the chain before the turnstile so this turnstile
1071 	 * can be removed from the hash list if it is empty.
1072 	 */
1073 	turnstile_chain_lock(&m->lock_object);
1074 	_mtx_release_lock_quick(m);
1075 	ts = turnstile_lookup(&m->lock_object);
1076 	if (__predict_false(ts == NULL)) {
1077 		panic("got NULL turnstile on mutex %p v %p", m, (void *)v);
1078 	}
1079 	if (LOCK_LOG_TEST(&m->lock_object, opts))
1080 		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
1081 	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
1082 
1083 	/*
1084 	 * This turnstile is now no longer associated with the mutex.  We can
1085 	 * unlock the chain lock so a new turnstile may take it's place.
1086 	 */
1087 	turnstile_unpend(ts);
1088 	turnstile_chain_unlock(&m->lock_object);
1089 }
1090 
1091 /*
1092  * All the unlocking of MTX_SPIN locks is done inline.
1093  * See the __mtx_unlock_spin() macro for the details.
1094  */
1095 
1096 /*
1097  * The backing function for the INVARIANTS-enabled mtx_assert()
1098  */
1099 #ifdef INVARIANT_SUPPORT
1100 void
1101 __mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
1102 {
1103 	const struct mtx *m;
1104 
1105 	if (KERNEL_PANICKED() || dumping || SCHEDULER_STOPPED())
1106 		return;
1107 
1108 	m = mtxlock2mtx(c);
1109 
1110 	switch (what) {
1111 	case MA_OWNED:
1112 	case MA_OWNED | MA_RECURSED:
1113 	case MA_OWNED | MA_NOTRECURSED:
1114 		if (!mtx_owned(m))
1115 			panic("mutex %s not owned at %s:%d",
1116 			    m->lock_object.lo_name, file, line);
1117 		if (mtx_recursed(m)) {
1118 			if ((what & MA_NOTRECURSED) != 0)
1119 				panic("mutex %s recursed at %s:%d",
1120 				    m->lock_object.lo_name, file, line);
1121 		} else if ((what & MA_RECURSED) != 0) {
1122 			panic("mutex %s unrecursed at %s:%d",
1123 			    m->lock_object.lo_name, file, line);
1124 		}
1125 		break;
1126 	case MA_NOTOWNED:
1127 		if (mtx_owned(m))
1128 			panic("mutex %s owned at %s:%d",
1129 			    m->lock_object.lo_name, file, line);
1130 		break;
1131 	default:
1132 		panic("unknown mtx_assert at %s:%d", file, line);
1133 	}
1134 }
1135 #endif
1136 
1137 /*
1138  * General init routine used by the MTX_SYSINIT() macro.
1139  */
1140 void
1141 mtx_sysinit(void *arg)
1142 {
1143 	struct mtx_args *margs = arg;
1144 
1145 	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
1146 	    margs->ma_opts);
1147 }
1148 
1149 /*
1150  * Mutex initialization routine; initialize lock `m' of type contained in
1151  * `opts' with options contained in `opts' and name `name.'  The optional
1152  * lock type `type' is used as a general lock category name for use with
1153  * witness.
1154  */
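/*
 * Usage sketch (foo_mtx and "foo" are hypothetical names):
 *
 *	static struct mtx foo_mtx;
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	...
 *	mtx_destroy(&foo_mtx);
 */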
1155 void
1156 _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
1157 {
1158 	struct mtx *m;
1159 	struct lock_class *class;
1160 	int flags;
1161 
1162 	m = mtxlock2mtx(c);
1163 
1164 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
1165 	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
1166 	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
1167 	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
1168 	    &m->mtx_lock));
1169 
1170 	/* Determine lock class and lock flags. */
1171 	if (opts & MTX_SPIN)
1172 		class = &lock_class_mtx_spin;
1173 	else
1174 		class = &lock_class_mtx_sleep;
1175 	flags = 0;
1176 	if (opts & MTX_QUIET)
1177 		flags |= LO_QUIET;
1178 	if (opts & MTX_RECURSE)
1179 		flags |= LO_RECURSABLE;
1180 	if ((opts & MTX_NOWITNESS) == 0)
1181 		flags |= LO_WITNESS;
1182 	if (opts & MTX_DUPOK)
1183 		flags |= LO_DUPOK;
1184 	if (opts & MTX_NOPROFILE)
1185 		flags |= LO_NOPROFILE;
1186 	if (opts & MTX_NEW)
1187 		flags |= LO_NEW;
1188 
1189 	/* Initialize mutex. */
1190 	lock_init(&m->lock_object, class, name, type, flags);
1191 
1192 	m->mtx_lock = MTX_UNOWNED;
1193 	m->mtx_recurse = 0;
1194 }
1195 
1196 /*
1197  * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
1198  * passed in as a flag here because if the corresponding mtx_init() was
1199  * called with MTX_QUIET set, then it will already be set in the mutex's
1200  * flags.
1201  */
1202 void
1203 _mtx_destroy(volatile uintptr_t *c)
1204 {
1205 	struct mtx *m;
1206 
1207 	m = mtxlock2mtx(c);
1208 
1209 	if (!mtx_owned(m))
1210 		MPASS(mtx_unowned(m));
1211 	else {
1212 		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
1213 
1214 		/* Perform the non-mtx related part of mtx_unlock_spin(). */
1215 		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) {
1216 			lock_profile_release_lock(&m->lock_object, true);
1217 			spinlock_exit();
1218 		} else {
1219 			TD_LOCKS_DEC(curthread);
1220 			lock_profile_release_lock(&m->lock_object, false);
1221 		}
1222 
1223 		/* Tell witness this isn't locked to make it happy. */
1224 		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
1225 		    __LINE__);
1226 	}
1227 
1228 	m->mtx_lock = MTX_DESTROYED;
1229 	lock_destroy(&m->lock_object);
1230 }
1231 
1232 /*
1233  * Initialize the mutex code and system mutexes.  This is called from the MD
1234  * startup code prior to mi_startup().  The per-CPU data space needs to be
1235  * set up before this is called.
1236  */
1237 void
1238 mutex_init(void)
1239 {
1240 
1241 	/* Setup turnstiles so that sleep mutexes work. */
1242 	init_turnstiles();
1243 
1244 	/*
1245 	 * Initialize mutexes.
1246 	 */
1247 	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
1248 	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
1249 	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
1250 	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
1251 	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
1252 	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
1253 	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
1254 	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
1255 	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
1256 	mtx_lock(&Giant);
1257 }
1258 
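/*
 * Invoked from the spin loops after a lock has been spun on for a long
 * time.  Keeps waiting while the debugger is active or the kernel has
 * panicked; otherwise it eventually panics, reporting the current owner.
 */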
1259 static void __noinline
1260 _mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
1261 {
1262 	struct thread *td;
1263 
1264 	ldap->spin_cnt++;
1265 	if (ldap->spin_cnt < 60000000 || kdb_active || KERNEL_PANICKED())
1266 		cpu_lock_delay();
1267 	else {
1268 		td = mtx_owner(m);
1269 
1270 		/* If the mutex is unlocked, try again. */
1271 		if (td == NULL)
1272 			return;
1273 
1274 		printf( "spin lock %p (%s) held by %p (tid %d) too long\n",
1275 		    m, m->lock_object.lo_name, td, td->td_tid);
1276 #ifdef WITNESS
1277 		witness_display_spinlock(&m->lock_object, td, printf);
1278 #endif
1279 		panic("spin lock held too long");
1280 	}
1281 	cpu_spinwait();
1282 }
1283 
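/*
 * Spin, without ever acquiring the lock, until the spin mutex is observed
 * to be unowned.
 */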
1284 void
1285 mtx_spin_wait_unlocked(struct mtx *m)
1286 {
1287 	struct lock_delay_arg lda;
1288 
1289 	KASSERT(m->mtx_lock != MTX_DESTROYED,
1290 	    ("%s() of destroyed mutex %p", __func__, m));
1291 	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
1292 	    ("%s() of sleep mutex %p (%s)", __func__, m,
1293 	    m->lock_object.lo_name));
1294 	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
1295 	    m->lock_object.lo_name));
1296 
1297 	lda.spin_cnt = 0;
1298 
1299 	while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
1300 		if (__predict_true(lda.spin_cnt < 10000000)) {
1301 			cpu_spinwait();
1302 			lda.spin_cnt++;
1303 		} else {
1304 			_mtx_lock_indefinite_check(m, &lda);
1305 		}
1306 	}
1307 }
1308 
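/*
 * Wait for the sleep mutex to become unowned.  Spins while the owner is
 * running; otherwise briefly acquires and releases the mutex to sleep
 * through the contention.
 */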
1309 void
1310 mtx_wait_unlocked(struct mtx *m)
1311 {
1312 	struct thread *owner;
1313 	uintptr_t v;
1314 
1315 	KASSERT(m->mtx_lock != MTX_DESTROYED,
1316 	    ("%s() of destroyed mutex %p", __func__, m));
1317 	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
1318 	    ("%s() of spin mutex %p (%s)", __func__, m,
1319 	    m->lock_object.lo_name));
1320 	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
1321 	    m->lock_object.lo_name));
1322 
1323 	for (;;) {
1324 		v = atomic_load_acq_ptr(&m->mtx_lock);
1325 		if (v == MTX_UNOWNED) {
1326 			break;
1327 		}
1328 		owner = lv_mtx_owner(v);
1329 		if (!TD_IS_RUNNING(owner)) {
1330 			mtx_lock(m);
1331 			mtx_unlock(m);
1332 			break;
1333 		}
1334 		cpu_spinwait();
1335 	}
1336 }
1337 
1338 #ifdef DDB
1339 static void
1340 db_show_mtx(const struct lock_object *lock)
1341 {
1342 	struct thread *td;
1343 	const struct mtx *m;
1344 
1345 	m = (const struct mtx *)lock;
1346 
1347 	db_printf(" flags: {");
1348 	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
1349 		db_printf("SPIN");
1350 	else
1351 		db_printf("DEF");
1352 	if (m->lock_object.lo_flags & LO_RECURSABLE)
1353 		db_printf(", RECURSE");
1354 	if (m->lock_object.lo_flags & LO_DUPOK)
1355 		db_printf(", DUPOK");
1356 	db_printf("}\n");
1357 	db_printf(" state: {");
1358 	if (mtx_unowned(m))
1359 		db_printf("UNOWNED");
1360 	else if (mtx_destroyed(m))
1361 		db_printf("DESTROYED");
1362 	else {
1363 		db_printf("OWNED");
1364 		if (m->mtx_lock & MTX_CONTESTED)
1365 			db_printf(", CONTESTED");
1366 		if (m->mtx_lock & MTX_RECURSED)
1367 			db_printf(", RECURSED");
1368 	}
1369 	db_printf("}\n");
1370 	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
1371 		td = mtx_owner(m);
1372 		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
1373 		    td->td_tid, td->td_proc->p_pid, td->td_name);
1374 		if (mtx_recursed(m))
1375 			db_printf(" recursed: %d\n", m->mtx_recurse);
1376 	}
1377 }
1378 #endif
1379