/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif
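
/*
 * On SMP kernels a thread that finds a sleep mutex held by an owner
 * currently running on another CPU will spin instead of blocking, on
 * the theory that the owner will release the lock soon; see the
 * ADAPTIVE_MUTEXES blocks in _mtx_lock_sleep() below.  Building with
 * the NO_ADAPTIVE_MUTEXES option disables this behaviour.
 */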

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
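
/*
 * A note on the lock word, implicit in the macros above: mtx_lock
 * holds MTX_UNOWNED while the mutex is free and MTX_DESTROYED once it
 * has been destroyed; otherwise it holds a pointer to the owning
 * thread, with the MTX_RECURSED and MTX_CONTESTED state bits kept in
 * the low bits covered by MTX_FLAGMASK.  mtx_owner() simply masks the
 * flag bits off.
 */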

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, int how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

	panic("spin locks can only use msleep_spin");
}

int
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

int
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}
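
/*
 * The lc_lock/lc_unlock methods above allow generic code that sleeps
 * to drop and reacquire a lock through its lock class without knowing
 * the lock's type.  Sleep mutexes support this through lock_mtx() and
 * unlock_mtx(); the spin variants deliberately panic, since a thread
 * holding a spin mutex may not sleep -- msleep_spin(9) is the one
 * sleep primitive that works with a spin mutex.
 */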

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m = (const struct mtx *)lock;

	*owner = mtx_owner(m);
	return (mtx_unowned(m) == 0);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	__mtx_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
	__mtx_unlock(m, curthread, opts, file, line);
}
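
/*
 * Illustrative use of the interface (a sketch, not code from this
 * file; "sc" and "sc_mtx" are hypothetical consumer names).  The
 * function versions above back the mtx_lock()/mtx_unlock() macros for
 * modules, and the two calls must always be paired:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	... modify state protected by sc_mtx ...
 *	mtx_unlock(&sc->sc_mtx);
 */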

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line)
{
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
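
/*
 * Illustrative mtx_trylock() pattern (a sketch, not code from this
 * file; "sc" is a hypothetical name).  A zero return means the mutex
 * was not acquired and no state was changed:
 *
 *	if (mtx_trylock(&sc->sc_mtx)) {
 *		... fast path, with the lock held ...
 *		mtx_unlock(&sc->sc_mtx);
 *	} else {
 *		... fall back without the lock ...
 *	}
 */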

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
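/*
 * Outline of the loop below: optionally spin while the owner is
 * running on another CPU (ADAPTIVE_MUTEXES), back out of the turnstile
 * if the lock was released in the meantime, and set MTX_CONTESTED
 * atomically so the releasing thread knows to wake up waiters.  Only
 * after all of that does the thread block on the turnstile.
 */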
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	struct turnstile *ts;
	uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				continue;
			}
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
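/*
 * The loop below escalates rather than spinning forever: roughly the
 * first 10,000,000 iterations only cpu_spinwait(), later iterations
 * add a DELAY(1) pause, and past roughly 60,000,000 iterations
 * _mtx_lock_spin_failed() panics on the assumption that the owner is
 * wedged (unless the debugger is active or a panic is in progress).
 */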
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
	while (!_mtx_obtain_lock(m, tid)) {
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
	    contested, waittime, (file), (line));
	LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
}
#endif /* SMP */

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
			spin_cnt++;
#endif
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
	}
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
		    m, contested, waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}
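
/*
 * The three helpers below implement the scheduler's lock-switch
 * protocol: td_lock points at &blocked_lock while a thread is in
 * transit between lock containers.  blocked_lock is initialized to a
 * value that can never be acquired (see mutex_init() below), so
 * thread_lock_flags_() above keeps spinning and re-reading td_lock
 * until the thread is handed its new lock via thread_lock_unblock()
 * or thread_lock_set().
 */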

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())
		return;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
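/*
 * Illustrative mtx_assert() calls (a sketch, not code from this file;
 * "sc" is a hypothetical name):
 *
 *	mtx_assert(&sc->sc_mtx, MA_OWNED);
 *	mtx_assert(&sc->sc_mtx, MA_OWNED | MA_NOTRECURSED);
 *	mtx_assert(&sc->sc_mtx, MA_NOTOWNED);
 *
 * These expand to nothing unless the kernel is built with INVARIANTS.
 */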
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(const struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL || dumping)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
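
/*
 * Illustrative MTX_SYSINIT() use (a sketch, not code from this file;
 * "foo_mtx" is a hypothetical name).  The macro registers a SYSINIT
 * that calls mtx_sysinit() above with a struct mtx_args describing
 * the mutex:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo mutex", MTX_DEF);
 */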

/*
 * Mutex initialization routine; initialize lock `m' with options
 * contained in `opts' and name `name.'  The optional lock type `type'
 * is used as a general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_class *class;
	int flags;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
		MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;

	lock_init(&m->lock_object, class, name, type, flags);
}
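
/*
 * Illustrative mutex lifecycle (a sketch, not code from this file;
 * "foo_mtx" is a hypothetical name):
 *
 *	struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF | MTX_RECURSE);
 *	mtx_lock(&foo_mtx);
 *	mtx_lock(&foo_mtx);	(legal only because of MTX_RECURSE)
 *	mtx_unlock(&foo_mtx);
 *	mtx_unlock(&foo_mtx);
 *	mtx_destroy(&foo_mtx);
 *
 * Spin mutexes are created with MTX_SPIN instead of MTX_DEF and are
 * taken and released with mtx_lock_spin()/mtx_unlock_spin().
 */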

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a flag
 * here because if the corresponding mtx_init() was called with MTX_QUIET
 * set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Set up turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif
954