/* xref: /freebsd/sys/kern/kern_mutex.c (revision d056fa046c6a91b90cd98165face0e42a33a5173) */
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_mprof.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Force MUTEX_WAKE_ALL for now.
 * Single-thread wakeup needs fixes to avoid race conditions with
 * priority inheritance.
 */
#ifndef MUTEX_WAKE_ALL
#define MUTEX_WAKE_ALL
#endif

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

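/*
 * The low bits of mtx_lock hold status flags while the mutex is owned;
 * masking them off recovers the owning thread pointer:
 */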
#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

#ifdef DDB
static void	db_show_mtx(struct lock_object *lock);
#endif

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE,
#ifdef DDB
	db_show_mtx
#endif
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE,
#ifdef DDB
	db_show_mtx
#endif
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");
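/*
 * Profiling is toggled at run time through the sysctl tree declared
 * above, e.g. (from userland, assuming the stock sysctl(8) utility):
 *
 *	sysctl debug.mutex.prof.enable=1
 */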

struct mutex_prof {
	const char	*name;
	const char	*file;
	int		line;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_holding;
	uintmax_t	cnt_contest_locking;
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#ifdef MPROF_BUFFERS
#define NUM_MPROF_BUFFERS	MPROF_BUFFERS
#else
#define	NUM_MPROF_BUFFERS	1000
#endif
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#ifndef MPROF_HASH_SIZE
#define	MPROF_HASH_SIZE		1009
#endif
#if NUM_MPROF_BUFFERS >= MPROF_HASH_SIZE
#error NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE
#endif
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE		(256 * 400)

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;
	static int multiplier = 1;

	if (first_free_mprof_buf == 0)
		return (SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded")));

retry_sbufops:
	sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "\n%6s %12s %11s %5s %12s %12s %s\n",
	    "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
	/*
	 * XXX this spinlock seems to be by far the largest perpetrator
	 * of spinlock latency (1.6 msec on an Athlon1600 was recorded
	 * even before I pessimized it further by moving the average
	 * computation here).
	 */
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i) {
		sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
		    mprof_buf[i].cnt_max / 1000,
		    mprof_buf[i].cnt_tot / 1000,
		    mprof_buf[i].cnt_cur,
		    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
			mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
		    mprof_buf[i].cnt_contest_holding,
		    mprof_buf[i].cnt_contest_locking,
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
		if (sbuf_overflowed(sb)) {
			mtx_unlock_spin(&mprof_mtx);
			sbuf_delete(sb);
			multiplier++;
			goto retry_sbufops;
		}
	}
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
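/*
 * The accumulated statistics are read back from userland with:
 *
 *	sysctl debug.mutex.prof.stats
 */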

static int
reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	if (first_free_mprof_buf == 0)
		return (0);

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);

	mtx_lock_spin(&mprof_mtx);
	bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
	bzero(mprof_hash, sizeof(struct mutex_prof *) * MPROF_HASH_SIZE);
	first_free_mprof_buf = 0;
	mtx_unlock_spin(&mprof_mtx);
	return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->mtx_acqtime == 0) {
		m->mtx_filename = file;
		m->mtx_lineno = line;
		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->mtx_acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->mtx_acqtime;
		m->mtx_acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = m->mtx_filename;
		    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
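		/*
		 * Hash the call site: seed with the source line number,
		 * then fold in each character of the "../"-stripped file
		 * name so that distinct call sites land in distinct
		 * buckets of the profiling hash.
		 */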
		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == m->mtx_lineno &&
			    strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = m->mtx_lineno;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if (now - acqtime > mpp->cnt_max)
			mpp->cnt_max = now - acqtime;
		mpp->cnt_tot += now - acqtime;
		mpp->cnt_cur++;
		/*
		 * There's a small race here: really we should atomically
		 * swap 0 in for the current value (cmpxchg), but contention
		 * arriving right after that swap would then be billed to
		 * the wrong lock instance.
		 */
		mpp->cnt_contest_holding += m->mtx_contest_holding;
		m->mtx_contest_holding = 0;
		mpp->cnt_contest_locking += m->mtx_contest_locking;
		m->mtx_contest_locking = 0;
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
	_rel_spin_lock(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If the calling thread already owns `m' and
 * the mutex is marked recursable, the lock is acquired recursively;
 * otherwise an attempt on an owned mutex fails.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);
	KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval)
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

	return (rval);
}

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	volatile struct thread *owner;
#endif
	uintptr_t v;
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef MUTEX_PROFILING
	int contested;
#endif

	if (mtx_owned(m)) {
		KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->mtx_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

#ifdef MUTEX_PROFILING
	contested = 0;
#endif
	while (!_obtain_lock(m, tid)) {
#ifdef MUTEX_PROFILING
		contested = 1;
		atomic_add_int(&m->mtx_contest_holding, 1);
#endif
		turnstile_lock(&m->mtx_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#ifdef MUTEX_WAKE_ALL
		MPASS(v != MTX_CONTESTED);
#else
		/*
		 * The mutex was marked contested on release. This means that
		 * there are other threads blocked on it.  Grab ownership of
		 * it and propagate its priority to the current thread if
		 * necessary.
		 */
		if (v == MTX_CONTESTED) {
			m->mtx_lock = tid | MTX_CONTESTED;
			turnstile_claim(&m->mtx_object);
			break;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_release(&m->mtx_object);
			cpu_spinwait();
			continue;
		}

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
		if (TD_IS_RUNNING(owner)) {
#else
		if (m != &Giant && TD_IS_RUNNING(owner)) {
#endif
			turnstile_release(&m->mtx_object);
			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
				cpu_spinwait();
			}
			continue;
		}
#endif	/* SMP && !NO_ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
		turnstile_wait(&m->mtx_object, mtx_owner(m),
		    TS_EXCLUSIVE_QUEUE);
	}

#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, (void *)tid, file, line);
	}
#endif
#ifdef MUTEX_PROFILING
	if (contested)
		m->mtx_contest_locking++;
	m->mtx_contest_holding = 0;
#endif
	return;
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	while (!_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
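			/*
			 * Back off progressively: pure spin hints for the
			 * first 10 million iterations, 1us delays after
			 * that, and finally panic if the lock never comes
			 * free.
			 */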
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000)
				DELAY(1);
			else if (!kdb_active && !panicstr) {
				printf("spin lock %s held by %p for > 5 seconds\n",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef WITNESS
				witness_display_spinlock(&m->mtx_object,
				    mtx_owner(m));
#endif
				panic("spin lock held too long");
			}
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}
#endif /* SMP */

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;
#ifndef PREEMPTION
	struct thread *td, *td1;
#endif

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	turnstile_lock(&m->mtx_object);
	ts = turnstile_lookup(&m->mtx_object);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	if (ts == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		turnstile_release(&m->mtx_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
	/* XXX */
	td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
#endif
#ifdef MUTEX_WAKE_ALL
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_release_lock_quick(m);
#else
	if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else {
		m->mtx_lock = MTX_CONTESTED;
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
			    m);
	}
#endif
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);

#ifndef PREEMPTION
	/*
	 * XXX: This is just a hack until preemption is done.  However,
	 * once preemption is done we need to either wrap the
	 * turnstile_signal() and release of the actual lock in an
	 * extra critical section or change the preemption code to
	 * always just set a flag and never do instant-preempts.
	 */
	td = curthread;
	if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
		return;
	mtx_lock_spin(&sched_lock);
	if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		mi_switch(SW_INVOL, NULL);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}
	mtx_unlock_spin(&sched_lock);
#endif

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL || dumping)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
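
/*
 * For example (hypothetical names), a subsystem can have its mutex set
 * up automatically during boot, mirroring the mprof_mtx setup above:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo lock", MTX_DEF);
 */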

/*
 * Mutex initialization routine; initialize lock `m' with the lock type
 * and options contained in `opts' and name `name.'  The optional lock
 * type `type' is used as a general lock category name for use with
 * witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_class *class;
	int flags;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
#ifdef MUTEX_PROFILING
	m->mtx_acqtime = 0;
	m->mtx_filename = NULL;
	m->mtx_lineno = 0;
	m->mtx_contest_holding = 0;
	m->mtx_contest_locking = 0;
#endif

	lock_init(&m->mtx_object, class, name, type, flags);
}
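
/*
 * Typical life cycle (hypothetical lock; see mutex_init() below for
 * real examples):
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	... critical section ...
 *	mtx_unlock(&foo_mtx);
 *	mtx_destroy(&foo_mtx);
 */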

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a
 * flag here because if the corresponding mtx_init() was called with
 * MTX_QUIET set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin)
			spinlock_exit();

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	lock_destroy(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
	struct thread *td;
	struct mtx *m;

	m = (struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->mtx_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->mtx_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif
952