xref: /freebsd/sys/kern/kern_mutex.c (revision f0adf7f5cdd241db2f2c817683191a6ef64a4e95)
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_mutex_wake_all.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
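
/*
 * The lock word encodes the owner: mtx_lock holds the owning thread
 * pointer, with the low bits doubling as the MTX_RECURSED and
 * MTX_CONTESTED state flags, or the special value MTX_UNOWNED when the
 * mutex is free.  MTX_FLAGMASK masks the flag bits back off so that
 * mtx_owner() can recover the thread pointer.
 */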

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex hold time");

struct mutex_prof {
	const char	*name;
	const char	*file;
	int		line;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_holding;
	uintmax_t	cnt_contest_locking;
	struct mutex_prof *next;
};
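
/*
 * All times are measured in nanoseconds as returned by nanoseconds()
 * below: cnt_max is the longest single hold time seen, cnt_tot the sum
 * of all hold times, and cnt_cur the number of acquisitions sampled.
 * The cnt_contest_* fields accumulate the per-mutex contention counters
 * harvested at unlock time in _mtx_unlock_flags() below.
 */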

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define	NUM_MPROF_BUFFERS	1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define	MPROF_HASH_SIZE		1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE		(256 * 400)

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;
	static int multiplier = 1;

	if (first_free_mprof_buf == 0)
		return (SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded")));

retry_sbufops:
	sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "%6s %12s %11s %5s %12s %12s %s\n",
	    "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
	/*
	 * XXX this spinlock seems to be by far the largest perpetrator
	 * of spinlock latency (1.6 msec on an Athlon1600 was recorded
	 * even before I pessimized it further by moving the average
	 * computation here).
	 */
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i) {
		sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
		    mprof_buf[i].cnt_max / 1000,
		    mprof_buf[i].cnt_tot / 1000,
		    mprof_buf[i].cnt_cur,
		    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
			mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
		    mprof_buf[i].cnt_contest_holding,
		    mprof_buf[i].cnt_contest_locking,
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
		if (sbuf_overflowed(sb)) {
			mtx_unlock_spin(&mprof_mtx);
			sbuf_delete(sb);
			multiplier++;
			goto retry_sbufops;
		}
	}
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");

static int
reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	if (first_free_mprof_buf == 0)
		return (0);

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);

	mtx_lock_spin(&mprof_mtx);
	bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
	bzero(mprof_hash, sizeof(mprof_hash));
	first_free_mprof_buf = 0;
	mtx_unlock_spin(&mprof_mtx);
	return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
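
/*
 * Usage sketch (requires a kernel built with MUTEX_PROFILING; the
 * names follow the sysctl declarations above):
 *
 *	sysctl debug.mutex.prof.enable=1
 *	... run the workload of interest ...
 *	sysctl debug.mutex.prof.stats
 *	sysctl debug.mutex.prof.reset=1
 */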
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
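/*
 * Roughly speaking (see sys/mutex.h for the authoritative definitions),
 * the public macros expand along these lines:
 *
 *	mtx_lock(m)	->	_mtx_lock_flags((m), 0, LOCK_FILE, LOCK_LINE)
 *	mtx_unlock(m)	->	_mtx_unlock_flags((m), 0, LOCK_FILE, LOCK_LINE)
 */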
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->mtx_acqtime == 0) {
		m->mtx_filename = file;
		m->mtx_lineno = line;
		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->mtx_acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->mtx_acqtime;
		m->mtx_acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = m->mtx_filename;
		    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
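		/*
		 * Simple rolling hash over the (line number, pathname)
		 * pair, reduced modulo the mprof_hash[] size.
		 */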
		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == m->mtx_lineno &&
			    strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = m->mtx_lineno;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if (now - acqtime > mpp->cnt_max)
			mpp->cnt_max = now - acqtime;
		mpp->cnt_tot += now - acqtime;
		mpp->cnt_cur++;
		/*
		 * There's a small race, really we should cmpxchg
		 * 0 with the current value, but that would bill
		 * the contention to the wrong lock instance if
		 * it followed this also.
		 */
		mpp->cnt_contest_holding += m->mtx_contest_holding;
		m->mtx_contest_holding = 0;
		mpp->cnt_contest_locking += m->mtx_contest_locking;
		m->mtx_contest_locking = 0;
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);
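	/*
	 * The "|| 1" below (and in _mtx_unlock_spin_flags()) makes the
	 * full spin-lock path unconditional, disabling the UP-only
	 * critical_enter()/critical_exit() shortcut.
	 */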
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_get_spin_lock(m, curthread, opts, file, line);
#else
	critical_enter();
#endif
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_rel_spin_lock(m);
#else
	critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock if the mutex is
 * marked recursable; otherwise the attempt simply fails.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval)
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

	return (rval);
}
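
/*
 * Illustrative caller pattern, via the mtx_trylock() macro ("sc" is a
 * hypothetical softc):
 *
 *	if (mtx_trylock(&sc->sc_mtx)) {
 *		... use the protected data ...
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 */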

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
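/*
 * In outline: loop attempting the atomic acquire; on each failure take
 * the turnstile chain lock and re-examine mtx_lock, handling the races
 * where the lock was freed or handed off (MTX_CONTESTED), optionally
 * spinning adaptively while the owner runs on another CPU, and
 * otherwise blocking on the turnstile until woken.
 */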
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;
	struct thread *td = curthread;
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	struct thread *owner;
#endif
	uintptr_t v;
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef MUTEX_PROFILING
	int contested;
#endif

	if (mtx_owned(m)) {
		KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->mtx_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

#ifdef MUTEX_PROFILING
	contested = 0;
#endif
	while (!_obtain_lock(m, td)) {
#ifdef MUTEX_PROFILING
		contested = 1;
		atomic_add_int(&m->mtx_contest_holding, 1);
#endif
		ts = turnstile_lookup(&m->mtx_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_release(&m->mtx_object);
#if defined(__i386__) || defined(__amd64__)
			ia32_pause();
#endif
			continue;
		}

#ifdef MUTEX_WAKE_ALL
		MPASS(v != MTX_CONTESTED);
#else
		/*
		 * The mutex was marked contested on release. This means that
		 * there are other threads blocked on it.  Grab ownership of
		 * it and propagate its priority to the current thread if
		 * necessary.
		 */
		if (v == MTX_CONTESTED) {
			MPASS(ts != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
			turnstile_claim(ts);
			break;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			turnstile_release(&m->mtx_object);
#if defined(__i386__) || defined(__amd64__)
			ia32_pause();
#endif
			continue;
		}

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & MTX_FLAGMASK);
		if (m != &Giant && TD_IS_RUNNING(owner)) {
			turnstile_release(&m->mtx_object);
			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
#if defined(__i386__) || defined(__amd64__)
				ia32_pause();
#endif
			}
			continue;
		}
#endif	/* SMP && !NO_ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    td, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
		turnstile_wait(ts, &m->mtx_object, mtx_owner(m));
	}

#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, td, file, line);
	}
#endif
#ifdef MUTEX_PROFILING
	if (contested)
		m->mtx_contest_locking++;
	m->mtx_contest_holding = 0;
#endif
	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

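	/*
	 * The wait loop below backs off in stages: a CPU pause hint
	 * (where available) for the first ~10 million spins, then
	 * DELAY(1) per iteration, and after ~60 million iterations the
	 * lock is assumed wedged and we panic (unless the kernel
	 * debugger is active).
	 */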
	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
#if defined(__i386__) || defined(__amd64__)
				ia32_pause();
#endif
				continue;
			}
			if (i < 60000000)
				DELAY(1);
			else if (!kdb_active) {
				printf("spin lock %s held by %p for > 5 seconds\n",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef WITNESS
				witness_display_spinlock(&m->mtx_object,
				    mtx_owner(m));
#endif
				panic("spin lock held too long");
			}
#if defined(__i386__) || defined(__amd64__)
			ia32_pause();
#endif
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
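/*
 * In outline: a recursed lock just sheds one level.  Otherwise look up
 * the turnstile; with MUTEX_WAKE_ALL every waiter is woken and the lock
 * fully released, while the default path wakes a single waiter and
 * either releases the lock (no waiters remain) or leaves MTX_CONTESTED
 * set so the next acquirer takes the hand-off path in _mtx_lock_sleep().
 */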
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;
#ifndef PREEMPTION
	struct thread *td, *td1;
#endif

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	ts = turnstile_lookup(&m->mtx_object);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
	if (ts == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		turnstile_release(&m->mtx_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
	/* XXX */
	td1 = turnstile_head(ts);
#endif
#ifdef MUTEX_WAKE_ALL
	turnstile_broadcast(ts);
	_release_lock_quick(m);
#else
	if (turnstile_signal(ts)) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else {
		m->mtx_lock = MTX_CONTESTED;
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
			    m);
	}
#endif
	turnstile_unpend(ts);

#ifndef PREEMPTION
	/*
	 * XXX: This is just a hack until preemption is done.  However,
	 * once preemption is done we need to either wrap the
	 * turnstile_signal() and release of the actual lock in an
	 * extra critical section or change the preemption code to
	 * always just set a flag and never do instant-preempts.
	 */
	td = curthread;
	if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
		return;
	mtx_lock_spin(&sched_lock);
	if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		mi_switch(SW_INVOL, NULL);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}
	mtx_unlock_spin(&sched_lock);
#endif

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
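
/*
 * Illustrative use, mirroring the mprof_mtx setup earlier in this file
 * (the "foo" names are hypothetical):
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo lock", MTX_DEF);
 */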

/*
 * Mutex initialization routine; initialize lock `m' with name `name'
 * and options contained in `opts'.  The optional lock type `type' is
 * used as a general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex \"%s\" %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
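
/*
 * Illustrative initialization of a driver mutex ("sc" is a
 * hypothetical softc):
 *
 *	mtx_init(&sc->sc_mtx, "mydriver softc", NULL, MTX_DEF);
 */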

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a
 * flag here because if the corresponding mtx_init() was called with
 * MTX_QUIET set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}
894