1 /*-
2  * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  * 3. Berkeley Software Design Inc's name may not be used to endorse or
13  *    promote products derived from this software without specific prior
14  *    written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30  * $FreeBSD$
31  */
32 
33 /*
34  * Machine independent bits of mutex implementation.
35  */
36 
37 #include "opt_adaptive_mutexes.h"
38 #include "opt_ddb.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/bus.h>
43 #include <sys/kernel.h>
44 #include <sys/ktr.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/mutex.h>
48 #include <sys/proc.h>
49 #include <sys/resourcevar.h>
50 #include <sys/sbuf.h>
51 #include <sys/stdint.h>
52 #include <sys/sysctl.h>
53 #include <sys/vmmeter.h>
54 
55 #include <machine/atomic.h>
56 #include <machine/bus.h>
57 #include <machine/clock.h>
58 #include <machine/cpu.h>
59 
60 #include <ddb/ddb.h>
61 
62 #include <vm/vm.h>
63 #include <vm/vm_extern.h>
64 
65 /*
66  * Internal utility macros.
67  */
68 #define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
69 
70 #define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
71 	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
72 
73 /* XXXKSE This test will change. */
74 #define	thread_running(td)						\
75 	((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)
76 
77 /*
78  * Lock classes for sleep and spin mutexes.
79  */
80 struct lock_class lock_class_mtx_sleep = {
81 	"sleep mutex",
82 	LC_SLEEPLOCK | LC_RECURSABLE
83 };
84 struct lock_class lock_class_mtx_spin = {
85 	"spin mutex",
86 	LC_SPINLOCK | LC_RECURSABLE
87 };
88 
89 /*
90  * System-wide mutexes
91  */
92 struct mtx sched_lock;
93 struct mtx Giant;
94 
95 /*
96  * Prototypes for non-exported routines.
97  */
98 static void	propagate_priority(struct thread *);
99 
100 static void
101 propagate_priority(struct thread *td)
102 {
103 	int pri = td->td_priority;
104 	struct mtx *m = td->td_blocked;
105 
106 	mtx_assert(&sched_lock, MA_OWNED);
107 	for (;;) {
108 		struct thread *td1;
109 
110 		td = mtx_owner(m);
111 
112 		if (td == NULL) {
113 			/*
114 			 * This really isn't quite right. Really
115 			 * ought to bump priority of thread that
116 			 * next acquires the mutex.
117 			 */
118 			MPASS(m->mtx_lock == MTX_CONTESTED);
119 			return;
120 		}
121 
122 		MPASS(td->td_proc->p_magic == P_MAGIC);
123 		KASSERT(td->td_proc->p_stat != SSLEEP, ("sleeping thread owns a mutex"));
124 		if (td->td_priority <= pri) /* lower is higher priority */
125 			return;
126 
127 		/*
128 		 * Bump this thread's priority.
129 		 */
130 		td->td_priority = pri;
131 
132 		/*
133 		 * If lock holder is actually running, just bump priority.
134 		 */
135 		if (thread_running(td)) {
136 			MPASS(td->td_proc->p_stat == SRUN
137 			|| td->td_proc->p_stat == SZOMB
138 			|| td->td_proc->p_stat == SSTOP);
139 			return;
140 		}
141 
142 #ifndef SMP
143 		/*
144 		 * For UP, we check to see if td is curthread (this shouldn't
145 		 * For UP, we check to see if td is curthread (this should never
146 		 * happen, however, as it would mean we are in a deadlock).
147 		KASSERT(td != curthread, ("Deadlock detected"));
148 #endif
149 
150 		/*
151 		 * If on run queue move to new run queue, and quit.
152 		 * XXXKSE this gets a lot more complicated under threads
153 		 * but try anyhow.
154 		 */
155 		if (td->td_proc->p_stat == SRUN) {
156 			MPASS(td->td_blocked == NULL);
157 			remrunqueue(td);
158 			setrunqueue(td);
159 			return;
160 		}
161 
162 		/*
163 		 * If we aren't blocked on a mutex, we should be.
164 		 */
165 		KASSERT(td->td_proc->p_stat == SMTX, (
166 		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
167 		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
168 		    m->mtx_object.lo_name));
169 
170 		/*
171 		 * Pick up the mutex that td is blocked on.
172 		 */
173 		m = td->td_blocked;
174 		MPASS(m != NULL);
175 
176 		/*
177 		 * Check if the thread needs to be moved up on
178 		 * the blocked chain
179 		 */
180 		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
181 			continue;
182 		}
183 
184 		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
185 		if (td1->td_priority <= pri) {
186 			continue;
187 		}
188 
189 		/*
190 		 * Remove thread from blocked chain and determine where
191 		 * it should be moved up to.  Since we know that td1 has
192 		 * a lower priority than td, we know that at least one
193 		 * thread in the chain has a lower priority and that
194 		 * td1 will thus not be NULL after the loop.
195 		 */
196 		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
197 		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
198 			MPASS(td1->td_proc->p_magic == P_MAGIC);
199 			if (td1->td_priority > pri)
200 				break;
201 		}
202 
203 		MPASS(td1 != NULL);
204 		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
205 		CTR4(KTR_LOCK,
206 		    "propagate_priority: p %p moved before %p on [%p] %s",
207 		    td, td1, m, m->mtx_object.lo_name);
208 	}
209 }
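
/*
 * Illustrative scenario (not from the original source; priorities are
 * numeric, with lower values meaning higher priority): if a priority-80
 * thread holds a mutex and a priority-20 thread blocks on it, the walk
 * above lends the waiter's priority to the holder, so the holder runs
 * at 20 until it releases the lock and cannot be starved by unrelated
 * priority-50 work.  The loop repeats this along a chain of blocked
 * lock holders.
 */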
210 
211 #ifdef MUTEX_PROFILING
212 SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
213 SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
214 static int mutex_prof_enable = 0;
215 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
216     &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");
217 
218 struct mutex_prof {
219 	const char *name;
220 	const char *file;
221 	int line;
222 #define MPROF_MAX 0
223 #define MPROF_TOT 1
224 #define MPROF_CNT 2
225 #define MPROF_AVG 3
226 	uintmax_t counter[4];
227 	struct mutex_prof *next;
228 };
229 
230 /*
231  * mprof_buf is a static pool of profiling records to avoid possible
232  * reentrance of the memory allocation functions.
233  *
234  * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
235  */
236 #define NUM_MPROF_BUFFERS 1000
237 static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
238 static int first_free_mprof_buf;
239 #define MPROF_HASH_SIZE 1009
240 static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
241 
242 static int mutex_prof_acquisitions;
243 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
244     &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
245 static int mutex_prof_records;
246 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
247     &mutex_prof_records, 0, "Number of profiling records");
248 static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
249 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
250     &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
251 static int mutex_prof_rejected;
252 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
253     &mutex_prof_rejected, 0, "Number of rejected profiling records");
254 static int mutex_prof_hashsize = MPROF_HASH_SIZE;
255 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
256     &mutex_prof_hashsize, 0, "Hash size");
257 static int mutex_prof_collisions = 0;
258 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
259     &mutex_prof_collisions, 0, "Number of hash collisions");
260 
261 /*
262  * mprof_mtx protects the profiling buffers and the hash.
263  */
264 static struct mtx mprof_mtx;
265 MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);
266 
267 static u_int64_t
268 nanoseconds(void)
269 {
270 	struct timespec tv;
271 
272 	nanotime(&tv);
273 	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
274 }
275 
276 static int
277 dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
278 {
279 	struct sbuf *sb;
280 	int error, i;
281 
282 	if (first_free_mprof_buf == 0)
283 		return SYSCTL_OUT(req, "No locking recorded",
284 		    sizeof("No locking recorded"));
285 
286 	sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
287 	sbuf_printf(sb, "%12s %12s %12s %12s %s\n",
288 	    "max", "total", "count", "average", "name");
289 	mtx_lock_spin(&mprof_mtx);
290 	for (i = 0; i < first_free_mprof_buf; ++i)
291 		sbuf_printf(sb, "%12ju %12ju %12ju %12ju %s:%d (%s)\n",
292 		    mprof_buf[i].counter[MPROF_MAX] / 1000,
293 		    mprof_buf[i].counter[MPROF_TOT] / 1000,
294 		    mprof_buf[i].counter[MPROF_CNT],
295 		    mprof_buf[i].counter[MPROF_AVG] / 1000,
296 		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
297 	mtx_unlock_spin(&mprof_mtx);
298 	sbuf_finish(sb);
299 	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
300 	sbuf_delete(sb);
301 	return (error);
302 }
303 SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING|CTLFLAG_RD,
304     NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
305 #endif
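
/*
 * Illustrative usage sketch (not part of this file): with the
 * MUTEX_PROFILING option compiled into the kernel, the knobs defined
 * above are driven from userland with sysctl(8), e.g.:
 *
 *	sysctl debug.mutex.prof.enable=1
 *	(run a workload)
 *	sysctl debug.mutex.prof.stats
 *
 * The stats handler above reports max/total/average hold times (the
 * nanosecond counters are divided by 1000, i.e. microseconds) keyed by
 * the file:line at which the mutex was acquired.
 */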
306 
307 /*
308  * Function versions of the inlined __mtx_* macros.  These are used by
309  * modules and can also be called from assembly language if needed.
310  */
311 void
312 _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
313 {
314 
315 	MPASS(curthread != NULL);
316 	_get_sleep_lock(m, curthread, opts, file, line);
317 	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
318 	    line);
319 	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
320 #ifdef MUTEX_PROFILING
321 	/* don't reset the timer when/if recursing */
322 	if (m->acqtime == 0) {
323 		m->file = file;
324 		m->line = line;
325 		m->acqtime = mutex_prof_enable ? nanoseconds() : 0;
326 		++mutex_prof_acquisitions;
327 	}
328 #endif
329 }
330 
331 void
332 _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
333 {
334 
335 	MPASS(curthread != NULL);
336 	mtx_assert(m, MA_OWNED);
337 #ifdef MUTEX_PROFILING
338 	if (m->acqtime != 0) {
339 		static const char *unknown = "(unknown)";
340 		struct mutex_prof *mpp;
341 		u_int64_t acqtime, now;
342 		const char *p, *q;
343 		volatile u_int hash;
344 
345 		now = nanoseconds();
346 		acqtime = m->acqtime;
347 		m->acqtime = 0;
348 		if (now <= acqtime)
349 			goto out;
350 		for (p = file; strncmp(p, "../", 3) == 0; p += 3)
351 			/* nothing */ ;
352 		if (p == NULL || *p == '\0')
353 			p = unknown;
354 		for (hash = line, q = p; *q != '\0'; ++q)
355 			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
356 		mtx_lock_spin(&mprof_mtx);
357 		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
358 			if (mpp->line == line && strcmp(mpp->file, p) == 0)
359 				break;
360 		if (mpp == NULL) {
361 			/* Just exit if we cannot get a trace buffer */
362 			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
363 				++mutex_prof_rejected;
364 				goto unlock;
365 			}
366 			mpp = &mprof_buf[first_free_mprof_buf++];
367 			mpp->name = mtx_name(m);
368 			mpp->file = p;
369 			mpp->line = line;
370 			mpp->next = mprof_hash[hash];
371 			if (mprof_hash[hash] != NULL)
372 				++mutex_prof_collisions;
373 			mprof_hash[hash] = mpp;
374 			++mutex_prof_records;
375 		}
376 		/*
377 		 * Record if the mutex has been held longer now than ever
378 		 * before
379 		 */
380 		if ((now - acqtime) > mpp->counter[MPROF_MAX])
381 			mpp->counter[MPROF_MAX] = now - acqtime;
382 		mpp->counter[MPROF_TOT] += now - acqtime;
383 		mpp->counter[MPROF_CNT] += 1;
384 		mpp->counter[MPROF_AVG] =
385 		    mpp->counter[MPROF_TOT] / mpp->counter[MPROF_CNT];
386 unlock:
387 		mtx_unlock_spin(&mprof_mtx);
388 	}
389 out:
390 #endif
391  	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
392 	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
393 	    line);
394 	_rel_sleep_lock(m, curthread, opts, file, line);
395 }
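
/*
 * Illustrative caller pattern (hypothetical "foo_mtx"/"foo_count", not
 * part of this file): consumers normally use the mtx_lock()/mtx_unlock()
 * macros, which are backed by the function versions above for modules
 * (per the comment above):
 *
 *	static struct mtx foo_mtx;
 *	static int foo_count;
 *
 *	mtx_lock(&foo_mtx);
 *	foo_count++;
 *	mtx_unlock(&foo_mtx);
 */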
396 
397 void
398 _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
399 {
400 
401 	MPASS(curthread != NULL);
402 #if defined(SMP) || LOCK_DEBUG > 0
403 	_get_spin_lock(m, curthread, opts, file, line);
404 #else
405 	critical_enter();
406 #endif
407 	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
408 	    line);
409 	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
410 }
411 
412 void
413 _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
414 {
415 
416 	MPASS(curthread != NULL);
417 	mtx_assert(m, MA_OWNED);
418  	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
419 	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
420 	    line);
421 #if defined(SMP) || LOCK_DEBUG > 0
422 	_rel_spin_lock(m);
423 #else
424 	critical_exit();
425 #endif
426 }
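
/*
 * Illustrative spin mutex usage (grounded in this file's own use of
 * sched_lock): spin mutexes are taken with the mtx_lock_spin()/
 * mtx_unlock_spin() macros backed by the functions above, and the
 * holder runs in a critical section for the duration:
 *
 *	mtx_lock_spin(&sched_lock);
 *	(short, non-sleeping critical region)
 *	mtx_unlock_spin(&sched_lock);
 */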
427 
428 /*
429  * The important part of mtx_trylock{,_flags}()
430  * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
431  * if we're called, it's because we know we don't already own this lock.
432  */
433 int
434 _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
435 {
436 	int rval;
437 
438 	MPASS(curthread != NULL);
439 
440 	rval = _obtain_lock(m, curthread);
441 
442 	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
443 	if (rval) {
444 		/*
445 		 * We do not handle recursion in _mtx_trylock; see the
446 		 * note at the top of the routine.
447 		 */
448 		KASSERT(!mtx_recursed(m),
449 		    ("mtx_trylock() called on a recursed mutex"));
450 		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
451 		    file, line);
452 	}
453 
454 	return (rval);
455 }
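
/*
 * Illustrative pattern (hypothetical "foo_mtx", not part of this file):
 * the mtx_trylock() wrapper returns non-zero only if the lock was
 * acquired, so callers must check the result and unlock only on
 * success:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		(work that could not afford to sleep on foo_mtx)
 *		mtx_unlock(&foo_mtx);
 *	}
 */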
456 
457 /*
458  * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
459  *
460  * We call this if the lock is either contested (i.e. we need to go to
461  * sleep waiting for it), or if we need to recurse on it.
462  */
463 void
464 _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
465 {
466 	struct thread *td = curthread;
467 #if defined(SMP) && defined(ADAPTIVE_MUTEXES)
468 	struct thread *owner;
469 #endif
470 
471 	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
472 		m->mtx_recurse++;
473 		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
474 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
475 			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
476 		return;
477 	}
478 
479 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
480 		CTR4(KTR_LOCK,
481 		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
482 		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
483 
484 	while (!_obtain_lock(m, td)) {
485 		uintptr_t v;
486 		struct thread *td1;
487 
488 		mtx_lock_spin(&sched_lock);
489 		/*
490 		 * Check if the lock has been released while spinning for
491 		 * the sched_lock.
492 		 */
493 		if ((v = m->mtx_lock) == MTX_UNOWNED) {
494 			mtx_unlock_spin(&sched_lock);
495 #ifdef __i386__
496 			ia32_pause();
497 #endif
498 			continue;
499 		}
500 
501 		/*
502 		 * The mutex was marked contested on release. This means that
503 		 * there are threads blocked on it.
504 		 */
505 		if (v == MTX_CONTESTED) {
506 			td1 = TAILQ_FIRST(&m->mtx_blocked);
507 			MPASS(td1 != NULL);
508 			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
509 
510 			if (td1->td_priority < td->td_priority)
511 				td->td_priority = td1->td_priority;
512 			mtx_unlock_spin(&sched_lock);
513 			return;
514 		}
515 
516 		/*
517 		 * If the mutex isn't already contested and a failure occurs
518 		 * setting the contested bit, the mutex was either released
519 		 * or the state of the MTX_RECURSED bit changed.
520 		 */
521 		if ((v & MTX_CONTESTED) == 0 &&
522 		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
523 			(void *)(v | MTX_CONTESTED))) {
524 			mtx_unlock_spin(&sched_lock);
525 #ifdef __i386__
526 			ia32_pause();
527 #endif
528 			continue;
529 		}
530 
531 #if defined(SMP) && defined(ADAPTIVE_MUTEXES)
532 		/*
533 		 * If the current owner of the lock is executing on another
534 		 * CPU, spin instead of blocking.
535 		 */
536 		owner = (struct thread *)(v & MTX_FLAGMASK);
537 		if (m != &Giant && thread_running(owner)) {
538 			mtx_unlock_spin(&sched_lock);
539 			while (mtx_owner(m) == owner && thread_running(owner)) {
540 #ifdef __i386__
541 				ia32_pause();
542 #endif
543 			}
544 			continue;
545 		}
546 #endif	/* SMP && ADAPTIVE_MUTEXES */
547 
548 		/*
549 		 * We definitely must sleep for this lock.
550 		 */
551 		mtx_assert(m, MA_NOTOWNED);
552 
553 #ifdef notyet
554 		/*
555 		 * If we're borrowing an interrupted thread's VM context, we
556 		 * must clean up before going to sleep.
557 		 */
558 		if (td->td_ithd != NULL) {
559 			struct ithd *it = td->td_ithd;
560 
561 			if (it->it_interrupted) {
562 				if (LOCK_LOG_TEST(&m->mtx_object, opts))
563 					CTR2(KTR_LOCK,
564 				    "_mtx_lock_sleep: %p interrupted %p",
565 					    it, it->it_interrupted);
566 				intr_thd_fixup(it);
567 			}
568 		}
569 #endif
570 
571 		/*
572 		 * Put us on the list of threads blocked on this mutex.
573 		 */
574 		if (TAILQ_EMPTY(&m->mtx_blocked)) {
575 			td1 = mtx_owner(m);
576 			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
577 			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
578 		} else {
579 			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
580 				if (td1->td_priority > td->td_priority)
581 					break;
582 			if (td1)
583 				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
584 			else
585 				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
586 		}
587 
588 		/*
589 		 * Save who we're blocked on.
590 		 */
591 		td->td_blocked = m;
592 		td->td_mtxname = m->mtx_object.lo_name;
593 		td->td_proc->p_stat = SMTX;
594 		propagate_priority(td);
595 
596 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
597 			CTR3(KTR_LOCK,
598 			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
599 			    m->mtx_object.lo_name);
600 
601 		td->td_proc->p_stats->p_ru.ru_nvcsw++;
602 		mi_switch();
603 
604 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
605 			CTR3(KTR_LOCK,
606 			  "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
607 			  td, m, m->mtx_object.lo_name);
608 
609 		mtx_unlock_spin(&sched_lock);
610 	}
611 
612 	return;
613 }
614 
615 /*
616  * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
617  *
618  * This is only called if we need to actually spin for the lock. Recursion
619  * is handled inline.
620  */
621 void
622 _mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
623 {
624 	int i = 0;
625 
626 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
627 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
628 
629 	for (;;) {
630 		if (_obtain_lock(m, curthread))
631 			break;
632 
633 		/* Give interrupts a chance while we spin. */
634 		critical_exit();
635 		while (m->mtx_lock != MTX_UNOWNED) {
636 			if (i++ < 10000000) {
637 #ifdef __i386__
638 				ia32_pause();
639 #endif
640 				continue;
641 			}
642 			if (i < 60000000)
643 				DELAY(1);
644 #ifdef DDB
645 			else if (!db_active)
646 #else
647 			else
648 #endif
649 				panic("spin lock %s held by %p for > 5 seconds",
650 				    m->mtx_object.lo_name, (void *)m->mtx_lock);
651 #ifdef __i386__
652 			ia32_pause();
653 #endif
654 		}
655 		critical_enter();
656 	}
657 
658 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
659 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
660 
661 	return;
662 }
663 
664 /*
665  * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
666  *
667  * We are only called here if the lock is recursed or contested (i.e. we
668  * need to wake up a blocked thread).
669  */
670 void
671 _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
672 {
673 	struct thread *td, *td1;
674 	struct mtx *m1;
675 	int pri;
676 
677 	td = curthread;
678 
679 	if (mtx_recursed(m)) {
680 		if (--(m->mtx_recurse) == 0)
681 			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
682 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
683 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
684 		return;
685 	}
686 
687 	mtx_lock_spin(&sched_lock);
688 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
689 		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
690 
691 	td1 = TAILQ_FIRST(&m->mtx_blocked);
692 #if defined(SMP) && defined(ADAPTIVE_MUTEXES)
693 	if (td1 == NULL) {
694 		_release_lock_quick(m);
695 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
696 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
697 		mtx_unlock_spin(&sched_lock);
698 		return;
699 	}
700 #endif
701 	MPASS(td->td_proc->p_magic == P_MAGIC);
702 	MPASS(td1->td_proc->p_magic == P_MAGIC);
703 
704 	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);
705 
706 	if (TAILQ_EMPTY(&m->mtx_blocked)) {
707 		LIST_REMOVE(m, mtx_contested);
708 		_release_lock_quick(m);
709 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
710 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
711 	} else
712 		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
713 
714 	pri = PRI_MAX;
715 	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
716 		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
717 		if (cp < pri)
718 			pri = cp;
719 	}
720 
721 	if (pri > td->td_base_pri)
722 		pri = td->td_base_pri;
723 	td->td_priority = pri;
724 
725 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
726 		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
727 		    m, td1);
728 
729 	td1->td_blocked = NULL;
730 	td1->td_proc->p_stat = SRUN;
731 	setrunqueue(td1);
732 
733 	if (td->td_critnest == 1 && td1->td_priority < pri) {
734 #ifdef notyet
735 		if (td->td_ithd != NULL) {
736 			struct ithd *it = td->td_ithd;
737 
738 			if (it->it_interrupted) {
739 				if (LOCK_LOG_TEST(&m->mtx_object, opts))
740 					CTR2(KTR_LOCK,
741 				    "_mtx_unlock_sleep: %p interrupted %p",
742 					    it, it->it_interrupted);
743 				intr_thd_fixup(it);
744 			}
745 		}
746 #endif
747 		setrunqueue(td);
748 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
749 			CTR2(KTR_LOCK,
750 			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
751 			    (void *)m->mtx_lock);
752 
753 		td->td_proc->p_stats->p_ru.ru_nivcsw++;
754 		mi_switch();
755 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
756 			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
757 			    m, (void *)m->mtx_lock);
758 	}
759 
760 	mtx_unlock_spin(&sched_lock);
761 
762 	return;
763 }
764 
765 /*
766  * All the unlocking of MTX_SPIN locks is done inline.
767  * See the _rel_spin_lock() macro for the details.
768  */
769 
770 /*
771  * The backing function for the INVARIANTS-enabled mtx_assert()
772  */
773 #ifdef INVARIANT_SUPPORT
774 void
775 _mtx_assert(struct mtx *m, int what, const char *file, int line)
776 {
777 
778 	if (panicstr != NULL)
779 		return;
780 	switch (what) {
781 	case MA_OWNED:
782 	case MA_OWNED | MA_RECURSED:
783 	case MA_OWNED | MA_NOTRECURSED:
784 		if (!mtx_owned(m))
785 			panic("mutex %s not owned at %s:%d",
786 			    m->mtx_object.lo_name, file, line);
787 		if (mtx_recursed(m)) {
788 			if ((what & MA_NOTRECURSED) != 0)
789 				panic("mutex %s recursed at %s:%d",
790 				    m->mtx_object.lo_name, file, line);
791 		} else if ((what & MA_RECURSED) != 0) {
792 			panic("mutex %s unrecursed at %s:%d",
793 			    m->mtx_object.lo_name, file, line);
794 		}
795 		break;
796 	case MA_NOTOWNED:
797 		if (mtx_owned(m))
798 			panic("mutex %s owned at %s:%d",
799 			    m->mtx_object.lo_name, file, line);
800 		break;
801 	default:
802 		panic("unknown mtx_assert at %s:%d", file, line);
803 	}
804 }
805 #endif
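
/*
 * Illustrative assertions (hypothetical "foo_mtx"): code that depends
 * on a lock being held, not held, or held exactly once typically
 * guards itself with the mtx_assert() macro backed by the function
 * above:
 *
 *	mtx_assert(&foo_mtx, MA_OWNED);
 *	mtx_assert(&foo_mtx, MA_NOTOWNED);
 *	mtx_assert(&foo_mtx, MA_OWNED | MA_NOTRECURSED);
 */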
806 
807 /*
808  * The MUTEX_DEBUG-enabled mtx_validate()
809  *
810  * Most of these checks have been moved off into the LO_INITIALIZED flag
811  * maintained by the witness code.
812  */
813 #ifdef MUTEX_DEBUG
814 
815 void	mtx_validate(struct mtx *);
816 
817 void
818 mtx_validate(struct mtx *m)
819 {
820 
821 /*
822  * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
823  * we can re-enable the kernacc() checks.
824  */
825 #ifndef __alpha__
826 	/*
827 	 * Can't call kernacc() from early init386(), especially when
828 	 * initializing Giant mutex, because some stuff in kernacc()
829 	 * requires Giant itself.
830 	 */
831 	if (!cold)
832 		if (!kernacc((caddr_t)m, sizeof(*m),
833 		    VM_PROT_READ | VM_PROT_WRITE))
834 			panic("Can't read and write to mutex %p", m);
835 #endif
836 }
837 #endif
838 
839 /*
840  * General init routine used by the MTX_SYSINIT() macro.
841  */
842 void
843 mtx_sysinit(void *arg)
844 {
845 	struct mtx_args *margs = arg;
846 
847 	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
848 }
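
/*
 * Illustrative MTX_SYSINIT() use (see the mprof_mtx declaration earlier
 * in this file for a real instance); the hypothetical "foo" lock is
 * initialized automatically at boot via mtx_sysinit():
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo lock", MTX_DEF);
 */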
849 
850 /*
851  * Mutex initialization routine; initialize lock `m' with the type and
852  * options contained in `opts' and name `name.'  The optional
853  * lock type `type' is used as a general lock category name for use with
854  * witness.
855  */
856 void
857 mtx_init(struct mtx *m, const char *name, const char *type, int opts)
858 {
859 	struct lock_object *lock;
860 
861 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
862 	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);
863 
864 #ifdef MUTEX_DEBUG
865 	/* Diagnostic and error correction */
866 	mtx_validate(m);
867 #endif
868 
869 	lock = &m->mtx_object;
870 	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
871 	    ("mutex %s %p already initialized", name, m));
872 	bzero(m, sizeof(*m));
873 	if (opts & MTX_SPIN)
874 		lock->lo_class = &lock_class_mtx_spin;
875 	else
876 		lock->lo_class = &lock_class_mtx_sleep;
877 	lock->lo_name = name;
878 	lock->lo_type = type != NULL ? type : name;
879 	if (opts & MTX_QUIET)
880 		lock->lo_flags = LO_QUIET;
881 	if (opts & MTX_RECURSE)
882 		lock->lo_flags |= LO_RECURSABLE;
883 	if (opts & MTX_SLEEPABLE)
884 		lock->lo_flags |= LO_SLEEPABLE;
885 	if ((opts & MTX_NOWITNESS) == 0)
886 		lock->lo_flags |= LO_WITNESS;
887 	if (opts & MTX_DUPOK)
888 		lock->lo_flags |= LO_DUPOK;
889 
890 	m->mtx_lock = MTX_UNOWNED;
891 	TAILQ_INIT(&m->mtx_blocked);
892 
893 	LOCK_LOG_INIT(lock, opts);
894 
895 	WITNESS_INIT(lock);
896 }
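
/*
 * Illustrative mtx_init() calls (hypothetical locks; mutex_init() below
 * shows real ones).  The third argument names a witness lock type so
 * that distinct locks of the same kind can share one ordering class:
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	mtx_init(&bar_mtx, "bar", "bar class", MTX_DEF | MTX_RECURSE);
 */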
897 
898 /*
899  * Destroy lock `m'.  We don't allow MTX_QUIET to be
900  * passed in as a flag here because if the corresponding mtx_init() was
901  * called with MTX_QUIET set, then it will already be set in the mutex's
902  * flags.
903  */
904 void
905 mtx_destroy(struct mtx *m)
906 {
907 
908 	LOCK_LOG_DESTROY(&m->mtx_object, 0);
909 
910 	if (!mtx_owned(m))
911 		MPASS(mtx_unowned(m));
912 	else {
913 		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
914 
915 		/* Tell witness this isn't locked to make it happy. */
916 		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
917 		    __LINE__);
918 	}
919 
920 	WITNESS_DESTROY(&m->mtx_object);
921 }
922 
923 /*
924  * Initialize the mutex code and system mutexes.  This is called from the MD
925  * startup code prior to mi_startup().  The per-CPU data space needs to be
926  * set up before this is called.
927  */
928 void
929 mutex_init(void)
930 {
931 
932 	/* Set up thread0 so that mutexes work. */
933 	LIST_INIT(&thread0.td_contested);
934 
935 	/*
936 	 * Initialize mutexes.
937 	 */
938 	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
939 	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
940 	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
941 	mtx_lock(&Giant);
942 }
943 
944 /*
945  * Encapsulated Giant mutex routines.  These routines provide encapsulation
946  * control for the Giant mutex, allowing sysctls to be used to turn on and
947  * off Giant around certain subsystems.  The default values for the sysctls
948  * are set to what developers believe is stable and working with regard to
949  * the Giant pushdown.  Developers should not turn off Giant via these
950  * sysctls unless they know what they are doing.
951  *
952  * Callers of mtx_lock_giant() are expected to pass the return value to an
953  * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
954  * affected by a Giant wrap, all related sysctl variables must be zero for
955  * the subsystem call to operate without Giant (as determined by the caller).
956  */
957 
958 SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");
959 
960 static int kern_giant_all = 0;
961 SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");
962 
963 int kern_giant_proc = 1;	/* Giant around PROC locks */
964 int kern_giant_file = 1;	/* Giant around struct file & filedesc */
965 int kern_giant_ucred = 1;	/* Giant around ucred */
966 SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
967 SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
968 SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");
969 
970 int
971 mtx_lock_giant(int sysctlvar)
972 {
973 	if (sysctlvar || kern_giant_all) {
974 		mtx_lock(&Giant);
975 		return(1);
976 	}
977 	return(0);
978 }
979 
980 void
981 mtx_unlock_giant(int s)
982 {
983 	if (s)
984 		mtx_unlock(&Giant);
985 }
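
/*
 * Illustrative Giant-wrap pattern (per the comment above; hypothetical
 * caller): the value returned by mtx_lock_giant() records whether Giant
 * was actually taken and must be handed back to mtx_unlock_giant():
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_proc);
 *	(subsystem code that may still require Giant)
 *	mtx_unlock_giant(s);
 */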
986 
987