xref: /freebsd/sys/kern/kern_mutex.c (revision c4f6a2a9e1b1879b618c436ab4f56ff75c73a0f5)
1 /*-
2  * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  * 3. Berkeley Software Design Inc's name may not be used to endorse or
13  *    promote products derived from this software without specific prior
14  *    written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30  * $FreeBSD$
31  */
32 
33 /*
34  * Machine-independent bits of the mutex implementation.
35  */
36 
37 #include "opt_adaptive_mutexes.h"
38 #include "opt_ddb.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/bus.h>
43 #include <sys/kernel.h>
44 #include <sys/ktr.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/mutex.h>
48 #include <sys/proc.h>
49 #include <sys/resourcevar.h>
50 #include <sys/sbuf.h>
51 #include <sys/stdint.h>
52 #include <sys/sysctl.h>
53 #include <sys/vmmeter.h>
54 
55 #include <machine/atomic.h>
56 #include <machine/bus.h>
57 #include <machine/clock.h>
58 #include <machine/cpu.h>
59 
60 #include <ddb/ddb.h>
61 
62 #include <vm/vm.h>
63 #include <vm/vm_extern.h>
64 
65 /*
66  * Internal utility macros.
67  */
68 #define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
69 
70 #define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
71 	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
72 
73 /* XXXKSE This test will change. */
74 #define	thread_running(td)						\
75 	((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)
76 
77 /*
78  * Lock classes for sleep and spin mutexes.
79  */
80 struct lock_class lock_class_mtx_sleep = {
81 	"sleep mutex",
82 	LC_SLEEPLOCK | LC_RECURSABLE
83 };
84 struct lock_class lock_class_mtx_spin = {
85 	"spin mutex",
86 	LC_SPINLOCK | LC_RECURSABLE
87 };
88 
89 /*
90  * System-wide mutexes
91  */
92 struct mtx sched_lock;
93 struct mtx Giant;
94 
95 /*
96  * Prototypes for non-exported routines.
97  */
98 static void	propagate_priority(struct thread *);
99 
100 static void
101 propagate_priority(struct thread *td)
102 {
103 	int pri = td->td_priority;
104 	struct mtx *m = td->td_blocked;
105 
106 	mtx_assert(&sched_lock, MA_OWNED);
107 	for (;;) {
108 		struct thread *td1;
109 
110 		td = mtx_owner(m);
111 
112 		if (td == NULL) {
113 			/*
114 			 * This really isn't quite right.  We really
115 			 * ought to bump the priority of the thread that
116 			 * next acquires the mutex.
117 			 */
118 			MPASS(m->mtx_lock == MTX_CONTESTED);
119 			return;
120 		}
121 
122 		KASSERT(td->td_state != TDS_SURPLUS, ("Mutex owner SURPLUS"));
123 		MPASS(td->td_proc != NULL);
124 		MPASS(td->td_proc->p_magic == P_MAGIC);
125 		KASSERT(td->td_state != TDS_SLP,
126 		    ("sleeping thread owns a mutex"));
127 		if (td->td_priority <= pri) /* lower is higher priority */
128 			return;
129 
130 
131 		/*
132 		 * If lock holder is actually running, just bump priority.
133 		 */
134 		if (td->td_state == TDS_RUNNING) {
135 			td->td_priority = pri;
136 			return;
137 		}
138 
139 #ifndef SMP
140 		/*
141 		 * For UP, we check to see if td is curthread (this should
142 		 * never happen, however, as it would mean we are deadlocked).
143 		 */
144 		KASSERT(td != curthread, ("Deadlock detected"));
145 #endif
146 
147 		/*
148 		 * If the thread is on a run queue, move it to the new run
149 		 * queue and quit.  XXXKSE this gets a lot more complicated
150 		 * under threads, but try anyhow.
151 		 * We should have a special call to do this more efficiently.
152 		 */
153 		if (td->td_state == TDS_RUNQ) {
154 			MPASS(td->td_blocked == NULL);
155 			remrunqueue(td);
156 			td->td_priority = pri;
157 			setrunqueue(td);
158 			return;
159 		}
160 		/*
161 		 * Adjust for any other cases.
162 		 */
163 		td->td_priority = pri;
164 
165 		/*
166 		 * If we aren't blocked on a mutex, we should be.
167 		 */
168 		KASSERT(td->td_state == TDS_MTX, (
169 		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
170 		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
171 		    m->mtx_object.lo_name));
172 
173 		/*
174 		 * Pick up the mutex that td is blocked on.
175 		 */
176 		m = td->td_blocked;
177 		MPASS(m != NULL);
178 
179 		/*
180 		 * Check if the thread needs to be moved up on
181 		 * the blocked chain.
182 		 */
183 		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
184 			continue;
185 		}
186 
187 		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
188 		if (td1->td_priority <= pri) {
189 			continue;
190 		}
191 
192 		/*
193 		 * Remove thread from blocked chain and determine where
194 		 * it should be moved up to.  Since we know that td1 has
195 		 * a lower priority than td, we know that at least one
196 		 * thread in the chain has a lower priority and that
197 		 * td1 will thus not be NULL after the loop.
198 		 */
199 		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
200 		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
201 			MPASS(td1->td_proc->p_magic == P_MAGIC);
202 			if (td1->td_priority > pri)
203 				break;
204 		}
205 
206 		MPASS(td1 != NULL);
207 		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
208 		CTR4(KTR_LOCK,
209 		    "propagate_priority: p %p moved before %p on [%p] %s",
210 		    td, td1, m, m->mtx_object.lo_name);
211 	}
212 }
213 
214 #ifdef MUTEX_PROFILING
215 SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
216 SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
217 static int mutex_prof_enable = 0;
218 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
219     &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");
220 
221 struct mutex_prof {
222 	const char *name;
223 	const char *file;
224 	int line;
225 #define MPROF_MAX 0
226 #define MPROF_TOT 1
227 #define MPROF_CNT 2
228 #define MPROF_AVG 3
229 	uintmax_t counter[4];
230 	struct mutex_prof *next;
231 };
232 
233 /*
234  * mprof_buf is a static pool of profiling records to avoid possible
235  * reentrance of the memory allocation functions.
236  *
237  * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
238  */
239 #define NUM_MPROF_BUFFERS 1000
240 static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
241 static int first_free_mprof_buf;
242 #define MPROF_HASH_SIZE 1009
243 static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
244 
245 static int mutex_prof_acquisitions;
246 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
247     &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
248 static int mutex_prof_records;
249 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
250     &mutex_prof_records, 0, "Number of profiling records");
251 static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
252 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
253     &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
254 static int mutex_prof_rejected;
255 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
256     &mutex_prof_rejected, 0, "Number of rejected profiling records");
257 static int mutex_prof_hashsize = MPROF_HASH_SIZE;
258 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
259     &mutex_prof_hashsize, 0, "Hash size");
260 static int mutex_prof_collisions = 0;
261 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
262     &mutex_prof_collisions, 0, "Number of hash collisions");
263 
264 /*
265  * mprof_mtx protects the profiling buffers and the hash.
266  */
267 static struct mtx mprof_mtx;
268 MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);
269 
270 static u_int64_t
271 nanoseconds(void)
272 {
273 	struct timespec tv;
274 
275 	nanotime(&tv);
276 	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
277 }
278 
279 static int
280 dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
281 {
282 	struct sbuf *sb;
283 	int error, i;
284 
285 	if (first_free_mprof_buf == 0)
286 		return SYSCTL_OUT(req, "No locking recorded",
287 		    sizeof("No locking recorded"));
288 
289 	sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
290 	sbuf_printf(sb, "%12s %12s %12s %12s %s\n",
291 	    "max", "total", "count", "average", "name");
292 	mtx_lock_spin(&mprof_mtx);
293 	for (i = 0; i < first_free_mprof_buf; ++i)
294 		sbuf_printf(sb, "%12ju %12ju %12ju %12ju %s:%d (%s)\n",
295 		    mprof_buf[i].counter[MPROF_MAX] / 1000,
296 		    mprof_buf[i].counter[MPROF_TOT] / 1000,
297 		    mprof_buf[i].counter[MPROF_CNT],
298 		    mprof_buf[i].counter[MPROF_AVG] / 1000,
299 		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
300 	mtx_unlock_spin(&mprof_mtx);
301 	sbuf_finish(sb);
302 	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
303 	sbuf_delete(sb);
304 	return (error);
305 }
306 SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING|CTLFLAG_RD,
307     NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
308 #endif
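
/*
 * With MUTEX_PROFILING compiled into the kernel, profiling is controlled
 * and read back through the sysctl tree declared above.  A minimal usage
 * sketch from userland (illustrative sysctl(8) invocations):
 *
 *	sysctl debug.mutex.prof.enable=1	# start recording hold times
 *	sysctl debug.mutex.prof.stats		# dump the per-site statistics
 *	sysctl debug.mutex.prof.enable=0	# stop recording
 */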
309 
310 /*
311  * Function versions of the inlined __mtx_* macros.  These are used by
312  * modules and can also be called from assembly language if needed.
313  */
314 void
315 _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
316 {
317 
318 	MPASS(curthread != NULL);
319 	_get_sleep_lock(m, curthread, opts, file, line);
320 	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
321 	    line);
322 	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
323 #ifdef MUTEX_PROFILING
324 	/* don't reset the timer when/if recursing */
325 	if (m->mtx_acqtime == 0) {
326 		m->mtx_filename = file;
327 		m->mtx_lineno = line;
328 		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
329 		++mutex_prof_acquisitions;
330 	}
331 #endif
332 }
333 
334 void
335 _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
336 {
337 
338 	MPASS(curthread != NULL);
339 	mtx_assert(m, MA_OWNED);
340 #ifdef MUTEX_PROFILING
341 	if (m->mtx_acqtime != 0) {
342 		static const char *unknown = "(unknown)";
343 		struct mutex_prof *mpp;
344 		u_int64_t acqtime, now;
345 		const char *p, *q;
346 		volatile u_int hash;
347 
348 		now = nanoseconds();
349 		acqtime = m->mtx_acqtime;
350 		m->mtx_acqtime = 0;
351 		if (now <= acqtime)
352 			goto out;
353 		for (p = m->mtx_filename; strncmp(p, "../", 3) == 0; p += 3)
354 			/* nothing */ ;
355 		if (p == NULL || *p == '\0')
356 			p = unknown;
357 		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
358 			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
359 		mtx_lock_spin(&mprof_mtx);
360 		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
361 			if (mpp->line == m->mtx_lineno &&
362 			    strcmp(mpp->file, p) == 0)
363 				break;
364 		if (mpp == NULL) {
365 			/* Just exit if we cannot get a trace buffer */
366 			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
367 				++mutex_prof_rejected;
368 				goto unlock;
369 			}
370 			mpp = &mprof_buf[first_free_mprof_buf++];
371 			mpp->name = mtx_name(m);
372 			mpp->file = p;
373 			mpp->line = m->mtx_lineno;
374 			mpp->next = mprof_hash[hash];
375 			if (mprof_hash[hash] != NULL)
376 				++mutex_prof_collisions;
377 			mprof_hash[hash] = mpp;
378 			++mutex_prof_records;
379 		}
380 		/*
381 		 * Record if the mutex has been held longer now than ever
382 		 * before.
383 		 */
384 		if ((now - acqtime) > mpp->counter[MPROF_MAX])
385 			mpp->counter[MPROF_MAX] = now - acqtime;
386 		mpp->counter[MPROF_TOT] += now - acqtime;
387 		mpp->counter[MPROF_CNT] += 1;
388 		mpp->counter[MPROF_AVG] =
389 		    mpp->counter[MPROF_TOT] / mpp->counter[MPROF_CNT];
390 unlock:
391 		mtx_unlock_spin(&mprof_mtx);
392 	}
393 out:
394 #endif
395  	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
396 	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
397 	    line);
398 	_rel_sleep_lock(m, curthread, opts, file, line);
399 }
400 
401 void
402 _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
403 {
404 
405 	MPASS(curthread != NULL);
406 #if defined(SMP) || LOCK_DEBUG > 0 || 1
407 	_get_spin_lock(m, curthread, opts, file, line);
408 #else
409 	critical_enter();
410 #endif
411 	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
412 	    line);
413 	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
414 }
415 
416 void
417 _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
418 {
419 
420 	MPASS(curthread != NULL);
421 	mtx_assert(m, MA_OWNED);
422  	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
423 	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
424 	    line);
425 #if defined(SMP) || LOCK_DEBUG > 0 || 1
426 	_rel_spin_lock(m);
427 #else
428 	critical_exit();
429 #endif
430 }
431 
432 /*
433  * The important part of mtx_trylock{,_flags}().
434  * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that
435  * if we're called, it's because we know we don't already own this lock.
436  */
437 int
438 _mtx_trylock(struct mtx *m, int opts, const char *file, int line)
439 {
440 	int rval;
441 
442 	MPASS(curthread != NULL);
443 
444 	rval = _obtain_lock(m, curthread);
445 
446 	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
447 	if (rval) {
448 		/*
449 		 * We do not handle recursion in _mtx_trylock; see the
450 		 * note at the top of the routine.
451 		 */
452 		KASSERT(!mtx_recursed(m),
453 		    ("mtx_trylock() called on a recursed mutex"));
454 		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
455 		    file, line);
456 	}
457 
458 	return (rval);
459 }
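
/*
 * An illustrative caller pattern for the mtx_trylock() wrapper built on
 * the routine above (a sketch only; "foo_mtx" is a hypothetical mutex):
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... use the protected data, having acquired the lock
 *		    without blocking ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... the lock was busy; take a non-blocking fallback path ...
 *	}
 */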
460 
461 /*
462  * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
463  *
464  * We call this if the lock is either contested (i.e. we need to go to
465  * sleep waiting for it), or if we need to recurse on it.
466  */
467 void
468 _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
469 {
470 	struct thread *td = curthread;
471 #if defined(SMP) && defined(ADAPTIVE_MUTEXES)
472 	struct thread *owner;
473 #endif
474 
475 	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
476 		m->mtx_recurse++;
477 		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
478 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
479 			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
480 		return;
481 	}
482 
483 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
484 		CTR4(KTR_LOCK,
485 		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
486 		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
487 
488 	while (!_obtain_lock(m, td)) {
489 		uintptr_t v;
490 		struct thread *td1;
491 
492 		mtx_lock_spin(&sched_lock);
493 		/*
494 		 * Check if the lock has been released while spinning for
495 		 * the sched_lock.
496 		 */
497 		if ((v = m->mtx_lock) == MTX_UNOWNED) {
498 			mtx_unlock_spin(&sched_lock);
499 #ifdef __i386__
500 			ia32_pause();
501 #endif
502 			continue;
503 		}
504 
505 		/*
506 		 * The mutex was marked contested on release. This means that
507 		 * there are threads blocked on it.
508 		 */
509 		if (v == MTX_CONTESTED) {
510 			td1 = TAILQ_FIRST(&m->mtx_blocked);
511 			MPASS(td1 != NULL);
512 			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
513 
514 			if (td1->td_priority < td->td_priority)
515 				td->td_priority = td1->td_priority;
516 			mtx_unlock_spin(&sched_lock);
517 			return;
518 		}
519 
520 		/*
521 		 * If the mutex isn't already contested and a failure occurs
522 		 * setting the contested bit, the mutex was either released
523 		 * or the state of the MTX_RECURSED bit changed.
524 		 */
525 		if ((v & MTX_CONTESTED) == 0 &&
526 		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
527 			(void *)(v | MTX_CONTESTED))) {
528 			mtx_unlock_spin(&sched_lock);
529 #ifdef __i386__
530 			ia32_pause();
531 #endif
532 			continue;
533 		}
534 
535 #if defined(SMP) && defined(ADAPTIVE_MUTEXES)
536 		/*
537 		 * If the current owner of the lock is executing on another
538 		 * CPU, spin instead of blocking.
539 		 */
540 		owner = (struct thread *)(v & MTX_FLAGMASK);
541 		if (m != &Giant && thread_running(owner)) {
542 			mtx_unlock_spin(&sched_lock);
543 			while (mtx_owner(m) == owner && thread_running(owner)) {
544 #ifdef __i386__
545 				ia32_pause();
546 #endif
547 			}
548 			continue;
549 		}
550 #endif	/* SMP && ADAPTIVE_MUTEXES */
551 
552 		/*
553 		 * We definitely must sleep for this lock.
554 		 */
555 		mtx_assert(m, MA_NOTOWNED);
556 
557 #ifdef notyet
558 		/*
559 		 * If we're borrowing an interrupted thread's VM context, we
560 		 * must clean up before going to sleep.
561 		 */
562 		if (td->td_ithd != NULL) {
563 			struct ithd *it = td->td_ithd;
564 
565 			if (it->it_interrupted) {
566 				if (LOCK_LOG_TEST(&m->mtx_object, opts))
567 					CTR2(KTR_LOCK,
568 				    "_mtx_lock_sleep: %p interrupted %p",
569 					    it, it->it_interrupted);
570 				intr_thd_fixup(it);
571 			}
572 		}
573 #endif
574 
575 		/*
576 		 * Put us on the list of threads blocked on this mutex.
577 		 */
578 		if (TAILQ_EMPTY(&m->mtx_blocked)) {
579 			td1 = mtx_owner(m);
580 			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
581 			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
582 		} else {
583 			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
584 				if (td1->td_priority > td->td_priority)
585 					break;
586 			if (td1)
587 				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
588 			else
589 				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
590 		}
591 
592 		/*
593 		 * Save who we're blocked on.
594 		 */
595 		td->td_blocked = m;
596 		td->td_mtxname = m->mtx_object.lo_name;
597 		td->td_state = TDS_MTX;
598 		propagate_priority(td);
599 
600 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
601 			CTR3(KTR_LOCK,
602 			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
603 			    m->mtx_object.lo_name);
604 
605 		td->td_proc->p_stats->p_ru.ru_nvcsw++;
606 		mi_switch();
607 
608 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
609 			CTR3(KTR_LOCK,
610 			  "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
611 			  td, m, m->mtx_object.lo_name);
612 
613 		mtx_unlock_spin(&sched_lock);
614 	}
615 
616 	return;
617 }
618 
619 /*
620  * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
621  *
622  * This is only called if we need to actually spin for the lock. Recursion
623  * is handled inline.
624  */
625 void
626 _mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
627 {
628 	int i = 0;
629 
630 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
631 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
632 
633 	for (;;) {
634 		if (_obtain_lock(m, curthread))
635 			break;
636 
637 		/* Give interrupts a chance while we spin. */
638 		critical_exit();
639 		while (m->mtx_lock != MTX_UNOWNED) {
640 			if (i++ < 10000000) {
641 #ifdef __i386__
642 				ia32_pause();
643 #endif
644 				continue;
645 			}
646 			if (i < 60000000)
647 				DELAY(1);
648 #ifdef DDB
649 			else if (!db_active)
650 #else
651 			else
652 #endif
653 				panic("spin lock %s held by %p for > 5 seconds",
654 				    m->mtx_object.lo_name, (void *)m->mtx_lock);
655 #ifdef __i386__
656 			ia32_pause();
657 #endif
658 		}
659 		critical_enter();
660 	}
661 
662 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
663 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
664 
665 	return;
666 }
667 
668 /*
669  * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
670  *
671  * We are only called here if the lock is recursed or contested (i.e. we
672  * need to wake up a blocked thread).
673  */
674 void
675 _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
676 {
677 	struct thread *td, *td1;
678 	struct mtx *m1;
679 	int pri;
680 
681 	td = curthread;
682 
683 	if (mtx_recursed(m)) {
684 		if (--(m->mtx_recurse) == 0)
685 			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
686 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
687 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
688 		return;
689 	}
690 
691 	mtx_lock_spin(&sched_lock);
692 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
693 		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
694 
695 	td1 = TAILQ_FIRST(&m->mtx_blocked);
696 #if defined(SMP) && defined(ADAPTIVE_MUTEXES)
697 	if (td1 == NULL) {
698 		_release_lock_quick(m);
699 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
700 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
701 		mtx_unlock_spin(&sched_lock);
702 		return;
703 	}
704 #endif
705 	MPASS(td->td_proc->p_magic == P_MAGIC);
706 	MPASS(td1->td_proc->p_magic == P_MAGIC);
707 
708 	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);
709 
710 	if (TAILQ_EMPTY(&m->mtx_blocked)) {
711 		LIST_REMOVE(m, mtx_contested);
712 		_release_lock_quick(m);
713 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
714 			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
715 	} else
716 		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
717 
718 	pri = PRI_MAX;
719 	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
720 		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
721 		if (cp < pri)
722 			pri = cp;
723 	}
724 
725 	if (pri > td->td_base_pri)
726 		pri = td->td_base_pri;
727 	td->td_priority = pri;
728 
729 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
730 		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
731 		    m, td1);
732 
733 	td1->td_blocked = NULL;
734 	setrunqueue(td1);
735 
736 	if (td->td_critnest == 1 && td1->td_priority < pri) {
737 #ifdef notyet
738 		if (td->td_ithd != NULL) {
739 			struct ithd *it = td->td_ithd;
740 
741 			if (it->it_interrupted) {
742 				if (LOCK_LOG_TEST(&m->mtx_object, opts))
743 					CTR2(KTR_LOCK,
744 				    "_mtx_unlock_sleep: %p interrupted %p",
745 					    it, it->it_interrupted);
746 				intr_thd_fixup(it);
747 			}
748 		}
749 #endif
750 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
751 			CTR2(KTR_LOCK,
752 			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
753 			    (void *)m->mtx_lock);
754 
755 		td->td_proc->p_stats->p_ru.ru_nivcsw++;
756 		mi_switch();
757 		if (LOCK_LOG_TEST(&m->mtx_object, opts))
758 			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
759 			    m, (void *)m->mtx_lock);
760 	}
761 
762 	mtx_unlock_spin(&sched_lock);
763 
764 	return;
765 }
766 
767 /*
768  * All the unlocking of MTX_SPIN locks is done inline.
769  * See the _rel_spin_lock() macro for the details.
770  */
771 
772 /*
773  * The backing function for the INVARIANTS-enabled mtx_assert()
774  */
775 #ifdef INVARIANT_SUPPORT
776 void
777 _mtx_assert(struct mtx *m, int what, const char *file, int line)
778 {
779 
780 	if (panicstr != NULL)
781 		return;
782 	switch (what) {
783 	case MA_OWNED:
784 	case MA_OWNED | MA_RECURSED:
785 	case MA_OWNED | MA_NOTRECURSED:
786 		if (!mtx_owned(m))
787 			panic("mutex %s not owned at %s:%d",
788 			    m->mtx_object.lo_name, file, line);
789 		if (mtx_recursed(m)) {
790 			if ((what & MA_NOTRECURSED) != 0)
791 				panic("mutex %s recursed at %s:%d",
792 				    m->mtx_object.lo_name, file, line);
793 		} else if ((what & MA_RECURSED) != 0) {
794 			panic("mutex %s unrecursed at %s:%d",
795 			    m->mtx_object.lo_name, file, line);
796 		}
797 		break;
798 	case MA_NOTOWNED:
799 		if (mtx_owned(m))
800 			panic("mutex %s owned at %s:%d",
801 			    m->mtx_object.lo_name, file, line);
802 		break;
803 	default:
804 		panic("unknown mtx_assert at %s:%d", file, line);
805 	}
806 }
807 #endif
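
/*
 * Typical uses of the corresponding mtx_assert() macro, which is only
 * active in INVARIANTS kernels (the mutex name is hypothetical):
 *
 *	mtx_assert(&foo_mtx, MA_OWNED);
 *	mtx_assert(&foo_mtx, MA_OWNED | MA_NOTRECURSED);
 *	mtx_assert(&foo_mtx, MA_NOTOWNED);
 */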
808 
809 /*
810  * The MUTEX_DEBUG-enabled mtx_validate()
811  *
812  * Most of these checks have been moved off into the LO_INITIALIZED flag
813  * maintained by the witness code.
814  */
815 #ifdef MUTEX_DEBUG
816 
817 void	mtx_validate(struct mtx *);
818 
819 void
820 mtx_validate(struct mtx *m)
821 {
822 
823 /*
824  * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
825  * we can re-enable the kernacc() checks.
826  */
827 #ifndef __alpha__
828 	/*
829 	 * Can't call kernacc() from early init386(), especially when
830 	 * initializing Giant mutex, because some stuff in kernacc()
831 	 * requires Giant itself.
832 	 */
833 	if (!cold)
834 		if (!kernacc((caddr_t)m, sizeof(*m),
835 		    VM_PROT_READ | VM_PROT_WRITE))
836 			panic("Can't read and write to mutex %p", m);
837 #endif
838 }
839 #endif
840 
841 /*
842  * General init routine used by the MTX_SYSINIT() macro.
843  */
844 void
845 mtx_sysinit(void *arg)
846 {
847 	struct mtx_args *margs = arg;
848 
849 	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
850 }
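
/*
 * An illustrative use of the MTX_SYSINIT() macro that this routine backs
 * (compare the mprof_mtx setup above); "foo" and "foo_mtx" are
 * hypothetical names:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo lock", MTX_DEF);
 */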
851 
852 /*
853  * Mutex initialization routine; initialize lock `m' with the type and
854  * options contained in `opts' and name `name.'  The optional
855  * lock type `type' is used as a general lock category name for use with
856  * witness.
857  */
858 void
859 mtx_init(struct mtx *m, const char *name, const char *type, int opts)
860 {
861 	struct lock_object *lock;
862 
863 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
864 	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);
865 
866 #ifdef MUTEX_DEBUG
867 	/* Diagnostic and error correction */
868 	mtx_validate(m);
869 #endif
870 
871 	lock = &m->mtx_object;
872 	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
873 	    ("mutex %s %p already initialized", name, m));
874 	bzero(m, sizeof(*m));
875 	if (opts & MTX_SPIN)
876 		lock->lo_class = &lock_class_mtx_spin;
877 	else
878 		lock->lo_class = &lock_class_mtx_sleep;
879 	lock->lo_name = name;
880 	lock->lo_type = type != NULL ? type : name;
881 	if (opts & MTX_QUIET)
882 		lock->lo_flags = LO_QUIET;
883 	if (opts & MTX_RECURSE)
884 		lock->lo_flags |= LO_RECURSABLE;
885 	if (opts & MTX_SLEEPABLE)
886 		lock->lo_flags |= LO_SLEEPABLE;
887 	if ((opts & MTX_NOWITNESS) == 0)
888 		lock->lo_flags |= LO_WITNESS;
889 	if (opts & MTX_DUPOK)
890 		lock->lo_flags |= LO_DUPOK;
891 
892 	m->mtx_lock = MTX_UNOWNED;
893 	TAILQ_INIT(&m->mtx_blocked);
894 
895 	LOCK_LOG_INIT(lock, opts);
896 
897 	WITNESS_INIT(lock);
898 }
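
/*
 * A minimal usage sketch of the routine above together with the inlined
 * lock/unlock macros; "foo_mtx" and the surrounding code are hypothetical:
 *
 *	static struct mtx foo_mtx;
 *	static int foo_count;
 *
 *	mtx_init(&foo_mtx, "foo lock", NULL, MTX_DEF);
 *	...
 *	mtx_lock(&foo_mtx);
 *	foo_count++;
 *	mtx_unlock(&foo_mtx);
 *	...
 *	mtx_destroy(&foo_mtx);
 */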
899 
900 /*
901  * Destroy lock `m'.  We don't allow MTX_QUIET to be
902  * passed in as a flag here because if the corresponding mtx_init() was
903  * called with MTX_QUIET set, then it will already be set in the mutex's
904  * flags.
905  */
906 void
907 mtx_destroy(struct mtx *m)
908 {
909 
910 	LOCK_LOG_DESTROY(&m->mtx_object, 0);
911 
912 	if (!mtx_owned(m))
913 		MPASS(mtx_unowned(m));
914 	else {
915 		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
916 
917 		/* Tell witness this isn't locked to make it happy. */
918 		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
919 		    __LINE__);
920 	}
921 
922 	WITNESS_DESTROY(&m->mtx_object);
923 }
924 
925 /*
926  * Initialize the mutex code and system mutexes.  This is called from the MD
927  * startup code prior to mi_startup().  The per-CPU data space needs to be
928  * set up before this is called.
929  */
930 void
931 mutex_init(void)
932 {
933 
934 	/* Setup thread0 so that mutexes work. */
935 	LIST_INIT(&thread0.td_contested);
936 
937 	/*
938 	 * Initialize mutexes.
939 	 */
940 	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
941 	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
942 	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
943 	mtx_lock(&Giant);
944 }
945 
946 /*
947  * Encapsulated Giant mutex routines.  These routines provide encapsulation
948  * control for the Giant mutex, allowing sysctls to be used to turn on and
949  * off Giant around certain subsystems.  The default value for the sysctls
950  * are set to what developers believe is stable and working in regards to
951  * the Giant pushdown.  Developers should not turn off Giant via these
952  * sysctls unless they know what they are doing.
953  *
954  * Callers of mtx_lock_giant() are expected to pass the return value to an
955  * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
956  * affected by a Giant wrap, all related sysctl variables must be zero for
957  * the subsystem call to operate without Giant (as determined by the caller).
958  */
959 
960 SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");
961 
962 static int kern_giant_all = 0;
963 SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");
964 
965 int kern_giant_proc = 1;	/* Giant around PROC locks */
966 int kern_giant_file = 1;	/* Giant around struct file & filedesc */
967 int kern_giant_ucred = 1;	/* Giant around ucred */
968 SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
969 SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
970 SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");
971 
972 int
973 mtx_lock_giant(int sysctlvar)
974 {
975 	if (sysctlvar || kern_giant_all) {
976 		mtx_lock(&Giant);
977 		return(1);
978 	}
979 	return(0);
980 }
981 
982 void
983 mtx_unlock_giant(int s)
984 {
985 	if (s)
986 		mtx_unlock(&Giant);
987 }
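
/*
 * A sketch of the pairing described in the block comment above; the
 * calling function and its choice of sysctl variable are hypothetical:
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_file);
 *	... touch file/filedesc state that may still require Giant ...
 *	mtx_unlock_giant(s);
 */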
988 
989