/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD$
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

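/*
 * Illustrative sketch, not compiled here: the mtx_lock word encodes both
 * the owner and the lock state.  The owning thread pointer occupies the
 * aligned high bits, while state flags such as MTX_RECURSED and
 * MTX_CONTESTED live in the low bits stripped off by MTX_FLAGMASK:
 *
 *	uintptr_t v = m->mtx_lock;
 *	struct thread *owner = (struct thread *)(v & MTX_FLAGMASK);
 *	int contested = (v & MTX_CONTESTED) != 0;
 */
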
/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

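/*
 * Lend the priority of a blocked thread to the owner of the mutex it is
 * blocked on.  If that owner is itself blocked on another mutex, re-sort
 * it on that mutex's blocked queue and continue down the chain.
 */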
static void
propagate_priority(struct thread *td)
{
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right; we really
			 * ought to bump the priority of the thread
			 * that next acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(td->td_proc->p_stat != SSLEEP, ("sleeping thread owns a mutex"));
		if (td->td_priority <= pri) /* lower is higher priority */
			return;

		/*
		 * Bump this thread's priority.
		 */
		td->td_priority = pri;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		/* XXXKSE this test is not sufficient */
		if (td->td_kse && (td->td_kse->ke_oncpu != NOCPU)) {
			MPASS(td->td_proc->p_stat == SRUN ||
			    td->td_proc->p_stat == SZOMB ||
			    td->td_proc->p_stat == SSTOP);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this should
		 * never happen, however, as it would mean we are
		 * deadlocked).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If td is on a run queue, move it to the new run queue
		 * and quit.  XXXKSE this gets a lot more complicated under
		 * threads but try anyhow.
		 */
		if (td->td_proc->p_stat == SRUN) {
			MPASS(td->td_blocked == NULL);
			remrunqueue(td);
			setrunqueue(td);
			return;
		}

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(td->td_proc->p_stat == SMTX, (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_proc->p_stat,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_blkq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_blkq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_blkq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char *name;
	const char *file;
	int line;
#define MPROF_MAX 0
#define MPROF_TOT 1
#define MPROF_CNT 2
#define MPROF_AVG 3
	u_int64_t counter[4];
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define NUM_MPROF_BUFFERS 1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define MPROF_HASH_SIZE 1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;

	if (first_free_mprof_buf == 0)
		return SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded"));

	sb = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND);
	sbuf_printf(sb, "%12s %12s %12s %12s %s\n",
	    "max", "total", "count", "average", "name");
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i)
		sbuf_printf(sb, "%12llu %12llu %12llu %12llu %s:%d (%s)\n",
		    mprof_buf[i].counter[MPROF_MAX] / 1000,
		    mprof_buf[i].counter[MPROF_TOT] / 1000,
		    mprof_buf[i].counter[MPROF_CNT],
		    mprof_buf[i].counter[MPROF_AVG] / 1000,
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* don't reset the timer when/if recursing */
	if (m->acqtime == 0) {
		m->file = file;
		m->line = line;
		m->acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->acqtime;
		m->acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = file; strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = line, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == line && strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = line;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if ((now - acqtime) > mpp->counter[MPROF_MAX])
			mpp->counter[MPROF_MAX] = now - acqtime;
		mpp->counter[MPROF_TOT] += now - acqtime;
		mpp->counter[MPROF_CNT] += 1;
		mpp->counter[MPROF_AVG] =
		    mpp->counter[MPROF_TOT] / mpp->counter[MPROF_CNT];
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	mtx_assert(m, MA_OWNED);
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	_rel_spin_lock(m);
}
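
/*
 * Illustrative sketch, not compiled here: consumers normally reach the
 * functions above through the mtx_lock()/mtx_unlock() and
 * mtx_lock_spin()/mtx_unlock_spin() macros, which supply the file and
 * line arguments for them.  The mutex name below is hypothetical:
 *
 *	mtx_lock(&foo_mtx);
 *	... modify data protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 */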

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}
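
/*
 * Illustrative usage sketch, not compiled here (foo_mtx is hypothetical):
 * mtx_trylock() returns non-zero on success and never sleeps, so callers
 * must be prepared to take an alternate path on failure:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... fast path, foo_mtx held ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... defer the work or fall back ...
 *	}
 */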

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * The mutex was marked contested on release. This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_blkq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_blkq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_blkq);
		}

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_mtxname = m->mtx_object.lo_name;
		td->td_proc->p_stat = SMTX;
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			  "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			  td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000)
				continue;
			if (i++ < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;

	td = curthread;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_blkq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	td1->td_proc->p_stat = SRUN;
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		setrunqueue(td);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
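
/*
 * Illustrative sketch, not compiled here: mtx_assert() is typically used
 * to document and enforce a locking protocol at function entry.  The
 * function and mutex names below are hypothetical:
 *
 *	static void
 *	foo_modify(struct foo *f)
 *	{
 *
 *		mtx_assert(&f->f_mtx, MA_OWNED);
 *		... caller must already hold f_mtx here ...
 *	}
 */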

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(*m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
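
/*
 * Illustrative sketch: MTX_SYSINIT() arranges for mtx_sysinit() to run at
 * boot, so a global mutex is initialized without an explicit call from
 * subsystem code, as with mprof_mtx above.  foo_mtx is hypothetical:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo lock", MTX_DEF);
 */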

/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name'.  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
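
/*
 * Illustrative sketch, not compiled here: a typical dynamic mutex lives
 * inside a structure and is initialized and destroyed along with the
 * object that embeds it (struct foo and its members are hypothetical):
 *
 *	mtx_init(&f->f_mtx, "foo", NULL, MTX_DEF);
 *	...
 *	mtx_destroy(&f->f_mtx);
 */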

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL, "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{

	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return (1);
	}
	return (0);
}

void
mtx_unlock_giant(int s)
{

	if (s)
		mtx_unlock(&Giant);
}
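
/*
 * Illustrative usage sketch, not compiled here: the token returned by
 * mtx_lock_giant() records whether Giant was actually taken and must be
 * handed back to mtx_unlock_giant():
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_proc);
 *	... code that may or may not need Giant ...
 *	mtx_unlock_giant(s);
 */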