/* xref: /freebsd/sys/kern/subr_turnstile.c (revision f9218d3d4fd34f082473b3a021c6d4d109fb47cf) */
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 * $FreeBSD$
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)	(mtx_unowned((m)) ? NULL \
	: (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
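
/*
 * Note on the lock word encoding assumed by the macros above: mtx_lock
 * holds either MTX_UNOWNED or the owning thread pointer, with the low
 * bits reserved for the MTX_RECURSED and MTX_CONTESTED flags;
 * MTX_FLAGMASK clears those flag bits to recover the pointer.
 */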

/* XXXKSE This test will change. */
#define	thread_running(td)						\
	((td)->td_kse != NULL && (td)->td_kse->ke_oncpu != NOCPU)

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	"sleep mutex",
	LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
	"spin mutex",
	LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

/*
 * Prototypes for non-exported routines.
 */
static void	propagate_priority(struct thread *);

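/*
 * propagate_priority() implements priority inheritance: starting from
 * the mutex that td is blocked on, walk the chain of lock owners,
 * raising each owner's priority to td's (lower values mean higher
 * priority) and re-sorting each owner's position on the blocked queue
 * of the mutex it is in turn waiting on, until an owner that is
 * running or on a run queue is reached.
 */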
static void
propagate_priority(struct thread *td)
{
	int pri = td->td_priority;
	struct mtx *m = td->td_blocked;

	mtx_assert(&sched_lock, MA_OWNED);
	for (;;) {
		struct thread *td1;

		td = mtx_owner(m);

		if (td == NULL) {
			/*
			 * This really isn't quite right; we really ought
			 * to bump the priority of the thread that next
			 * acquires the mutex.
			 */
			MPASS(m->mtx_lock == MTX_CONTESTED);
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		KASSERT(!TD_IS_SLEEPING(td), ("sleeping thread owns a mutex"));
		if (td->td_priority <= pri) /* lower is higher priority */
			return;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (TD_IS_RUNNING(td)) {
			td->td_priority = pri;
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check that td is not curthread (this should
		 * never happen, as it would mean we are deadlocked).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If the thread is on a run queue, move it to the new run
		 * queue and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			sched_prio(td, pri);
			return;
		}
		/*
		 * Adjust for any other cases.
		 */
		td->td_priority = pri;

		/*
		 * If we aren't blocked on a mutex, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "process %d(%s):%d holds %s but isn't blocked on a mutex\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
		    m->mtx_object.lo_name));

		/*
		 * Pick up the mutex that td is blocked on.
		 */
		m = td->td_blocked;
		MPASS(m != NULL);

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.
		 */
		if (td == TAILQ_FIRST(&m->mtx_blocked)) {
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_lockq);
		if (td1->td_priority <= pri) {
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		TAILQ_REMOVE(&m->mtx_blocked, td, td_lockq);
		TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		CTR4(KTR_LOCK,
		    "propagate_priority: p %p moved before %p on [%p] %s",
		    td, td1, m, m->mtx_object.lo_name);
	}
}

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
	const char	*name;
	const char	*file;
	int		line;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_cur;
	struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define	NUM_MPROF_BUFFERS	1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define	MPROF_HASH_SIZE		1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define	MPROF_SBUF_SIZE		(256 * 400)

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
	struct timespec tv;

	nanotime(&tv);
	return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, i;
	static int multiplier = 1;

	if (first_free_mprof_buf == 0)
		return (SYSCTL_OUT(req, "No locking recorded",
		    sizeof("No locking recorded")));

retry_sbufops:
	sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "%6s %12s %11s %5s %s\n",
	    "max", "total", "count", "avg", "name");
	/*
	 * XXX this spinlock seems to be by far the largest perpetrator
	 * of spinlock latency (1.6 msec on an Athlon1600 was recorded
	 * even before I pessimized it further by moving the average
	 * computation here).
	 */
	mtx_lock_spin(&mprof_mtx);
	for (i = 0; i < first_free_mprof_buf; ++i) {
		sbuf_printf(sb, "%6ju %12ju %11ju %5ju %s:%d (%s)\n",
		    mprof_buf[i].cnt_max / 1000,
		    mprof_buf[i].cnt_tot / 1000,
		    mprof_buf[i].cnt_cur,
		    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
			mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
		    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
		if (sbuf_overflowed(sb)) {
			mtx_unlock_spin(&mprof_mtx);
			sbuf_delete(sb);
			multiplier++;
			goto retry_sbufops;
		}
	}
	mtx_unlock_spin(&mprof_mtx);
	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
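
/*
 * Example (a sketch): with MUTEX_PROFILING compiled in, the results
 * can be inspected from userland with:
 *
 *	sysctl debug.mutex.prof.enable=1
 *	...run a workload...
 *	sysctl debug.mutex.prof.stats
 */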
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
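
/*
 * For example (an illustrative sketch; foo_mtx is a hypothetical mutex),
 * a caller that does:
 *
 *	mtx_lock(&foo_mtx);
 *	...
 *	mtx_unlock(&foo_mtx);
 *
 * ends up in _mtx_lock_flags()/_mtx_unlock_flags() below whenever the
 * inlined fast path is not used.
 */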
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
	/* Don't reset the timer when/if recursing. */
	if (m->mtx_acqtime == 0) {
		m->mtx_filename = file;
		m->mtx_lineno = line;
		m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
		++mutex_prof_acquisitions;
	}
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
	if (m->mtx_acqtime != 0) {
		static const char *unknown = "(unknown)";
		struct mutex_prof *mpp;
		u_int64_t acqtime, now;
		const char *p, *q;
		volatile u_int hash;

		now = nanoseconds();
		acqtime = m->mtx_acqtime;
		m->mtx_acqtime = 0;
		if (now <= acqtime)
			goto out;
		for (p = m->mtx_filename;
		    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
			/* nothing */ ;
		if (p == NULL || *p == '\0')
			p = unknown;
		for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
			hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
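		/*
		 * The loop above hashes the acquisition line number and the
		 * filename characters into a simple multiplicative hash,
		 * reduced modulo the (prime) hash table size.
		 */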
		mtx_lock_spin(&mprof_mtx);
		for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
			if (mpp->line == m->mtx_lineno &&
			    strcmp(mpp->file, p) == 0)
				break;
		if (mpp == NULL) {
			/* Just exit if we cannot get a trace buffer. */
			if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
				++mutex_prof_rejected;
				goto unlock;
			}
			mpp = &mprof_buf[first_free_mprof_buf++];
			mpp->name = mtx_name(m);
			mpp->file = p;
			mpp->line = m->mtx_lineno;
			mpp->next = mprof_hash[hash];
			if (mprof_hash[hash] != NULL)
				++mutex_prof_collisions;
			mprof_hash[hash] = mpp;
			++mutex_prof_records;
		}
		/*
		 * Record if the mutex has been held longer now than ever
		 * before.
		 */
		if (now - acqtime > mpp->cnt_max)
			mpp->cnt_max = now - acqtime;
		mpp->cnt_tot += now - acqtime;
		mpp->cnt_cur++;
unlock:
		mtx_unlock_spin(&mprof_mtx);
	}
out:
#endif
	_rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_get_spin_lock(m, curthread, opts, file, line);
#else
	critical_enter();
#endif
	LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->mtx_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
	_rel_spin_lock(m);
#else
	critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  We do NOT handle recursion here; we assume that
 * if we're called, it's because we know we don't already own this lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
	int rval;

	MPASS(curthread != NULL);

	rval = _obtain_lock(m, curthread);

	LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
	if (rval) {
		/*
		 * We do not handle recursion in _mtx_trylock; see the
		 * note at the top of the routine.
		 */
		KASSERT(!mtx_recursed(m),
		    ("mtx_trylock() called on a recursed mutex"));
		WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	}

	return (rval);
}
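
/*
 * A typical caller pattern (a sketch; foo_mtx is illustrative):
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		... we now own foo_mtx ...
 *		mtx_unlock(&foo_mtx);
 *	} else {
 *		... fall back without blocking ...
 *	}
 */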

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td = curthread;
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif

	if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)td) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, td)) {
		uintptr_t v;
		struct thread *td1;

		mtx_lock_spin(&sched_lock);
		/*
		 * Check if the lock has been released while spinning for
		 * the sched_lock.
		 */
		if ((v = m->mtx_lock) == MTX_UNOWNED) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

		/*
		 * The mutex was marked contested on release.  This means that
		 * there are threads blocked on it.
		 */
		if (v == MTX_CONTESTED) {
			td1 = TAILQ_FIRST(&m->mtx_blocked);
			MPASS(td1 != NULL);
			m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;

			if (td1->td_priority < td->td_priority)
				td->td_priority = td1->td_priority;
			mtx_unlock_spin(&sched_lock);
			return;
		}

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
			(void *)(v | MTX_CONTESTED))) {
			mtx_unlock_spin(&sched_lock);
#ifdef __i386__
			ia32_pause();
#endif
			continue;
		}

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, spin instead of blocking.
		 */
		owner = (struct thread *)(v & MTX_FLAGMASK);
		if (m != &Giant && thread_running(owner)) {
			mtx_unlock_spin(&sched_lock);
			while (mtx_owner(m) == owner && thread_running(owner)) {
#ifdef __i386__
				ia32_pause();
#endif
			}
			continue;
		}
#endif	/* SMP && ADAPTIVE_MUTEXES */

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef notyet
		/*
		 * If we're borrowing an interrupted thread's VM context, we
		 * must clean up before going to sleep.
		 */
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_lock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif

		/*
		 * Put us on the list of threads blocked on this mutex.
		 */
		if (TAILQ_EMPTY(&m->mtx_blocked)) {
			td1 = mtx_owner(m);
			LIST_INSERT_HEAD(&td1->td_contested, m, mtx_contested);
			TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
		} else {
			TAILQ_FOREACH(td1, &m->mtx_blocked, td_lockq)
				if (td1->td_priority > td->td_priority)
					break;
			if (td1)
				TAILQ_INSERT_BEFORE(td1, td, td_lockq);
			else
				TAILQ_INSERT_TAIL(&m->mtx_blocked, td, td_lockq);
		}
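		/*
		 * Note that the insertion above keeps mtx_blocked sorted by
		 * td_priority, so the highest-priority waiter is always at
		 * the head of the queue.
		 */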
#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    td, file, line, m->mtx_object.lo_name,
			    WITNESS_FILE(&m->mtx_object),
			    WITNESS_LINE(&m->mtx_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Save who we're blocked on.
		 */
		td->td_blocked = m;
		td->td_lockname = m->mtx_object.lo_name;
		TD_SET_LOCK(td);
		propagate_priority(td);

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			    "_mtx_lock_sleep: p %p blocked on [%p] %s", td, m,
			    m->mtx_object.lo_name);

		td->td_proc->p_stats->p_ru.ru_nvcsw++;
		mi_switch();

		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR3(KTR_LOCK,
			  "_mtx_lock_sleep: p %p free from blocked on [%p] %s",
			  td, m, m->mtx_object.lo_name);

		mtx_unlock_spin(&sched_lock);
	}

#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->mtx_object.lo_name, td, file, line);
	}
#endif
	return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
	int i = 0;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

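	/*
	 * Spin until the lock is released: the first ten million or so
	 * iterations just busy-wait (with ia32_pause() on i386), later
	 * iterations back off with DELAY(1), and if the owner still has not
	 * released the lock we eventually panic (unless the kernel debugger
	 * is active).
	 */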
	for (;;) {
		if (_obtain_lock(m, curthread))
			break;

		/* Give interrupts a chance while we spin. */
		critical_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
#ifdef __i386__
				ia32_pause();
#endif
				continue;
			}
			if (i < 60000000)
				DELAY(1);
#ifdef DDB
			else if (!db_active)
#else
			else
#endif
				panic("spin lock %s held by %p for > 5 seconds",
				    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef __i386__
			ia32_pause();
#endif
		}
		critical_enter();
	}

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	return;
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct thread *td, *td1;
	struct mtx *m1;
	int pri;

	td = curthread;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	mtx_lock_spin(&sched_lock);
	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

	td1 = TAILQ_FIRST(&m->mtx_blocked);
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
	if (td1 == NULL) {
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
		mtx_unlock_spin(&sched_lock);
		return;
	}
#endif
	MPASS(td->td_proc->p_magic == P_MAGIC);
	MPASS(td1->td_proc->p_magic == P_MAGIC);

	TAILQ_REMOVE(&m->mtx_blocked, td1, td_lockq);

	if (TAILQ_EMPTY(&m->mtx_blocked)) {
		LIST_REMOVE(m, mtx_contested);
		_release_lock_quick(m);
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
	} else
		atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);

	pri = PRI_MAX;
	LIST_FOREACH(m1, &td->td_contested, mtx_contested) {
		int cp = TAILQ_FIRST(&m1->mtx_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}

	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	if (LOCK_LOG_TEST(&m->mtx_object, opts))
		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
		    m, td1);

	td1->td_blocked = NULL;
	TD_CLR_LOCK(td1);
	if (!TD_CAN_RUN(td1)) {
		mtx_unlock_spin(&sched_lock);
		return;
	}
	setrunqueue(td1);

	if (td->td_critnest == 1 && td1->td_priority < pri) {
#ifdef notyet
		if (td->td_ithd != NULL) {
			struct ithd *it = td->td_ithd;

			if (it->it_interrupted) {
				if (LOCK_LOG_TEST(&m->mtx_object, opts))
					CTR2(KTR_LOCK,
				    "_mtx_unlock_sleep: %p interrupted %p",
					    it, it->it_interrupted);
				intr_thd_fixup(it);
			}
		}
#endif
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK,
			    "_mtx_unlock_sleep: %p switching out lock=%p", m,
			    (void *)m->mtx_lock);

		td->td_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		if (LOCK_LOG_TEST(&m->mtx_object, opts))
			CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
			    m, (void *)m->mtx_lock);
	}

	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->mtx_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->mtx_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->mtx_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
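
/*
 * Example usage (a sketch; foo_mtx is illustrative):
 *
 *	mtx_assert(&foo_mtx, MA_OWNED | MA_NOTRECURSED);
 *
 * panics if the current thread does not own foo_mtx, or owns it
 * recursively.
 */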

/*
 * The MUTEX_DEBUG-enabled mtx_validate().
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check.
 */
#ifdef notyet
/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
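
/*
 * For example, the declaration (a sketch mirroring the MTX_SYSINIT()
 * usage for mprof_mtx above):
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo lock", MTX_DEF);
 *
 * arranges for mtx_sysinit() to run during boot with a struct mtx_args
 * describing foo_mtx.
 */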

/*
 * Mutex initialization routine; initialize lock `m' with options contained
 * in `opts' and name `name.'  The optional lock type `type' is used as a
 * general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_object *lock;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_SLEEPABLE | MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	lock = &m->mtx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("mutex %s %p already initialized", name, m));
	bzero(m, sizeof(*m));
	if (opts & MTX_SPIN)
		lock->lo_class = &lock_class_mtx_spin;
	else
		lock->lo_class = &lock_class_mtx_sleep;
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	if (opts & MTX_QUIET)
		lock->lo_flags = LO_QUIET;
	if (opts & MTX_RECURSE)
		lock->lo_flags |= LO_RECURSABLE;
	if (opts & MTX_SLEEPABLE)
		lock->lo_flags |= LO_SLEEPABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		lock->lo_flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		lock->lo_flags |= LO_DUPOK;

	m->mtx_lock = MTX_UNOWNED;
	TAILQ_INIT(&m->mtx_blocked);

	LOCK_LOG_INIT(lock, opts);

	WITNESS_INIT(lock);
}
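
/*
 * Example lifecycle (a minimal sketch; foo_mtx is illustrative):
 *
 *	struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	...
 *	mtx_unlock(&foo_mtx);
 *	mtx_destroy(&foo_mtx);
 */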

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a flag
 * here because if the corresponding mtx_init() was called with MTX_QUIET
 * set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

	LOCK_LOG_DESTROY(&m->mtx_object, 0);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Set up thread0 so that mutexes work. */
	LIST_INIT(&thread0.td_contested);

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_lock(&Giant);
}

/*
 * Encapsulated Giant mutex routines.  These routines provide encapsulation
 * control for the Giant mutex, allowing sysctls to be used to turn on and
 * off Giant around certain subsystems.  The default values for the sysctls
 * are set to what developers believe is stable and working in regards to
 * the Giant pushdown.  Developers should not turn off Giant via these
 * sysctls unless they know what they are doing.
 *
 * Callers of mtx_lock_giant() are expected to pass the return value to an
 * accompanying mtx_unlock_giant() later on.  If multiple subsystems are
 * affected by a Giant wrap, all related sysctl variables must be zero for
 * the subsystem call to operate without Giant (as determined by the caller).
 */

SYSCTL_NODE(_kern, OID_AUTO, giant, CTLFLAG_RD, NULL,
    "Giant mutex manipulation");

static int kern_giant_all = 0;
SYSCTL_INT(_kern_giant, OID_AUTO, all, CTLFLAG_RW, &kern_giant_all, 0, "");

int kern_giant_proc = 1;	/* Giant around PROC locks */
int kern_giant_file = 1;	/* Giant around struct file & filedesc */
int kern_giant_ucred = 1;	/* Giant around ucred */
SYSCTL_INT(_kern_giant, OID_AUTO, proc, CTLFLAG_RW, &kern_giant_proc, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, file, CTLFLAG_RW, &kern_giant_file, 0, "");
SYSCTL_INT(_kern_giant, OID_AUTO, ucred, CTLFLAG_RW, &kern_giant_ucred, 0, "");

int
mtx_lock_giant(int sysctlvar)
{
	if (sysctlvar || kern_giant_all) {
		mtx_lock(&Giant);
		return (1);
	}
	return (0);
}

void
mtx_unlock_giant(int s)
{
	if (s)
		mtx_unlock(&Giant);
}
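
/*
 * A typical caller pattern for the Giant wrappers (a sketch):
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_proc);
 *	... code that may need Giant around proc locks ...
 *	mtx_unlock_giant(s);
 */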
1041