/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of turnstiles used to hold queues of threads blocked on
 * non-sleepable locks.  Sleepable locks use condition variables to
 * implement their queues.  Turnstiles differ from a sleep queue in that
 * turnstile queues are assigned to a lock held by an owning thread.  Thus,
 * when one thread is enqueued onto a turnstile, it can lend its priority
 * to the owning thread.
 *
 * We wish to avoid bloating locks with an embedded turnstile and we do not
 * want to use back-pointers in the locks for the same reason.  Thus, we
 * use a similar approach to that of Solaris 7 as described in Solaris
 * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
 * in a hash table based on the address of the lock.  Each entry in the
 * hash table is a linked list of turnstiles and is called a turnstile
 * chain.  Each chain contains a spin mutex that protects all of the
 * turnstiles in the chain.
 *
 * Each time a thread is created, a turnstile is malloc'd and attached to
 * that thread.  When a thread blocks on a lock, if it is the first thread
 * to block, it lends its turnstile to the lock.  If the lock already has
 * a turnstile, then it gives its turnstile to the lock's turnstile's free
 * list.  When a thread is woken up, it takes a turnstile from the free list
 * if there are any other waiters.  If it is the only thread blocked on the
 * lock, then it reclaims the turnstile associated with the lock and removes
 * it from the hash table.
 */
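
/*
 * As an informal illustration only (an assumption-laden sketch, not a
 * copy of any in-tree lock implementation), a lock's contested acquire
 * and release paths might drive this interface roughly as follows.  The
 * example_*() names and struct example_lock are hypothetical; only the
 * turnstile_*() calls are the real interface implemented below.
 */
#if 0
static void
example_lock_contested(struct lock_object *lo, struct example_lock *el)
{
	struct thread *owner;

	for (;;) {
		/* Lock the turnstile chain that 'lo' hashes to. */
		turnstile_lock(lo);
		if (example_try_acquire(el)) {
			/* Got the lock after all, so don't block. */
			turnstile_release(lo);
			return;
		}
		owner = example_owner(el);
		/* Blocks, then returns with the chain unlocked. */
		turnstile_wait(lo, owner);
	}
}

static void
example_unlock_contested(struct lock_object *lo)
{
	struct turnstile *ts;

	/* The caller must be the owner recorded in the turnstile. */
	turnstile_lock(lo);
	ts = turnstile_lookup(lo);
	if (ts != NULL) {
		/* Wake all waiters; turnstile_signal() would wake one. */
		turnstile_broadcast(ts);
		/* Puts the waiters on run queues, unlocks the chain. */
		turnstile_unpend(ts);
	} else
		turnstile_release(lo);
}
#endif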

#include "opt_turnstile_profiling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queues use the same value for the
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define	TC_TABLESIZE	128			/* Must be power of 2. */
#define	TC_MASK		(TC_TABLESIZE - 1)
#define	TC_SHIFT	8
#define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]
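
/*
 * Worked example (with a purely hypothetical address): a lock at
 * 0xc0a12345 hashes to chain ((0xc0a12345 >> 8) & 0x7f) == 0x23, that
 * is, turnstile_chains[35].  Any two locks in the same 256-byte region
 * therefore share a chain and its spin lock, which is harmless: each
 * lock still gets its own turnstile, found by the ts_lockobj comparison
 * in turnstile_lookup() below.
 */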

/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fix up our priority when
 * a lock is released.  The other two lists use the ts_hash entries.  The
 * first of these two is the turnstile chain list that a turnstile is on
 * when it is attached to a lock.  The second list to use ts_hash is the
 * free list hung off of a turnstile that is attached to a lock.
 *
 * Each turnstile contains two lists of threads.  The ts_blocked list is
 * a linked list of threads blocked on the turnstile's lock.  The
 * ts_pending list is a linked list of threads previously awakened by
 * turnstile_signal() or turnstile_broadcast() that are waiting to be put
 * on the run queue.
 *
 * Locking key:
 *  c - turnstile chain lock
 *  q - td_contested lock
 */
struct turnstile {
	TAILQ_HEAD(, thread) ts_blocked;	/* (c + q) Blocked threads. */
	TAILQ_HEAD(, thread) ts_pending;	/* (c) Pending threads. */
	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
	struct lock_object *ts_lockobj;		/* (c) Lock we reference. */
	struct thread *ts_owner;		/* (c + q) Who owns the lock. */
};
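
/*
 * Informal invariant sketch (not asserted anywhere in this form): if a
 * lock has n blocked threads, those threads have donated n turnstiles in
 * total.  One is attached to the lock and linked into its chain via
 * ts_hash; the other n - 1 sit on that turnstile's ts_free list, linked
 * through their own ts_hash entries.  turnstile_signal() and
 * turnstile_broadcast() hand the spares back out, one per released
 * thread.
 */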

struct turnstile_chain {
	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
	struct mtx tc_lock;			/* Spin lock for this chain. */
#ifdef TURNSTILE_PROFILING
	u_int	tc_depth;			/* Length of tc_turnstiles. */
	u_int	tc_max_depth;			/* Max length of tc_turnstiles. */
#endif
};

#ifdef TURNSTILE_PROFILING
u_int turnstile_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD, 0, "turnstile profiling");
SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains, CTLFLAG_RD, 0,
    "turnstile chain stats");
SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
    &turnstile_max_depth, 0, "maximum depth achieved by a single chain");
#endif
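
/*
 * When compiled with TURNSTILE_PROFILING, these statistics are exported
 * under the debug.turnstile sysctl tree; e.g. "sysctl debug.turnstile"
 * from userland should dump max_depth along with the per-chain depth and
 * max_depth counters defined above.
 */
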
static struct mtx td_contested_lock;
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];

static MALLOC_DEFINE(M_TURNSTILE, "turnstiles", "turnstiles");

/*
 * Prototypes for non-exported routines.
 */
static void	init_turnstile0(void *dummy);
#ifdef TURNSTILE_PROFILING
static void	init_turnstile_profiling(void *arg);
#endif
static void	propagate_priority(struct thread *td);
static int	turnstile_adjust_thread(struct turnstile *ts,
		    struct thread *td);
static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);

/*
 * Walks the chain of turnstiles and their owners to propagate the priority
 * of the thread being blocked to all the threads holding locks that have to
 * release their locks before this thread can run again.
 */
static void
propagate_priority(struct thread *td)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	pri = td->td_priority;
	ts = td->td_blocked;
	for (;;) {
		td = ts->ts_owner;

		if (td == NULL) {
			/*
			 * This really isn't quite right.  We really
			 * ought to bump the priority of the thread that
			 * next acquires the lock.
			 */
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		/*
		 * XXX: The owner of a turnstile can be stale if it is the
		 * first thread to grab a shared lock of an sx lock.  In
		 * that case it is possible for us to be at SSLEEP or some
		 * other weird state.  We should probably just return if
		 * the state isn't SRUN or SLOCK.
		 */
		KASSERT(!TD_IS_SLEEPING(td),
		    ("sleeping thread (tid %d) owns a non-sleepable lock",
		    td->td_tid));

		/*
		 * If this thread already has higher priority than the
		 * thread that is being blocked, we are finished.
		 */
		if (td->td_priority <= pri)
			return;

		/*
		 * Bump this thread's priority.
		 */
		sched_lend_prio(td, pri);

		/*
		 * If the lock holder is actually running or on the run
		 * queue then we are done.
		 */
		if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this should
		 * never happen, however, as it would mean that we are
		 * deadlocked.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If we aren't blocked on a lock, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
		    td->td_tid, td->td_proc->p_comm, td->td_state,
		    ts->ts_lockobj->lo_name));

		/*
		 * Pick up the lock that td is blocked on.
		 */
		ts = td->td_blocked;
		MPASS(ts != NULL);
		tc = TC_LOOKUP(ts->ts_lockobj);
		mtx_lock_spin(&tc->tc_lock);

		/* Resort td on the blocked list if needed. */
		if (!turnstile_adjust_thread(ts, td)) {
			mtx_unlock_spin(&tc->tc_lock);
			return;
		}
		mtx_unlock_spin(&tc->tc_lock);
	}
}

/*
 * Adjust the thread's position on a turnstile after its priority has been
 * changed.
 */
static int
turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
{
	struct turnstile_chain *tc;
	struct thread *td1, *td2;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_LOCK(td));

	/*
	 * This thread may not be blocked on this turnstile anymore
	 * but instead might already be woken up on another CPU
	 * that is waiting on sched_lock in turnstile_unpend() to
	 * finish waking this thread up.  We can detect this case
	 * by checking to see if this thread has been given a
	 * turnstile by either turnstile_signal() or
	 * turnstile_broadcast().  In this case, treat the thread as
	 * if it was already running.
	 */
	if (td->td_turnstile != NULL)
		return (0);

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if its priority is either lower than
	 * that of the previous thread or higher than that of the next
	 * thread.
	 */
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	td1 = TAILQ_PREV(td, threadqueue, td_lockq);
	td2 = TAILQ_NEXT(td, td_lockq);
	if ((td1 != NULL && td->td_priority < td1->td_priority) ||
	    (td2 != NULL && td->td_priority > td2->td_priority)) {

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		 */
		mtx_lock_spin(&td_contested_lock);
		TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
		TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > td->td_priority)
				break;
		}

		if (td1 == NULL)
			TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		else
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		if (td1 == NULL)
			CTR3(KTR_LOCK,
		    "turnstile_adjust_thread: td %d put at tail on [%p] %s",
			    td->td_tid, ts->ts_lockobj, ts->ts_lockobj->lo_name);
		else
			CTR4(KTR_LOCK,
		    "turnstile_adjust_thread: td %d moved before %d on [%p] %s",
			    td->td_tid, td1->td_tid, ts->ts_lockobj,
			    ts->ts_lockobj->lo_name);
	}
	return (1);
}

/*
 * Early initialization of turnstiles.  This is not done via a SYSINIT()
 * since this needs to be initialized very early when mutexes are first
 * initialized.
 */
void
init_turnstiles(void)
{
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
		    NULL, MTX_SPIN);
	}
	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
	thread0.td_turnstile = NULL;
}

#ifdef TURNSTILE_PROFILING
static void
init_turnstile_profiling(void *arg)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "turnstile chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
		    NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
		    0, NULL);
	}
}
SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_turnstile_profiling, NULL);
#endif

static void
init_turnstile0(void *dummy)
{

	thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);

/*
 * Update a thread on the turnstile list after its priority has been changed.
 * The old priority is passed in as an argument.
 */
void
turnstile_adjust(struct thread *td, u_char oldpri)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_LOCK(td));

	/*
	 * Pick up the lock that td is blocked on.
	 */
	ts = td->td_blocked;
	MPASS(ts != NULL);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_lock_spin(&tc->tc_lock);

	/* Resort td on the blocked list if needed. */
	if (!turnstile_adjust_thread(ts, td)) {
		mtx_unlock_spin(&tc->tc_lock);
		return;
	}

	/*
	 * If our priority was lowered and we are at the head of the
	 * turnstile, then propagate our new priority up the chain.
	 * Note that we currently don't try to revoke lent priorities
	 * when our priority goes up.
	 */
	if (td == TAILQ_FIRST(&ts->ts_blocked) && td->td_priority < oldpri) {
		mtx_unlock_spin(&tc->tc_lock);
		propagate_priority(td);
	} else
		mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Set the owner of the lock this turnstile is attached to.
 */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

	mtx_assert(&td_contested_lock, MA_OWNED);
	MPASS(owner->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == NULL);
	ts->ts_owner = owner;
	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}

/*
 * Malloc a turnstile for a new thread, initialize it and return it.
 */
struct turnstile *
turnstile_alloc(void)
{
	struct turnstile *ts;

	ts = malloc(sizeof(struct turnstile), M_TURNSTILE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&ts->ts_blocked);
	TAILQ_INIT(&ts->ts_pending);
	LIST_INIT(&ts->ts_free);
	return (ts);
}

/*
 * Free a turnstile when a thread is destroyed.
 */
void
turnstile_free(struct turnstile *ts)
{

	MPASS(ts != NULL);
	MPASS(TAILQ_EMPTY(&ts->ts_blocked));
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
	free(ts, M_TURNSTILE);
}

/*
 * Lock the turnstile chain associated with the specified lock.
 */
void
turnstile_lock(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
}

/*
 * Look up the turnstile for a lock in the hash table.  The associated
 * turnstile chain must already be locked via turnstile_lock().  If no
 * turnstile is found in the hash table, NULL is returned.
 */
struct turnstile *
turnstile_lookup(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			return (ts);
	return (NULL);
}

/*
 * Unlock the turnstile chain associated with a given lock.
 */
void
turnstile_release(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Take ownership of a turnstile and adjust the priority of the new
 * owner appropriately.
 */
void
turnstile_claim(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td, *owner;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	ts = turnstile_lookup(lock);
	MPASS(ts != NULL);

	owner = curthread;
	mtx_lock_spin(&td_contested_lock);
	turnstile_setowner(ts, owner);
	mtx_unlock_spin(&td_contested_lock);

	td = TAILQ_FIRST(&ts->ts_blocked);
	MPASS(td != NULL);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Update the priority of the new owner if needed.
	 */
	mtx_lock_spin(&sched_lock);
	if (td->td_priority < owner->td_priority)
		sched_lend_prio(owner, td->td_priority);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread on the turnstile associated with 'lock'.  This
 * function will context switch and not return until this thread has been
 * woken back up.  This function must be called with the appropriate
 * turnstile chain locked and will return with it unlocked.
 */
void
turnstile_wait(struct lock_object *lock, struct thread *owner)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td, *td1;

	td = curthread;
	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(td->td_turnstile != NULL);
	MPASS(owner != NULL);
	MPASS(owner->td_proc->p_magic == P_MAGIC);

	/* Look up the turnstile associated with the lock 'lock'. */
	ts = turnstile_lookup(lock);

	/*
	 * If the lock does not already have a turnstile, use this thread's
	 * turnstile.  Otherwise insert the current thread into the
	 * turnstile already in use by this lock.
	 */
	if (ts == NULL) {
#ifdef TURNSTILE_PROFILING
		tc->tc_depth++;
		if (tc->tc_depth > tc->tc_max_depth) {
			tc->tc_max_depth = tc->tc_depth;
			if (tc->tc_max_depth > turnstile_max_depth)
				turnstile_max_depth = tc->tc_max_depth;
		}
#endif
		ts = td->td_turnstile;
		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
		    ("thread's turnstile has pending threads"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked),
		    ("thread's turnstile has a non-empty queue"));
		KASSERT(LIST_EMPTY(&ts->ts_free),
		    ("thread's turnstile has a non-empty free list"));
		KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
		ts->ts_lockobj = lock;
		mtx_lock_spin(&td_contested_lock);
		TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		turnstile_setowner(ts, owner);
		mtx_unlock_spin(&td_contested_lock);
	} else {
		TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq)
			if (td1->td_priority > td->td_priority)
				break;
		mtx_lock_spin(&td_contested_lock);
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		else
			TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		MPASS(td->td_turnstile != NULL);
		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
		MPASS(owner == ts->ts_owner);
	}
	td->td_turnstile = NULL;
	mtx_unlock_spin(&tc->tc_lock);

	mtx_lock_spin(&sched_lock);
	/*
	 * Handle the race condition where a thread on another CPU that
	 * owns lock 'lock' could have awakened us between our dropping
	 * the turnstile chain lock and acquiring sched_lock.
	 */
	if (td->td_flags & TDF_TSNOBLOCK) {
		td->td_flags &= ~TDF_TSNOBLOCK;
		mtx_unlock_spin(&sched_lock);
		return;
	}

#ifdef notyet
	/*
	 * If we're borrowing an interrupted thread's VM context, we
	 * must clean up before going to sleep.
	 */
	if (td->td_ithd != NULL) {
		struct ithd *it = td->td_ithd;

		if (it->it_interrupted) {
			if (LOCK_LOG_TEST(lock, 0))
				CTR3(KTR_LOCK, "%s: %p interrupted %p",
				    __func__, it, it->it_interrupted);
			intr_thd_fixup(it);
		}
	}
#endif

	/* Save who we are blocked on and switch. */
	td->td_blocked = ts;
	td->td_lockname = lock->lo_name;
	TD_SET_LOCK(td);
	propagate_priority(td);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
		    td->td_tid, lock, lock->lo_name);

	mi_switch(SW_VOL, NULL);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
		    __func__, td->td_tid, lock, lock->lo_name);

	mtx_unlock_spin(&sched_lock);
}

/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct thread *td;
	int empty;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	/*
	 * Pick the highest priority thread blocked on this lock and
	 * move it to the pending list.
	 */
	td = TAILQ_FIRST(&ts->ts_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_lock_spin(&td_contested_lock);
	TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
	mtx_unlock_spin(&td_contested_lock);
	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

	/*
	 * If the turnstile is now empty, remove it from its chain and
	 * give it to the about-to-be-woken thread.  Otherwise take a
	 * turnstile from the free list and give it to the thread.
	 */
	empty = TAILQ_EMPTY(&ts->ts_blocked);
	if (empty) {
		MPASS(LIST_EMPTY(&ts->ts_free));
#ifdef TURNSTILE_PROFILING
		tc->tc_depth--;
#endif
	} else
		ts = LIST_FIRST(&ts->ts_free);
	MPASS(ts != NULL);
	LIST_REMOVE(ts, ts_hash);
	td->td_turnstile = ts;

	return (empty);
}

/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_broadcast(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct turnstile *ts1;
	struct thread *td;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	/*
	 * Transfer the blocked list to the pending list.
	 */
	mtx_lock_spin(&td_contested_lock);
	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked, td_lockq);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Give a turnstile to each thread.  The last thread gets
	 * this turnstile.
	 */
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
		if (LIST_EMPTY(&ts->ts_free)) {
			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
			ts1 = ts;
#ifdef TURNSTILE_PROFILING
			tc->tc_depth--;
#endif
		} else
			ts1 = LIST_FIRST(&ts->ts_free);
		MPASS(ts1 != NULL);
		LIST_REMOVE(ts1, ts_hash);
		td->td_turnstile = ts1;
	}
}

/*
 * Wake up all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked.
 */
void
turnstile_unpend(struct turnstile *ts)
{
	TAILQ_HEAD(, thread) pending_threads;
	struct turnstile_chain *tc;
	struct thread *td;
	u_char cp, pri;

	MPASS(ts != NULL);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(!TAILQ_EMPTY(&ts->ts_pending));

	/*
	 * Move the list of pending threads out of the turnstile and
	 * into a local variable.
	 */
	TAILQ_INIT(&pending_threads);
	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&ts->ts_blocked))
		ts->ts_lockobj = NULL;
#endif

	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.
	 */
	mtx_lock_spin(&td_contested_lock);
	ts->ts_owner = NULL;
	LIST_REMOVE(ts, ts_link);
	mtx_unlock_spin(&td_contested_lock);
	critical_enter();
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority however.
	 */
	td = curthread;
	pri = PRI_MAX;
	mtx_lock_spin(&sched_lock);
	mtx_lock_spin(&td_contested_lock);
	LIST_FOREACH(ts, &td->td_contested, ts_link) {
		cp = TAILQ_FIRST(&ts->ts_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}
	mtx_unlock_spin(&td_contested_lock);
	sched_unlend_prio(td, pri);

	/*
	 * Wake up all the pending threads.  If a thread is not blocked
	 * on a lock, then it is currently executing on another CPU in
	 * turnstile_wait() or sitting on a run queue waiting to resume
	 * in turnstile_wait().  Set a flag to force it to try to acquire
	 * the lock again instead of blocking.
	 */
	while (!TAILQ_EMPTY(&pending_threads)) {
		td = TAILQ_FIRST(&pending_threads);
		TAILQ_REMOVE(&pending_threads, td, td_lockq);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		if (TD_ON_LOCK(td)) {
			td->td_blocked = NULL;
			td->td_lockname = NULL;
			TD_CLR_LOCK(td);
			MPASS(TD_CAN_RUN(td));
			setrunqueue(td, SRQ_BORING);
		} else {
			td->td_flags |= TDF_TSNOBLOCK;
			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
		}
	}
	critical_exit();
	mtx_unlock_spin(&sched_lock);
}

/*
 * Return the first thread in a turnstile.
 */
struct thread *
turnstile_head(struct turnstile *ts)
{
#ifdef INVARIANTS
	struct turnstile_chain *tc;

	MPASS(ts != NULL);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
#endif
	return (TAILQ_FIRST(&ts->ts_blocked));
}

/*
 * Return true if a turnstile is empty.
 */
int
turnstile_empty(struct turnstile *ts)
{
#ifdef INVARIANTS
	struct turnstile_chain *tc;

	MPASS(ts != NULL);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
#endif
	return (TAILQ_EMPTY(&ts->ts_blocked));
}
852