xref: /freebsd/sys/kern/subr_turnstile.c (revision 5b7de7e19e20333bd3788d5480173b5e66cec66b)
10384fff8SJason Evans /*-
20384fff8SJason Evans  * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
30384fff8SJason Evans  *
40384fff8SJason Evans  * Redistribution and use in source and binary forms, with or without
50384fff8SJason Evans  * modification, are permitted provided that the following conditions
60384fff8SJason Evans  * are met:
70384fff8SJason Evans  * 1. Redistributions of source code must retain the above copyright
80384fff8SJason Evans  *    notice, this list of conditions and the following disclaimer.
90384fff8SJason Evans  * 2. Redistributions in binary form must reproduce the above copyright
100384fff8SJason Evans  *    notice, this list of conditions and the following disclaimer in the
110384fff8SJason Evans  *    documentation and/or other materials provided with the distribution.
120384fff8SJason Evans  * 3. Berkeley Software Design Inc's name may not be used to endorse or
130384fff8SJason Evans  *    promote products derived from this software without specific prior
140384fff8SJason Evans  *    written permission.
150384fff8SJason Evans  *
160384fff8SJason Evans  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
170384fff8SJason Evans  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
180384fff8SJason Evans  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
190384fff8SJason Evans  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
200384fff8SJason Evans  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
210384fff8SJason Evans  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
220384fff8SJason Evans  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
230384fff8SJason Evans  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
240384fff8SJason Evans  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
250384fff8SJason Evans  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
260384fff8SJason Evans  * SUCH DAMAGE.
270384fff8SJason Evans  *
280384fff8SJason Evans  *	from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
2936412d79SJohn Baldwin  *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
300384fff8SJason Evans  */
310384fff8SJason Evans 
320384fff8SJason Evans /*
33961a7b24SJohn Baldwin  * Implementation of turnstiles used to hold queue of threads blocked on
34961a7b24SJohn Baldwin  * non-sleepable locks.  Sleepable locks use condition variables to
35961a7b24SJohn Baldwin  * implement their queues.  Turnstiles differ from a sleep queue in that
36961a7b24SJohn Baldwin  * turnstile queue's are assigned to a lock held by an owning thread.  Thus,
37961a7b24SJohn Baldwin  * when one thread is enqueued onto a turnstile, it can lend its priority
38961a7b24SJohn Baldwin  * to the owning thread.
39961a7b24SJohn Baldwin  *
40961a7b24SJohn Baldwin  * We wish to avoid bloating locks with an embedded turnstile and we do not
41961a7b24SJohn Baldwin  * want to use back-pointers in the locks for the same reason.  Thus, we
42961a7b24SJohn Baldwin  * use a similar approach to that of Solaris 7 as described in Solaris
43961a7b24SJohn Baldwin  * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
44961a7b24SJohn Baldwin  * in a hash table based on the address of the lock.  Each entry in the
45961a7b24SJohn Baldwin  * hash table is a linked-lists of turnstiles and is called a turnstile
46961a7b24SJohn Baldwin  * chain.  Each chain contains a spin mutex that protects all of the
47961a7b24SJohn Baldwin  * turnstiles in the chain.
48961a7b24SJohn Baldwin  *
49961a7b24SJohn Baldwin  * Each time a thread is created, a turnstile is malloc'd and attached to
50961a7b24SJohn Baldwin  * that thread.  When a thread blocks on a lock, if it is the first thread
51961a7b24SJohn Baldwin  * to block, it lends its turnstile to the lock.  If the lock already has
52961a7b24SJohn Baldwin  * a turnstile, then it gives its turnstile to the lock's turnstile's free
53861a7db5SJohn Baldwin  * list.  When a thread is woken up, it takes a turnstile from the free list
54961a7b24SJohn Baldwin  * if there are any other waiters.  If it is the only thread blocked on the
55961a7b24SJohn Baldwin  * lock, then it reclaims the turnstile associated with the lock and removes
56961a7b24SJohn Baldwin  * it from the hash table.
57961a7b24SJohn Baldwin  *
58961a7b24SJohn Baldwin  * XXX: We should probably implement some sort of sleep queue that condition
59961a7b24SJohn Baldwin  * variables and sleepqueue's share.  On Solaris condition variables are
60961a7b24SJohn Baldwin  * implemented using a hash table of sleep queues similar to our current
61961a7b24SJohn Baldwin  * sleep queues.  We might want to investigate doing that ourselves.
620384fff8SJason Evans  */
630384fff8SJason Evans 
64677b542eSDavid E. O'Brien #include <sys/cdefs.h>
65677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
66677b542eSDavid E. O'Brien 
670384fff8SJason Evans #include <sys/param.h>
686c35e809SDag-Erling Smørgrav #include <sys/systm.h>
6936412d79SJohn Baldwin #include <sys/kernel.h>
706c35e809SDag-Erling Smørgrav #include <sys/ktr.h>
7119284646SJohn Baldwin #include <sys/lock.h>
72fb919e4dSMark Murray #include <sys/malloc.h>
7319284646SJohn Baldwin #include <sys/mutex.h>
740384fff8SJason Evans #include <sys/proc.h>
75961a7b24SJohn Baldwin #include <sys/queue.h>
76c4f7a187SJohn Baldwin #include <sys/resourcevar.h>
77961a7b24SJohn Baldwin #include <sys/turnstile.h>
78b43179fbSJeff Roberson #include <sys/sched.h>
7936412d79SJohn Baldwin 
800cde2e34SJason Evans /*
81961a7b24SJohn Baldwin  * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
82961a7b24SJohn Baldwin  * number chosen because the sleep queue's use the same value for the
83961a7b24SJohn Baldwin  * shift.  Basically, we ignore the lower 8 bits of the address.
84961a7b24SJohn Baldwin  * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
850cde2e34SJason Evans  */
#define	TC_TABLESIZE	128			/* Must be power of 2. */
#define	TC_MASK		(TC_TABLESIZE - 1)	/* Bucket-index mask. */
#define	TC_SHIFT	8			/* Low address bits ignored. */
#define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]
919ed346baSBosko Milekic 
920cde2e34SJason Evans /*
93961a7b24SJohn Baldwin  * There are three different lists of turnstiles as follows.  The list
94961a7b24SJohn Baldwin  * connected by ts_link entries is a per-thread list of all the turnstiles
95961a7b24SJohn Baldwin  * attached to locks that we own.  This is used to fixup our priority when
96961a7b24SJohn Baldwin  * a lock is released.  The other two lists use the ts_hash entries.  The
975b7de7e1SJohn Baldwin  * first of these two is the turnstile chain list that a turnstile is on
985b7de7e1SJohn Baldwin  * when it is attached to a lock.  The second list to use ts_hash is the
995b7de7e1SJohn Baldwin  * free list hung off of a turnstile that is attached to a lock.
100961a7b24SJohn Baldwin  *
101961a7b24SJohn Baldwin  * Each turnstile contains two lists of threads.  The ts_blocked list is
102961a7b24SJohn Baldwin  * a linked list of threads blocked on the turnstile's lock.  The
103961a7b24SJohn Baldwin  * ts_pending list is a linked list of threads previously awoken by
104961a7b24SJohn Baldwin  * turnstile_signal() or turnstile_wait() that are waiting to be put on
105961a7b24SJohn Baldwin  * the run queue.
106961a7b24SJohn Baldwin  *
107961a7b24SJohn Baldwin  * Locking key:
108961a7b24SJohn Baldwin  *  c - turnstile chain lock
109961a7b24SJohn Baldwin  *  q - td_contested lock
1100cde2e34SJason Evans  */
struct turnstile {
	/*
	 * ts_blocked is kept sorted so the highest-priority thread
	 * (lowest td_priority value) is at the head; see the sorted
	 * insert in turnstile_wait() and the reordering done by
	 * propagate_priority().
	 */
	TAILQ_HEAD(, thread) ts_blocked;	/* (c + q) Blocked threads. */
	TAILQ_HEAD(, thread) ts_pending;	/* (c) Pending threads. */
	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
	struct lock_object *ts_lockobj;		/* (c) Lock we reference. */
	struct thread *ts_owner;		/* (c + q) Who owns the lock. */
};
1208484de75SJohn Baldwin 
/* One hash bucket: all turnstiles whose lock address hashes here. */
struct turnstile_chain {
	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
	struct mtx tc_lock;			/* Spin lock for this chain. */
};
125961a7b24SJohn Baldwin 
126961a7b24SJohn Baldwin static struct mtx td_contested_lock;
127961a7b24SJohn Baldwin static struct turnstile_chain turnstile_chains[TC_TABLESIZE];
128961a7b24SJohn Baldwin 
129961a7b24SJohn Baldwin MALLOC_DEFINE(M_TURNSTILE, "turnstiles", "turnstiles");
130c53c013bSJohn Baldwin 
131c53c013bSJohn Baldwin /*
1329ed346baSBosko Milekic  * Prototypes for non-exported routines.
1339ed346baSBosko Milekic  */
134961a7b24SJohn Baldwin static void	init_turnstile0(void *dummy);
135b40ce416SJulian Elischer static void	propagate_priority(struct thread *);
136961a7b24SJohn Baldwin static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);
13736412d79SJohn Baldwin 
138961a7b24SJohn Baldwin /*
139961a7b24SJohn Baldwin  * Walks the chain of turnstiles and their owners to propagate the priority
140961a7b24SJohn Baldwin  * of the thread being blocked to all the threads holding locks that have to
141961a7b24SJohn Baldwin  * release their locks before this thread can run again.
142961a7b24SJohn Baldwin  */
static void
propagate_priority(struct thread *td)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td1;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	/* 'pri' is the priority we are lending down the chain. */
	pri = td->td_priority;
	ts = td->td_blocked;
	/*
	 * Each iteration boosts the owner of the turnstile 'td' is
	 * blocked on, then follows that owner's own td_blocked link
	 * to the next turnstile in the chain.
	 */
	for (;;) {
		td = ts->ts_owner;

		if (td == NULL) {
			/*
			 * This really isn't quite right. Really
			 * ought to bump priority of thread that
			 * next acquires the lock.
			 */
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		/*
		 * XXX: The owner of a turnstile can be stale if it is the
		 * first thread to grab a slock of a sx lock.  In that case
		 * it is possible for us to be at SSLEEP or some other
		 * weird state.  We should probably just return if the state
		 * isn't SRUN or SLOCK.
		 */
		KASSERT(!TD_IS_SLEEPING(td),
		    ("sleeping thread (pid %d) owns a non-sleepable lock",
		    td->td_proc->p_pid));

		/*
		 * If this thread already has higher priority than the
		 * thread that is being blocked, we are finished.
		 */
		if (td->td_priority <= pri)
			return;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (TD_IS_RUNNING(td)) {
			td->td_priority = pri;
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen however as it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			sched_prio(td, pri);
			return;
		}

		/*
		 * Bump this thread's priority.
		 */
		td->td_priority = pri;

		/*
		 * If we aren't blocked on a lock, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "process %d(%s):%d holds %s but isn't blocked on a lock\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
		    ts->ts_lockobj->lo_name));

		/*
		 * Pick up the lock that td is blocked on.
		 */
		ts = td->td_blocked;
		MPASS(ts != NULL);
		tc = TC_LOOKUP(ts->ts_lockobj);
		mtx_lock_spin(&tc->tc_lock);

		/*
		 * This thread may not be blocked on this turnstile anymore
		 * but instead might already be woken up on another CPU
		 * that is waiting on sched_lock in turnstile_unpend() to
		 * finish waking this thread up.  We can detect this case
		 * by checking to see if this thread has been given a
		 * turnstile by either turnstile_signal() or
		 * turnstile_wakeup().  In this case, treat the thread as
		 * if it was already running.
		 */
		if (td->td_turnstile != NULL) {
			mtx_unlock_spin(&tc->tc_lock);
			return;
		}

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.  It doesn't need to be moved
		 * if it is already at the head of the list or if
		 * the item in front of it still has a higher priority.
		 */
		if (td == TAILQ_FIRST(&ts->ts_blocked)) {
			mtx_unlock_spin(&tc->tc_lock);
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_lockq);
		if (td1->td_priority <= pri) {
			mtx_unlock_spin(&tc->tc_lock);
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		mtx_lock_spin(&td_contested_lock);
		TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
		TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		CTR4(KTR_LOCK,
		    "propagate_priority: td %p moved before %p on [%p] %s",
		    td, td1, ts->ts_lockobj, ts->ts_lockobj->lo_name);
		mtx_unlock_spin(&tc->tc_lock);
	}
}
29136412d79SJohn Baldwin 
2926c35e809SDag-Erling Smørgrav /*
293961a7b24SJohn Baldwin  * Early initialization of turnstiles.  This is not done via a SYSINIT()
294961a7b24SJohn Baldwin  * since this needs to be initialized very early when mutexes are first
295961a7b24SJohn Baldwin  * initialized.
2966283b7d0SJohn Baldwin  */
2976283b7d0SJohn Baldwin void
298961a7b24SJohn Baldwin init_turnstiles(void)
2996283b7d0SJohn Baldwin {
300961a7b24SJohn Baldwin 	int i;
3016283b7d0SJohn Baldwin 
302961a7b24SJohn Baldwin 	for (i = 0; i < TC_TABLESIZE; i++) {
303961a7b24SJohn Baldwin 		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
304961a7b24SJohn Baldwin 		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
305961a7b24SJohn Baldwin 		    NULL, MTX_SPIN);
3066c35e809SDag-Erling Smørgrav 	}
307961a7b24SJohn Baldwin 	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
308961a7b24SJohn Baldwin 	thread0.td_turnstile = NULL;
3096283b7d0SJohn Baldwin }
3106283b7d0SJohn Baldwin 
/*
 * Give thread0 its turnstile once malloc(9) is usable; init_turnstiles()
 * runs too early to allocate one and leaves td_turnstile NULL.
 */
static void
init_turnstile0(void *dummy)
{

	thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);
3186c35e809SDag-Erling Smørgrav 
319961a7b24SJohn Baldwin /*
320961a7b24SJohn Baldwin  * Set the owner of the lock this turnstile is attached to.
321961a7b24SJohn Baldwin  */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

	/* Caller holds td_contested_lock, which protects ts_owner/ts_link. */
	mtx_assert(&td_contested_lock, MA_OWNED);
	MPASS(owner->td_proc->p_magic == P_MAGIC);
	/* A turnstile may only be claimed while unowned. */
	MPASS(ts->ts_owner == NULL);
	ts->ts_owner = owner;
	/* Link onto the owner's list of contested locks for priority fixup. */
	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}
332961a7b24SJohn Baldwin 
333961a7b24SJohn Baldwin /*
334961a7b24SJohn Baldwin  * Malloc a turnstile for a new thread, initialize it and return it.
335961a7b24SJohn Baldwin  */
336961a7b24SJohn Baldwin struct turnstile *
337961a7b24SJohn Baldwin turnstile_alloc(void)
338961a7b24SJohn Baldwin {
339961a7b24SJohn Baldwin 	struct turnstile *ts;
340961a7b24SJohn Baldwin 
341961a7b24SJohn Baldwin 	ts = malloc(sizeof(struct turnstile), M_TURNSTILE, M_WAITOK | M_ZERO);
342961a7b24SJohn Baldwin 	TAILQ_INIT(&ts->ts_blocked);
343961a7b24SJohn Baldwin 	TAILQ_INIT(&ts->ts_pending);
344961a7b24SJohn Baldwin 	LIST_INIT(&ts->ts_free);
345961a7b24SJohn Baldwin 	return (ts);
346961a7b24SJohn Baldwin }
347961a7b24SJohn Baldwin 
348961a7b24SJohn Baldwin /*
349961a7b24SJohn Baldwin  * Free a turnstile when a thread is destroyed.
350961a7b24SJohn Baldwin  */
void
turnstile_free(struct turnstile *ts)
{

	/* The turnstile must be idle: no blocked and no pending waiters. */
	MPASS(ts != NULL);
	MPASS(TAILQ_EMPTY(&ts->ts_blocked));
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
	free(ts, M_TURNSTILE);
}
360961a7b24SJohn Baldwin 
361961a7b24SJohn Baldwin /*
362961a7b24SJohn Baldwin  * Look up the turnstile for a lock in the hash table locking the associated
363961a7b24SJohn Baldwin  * turnstile chain along the way.  Return with the turnstile chain locked.
364961a7b24SJohn Baldwin  * If no turnstile is found in the hash table, NULL is returned.
365961a7b24SJohn Baldwin  */
366961a7b24SJohn Baldwin struct turnstile *
367961a7b24SJohn Baldwin turnstile_lookup(struct lock_object *lock)
368961a7b24SJohn Baldwin {
369961a7b24SJohn Baldwin 	struct turnstile_chain *tc;
370961a7b24SJohn Baldwin 	struct turnstile *ts;
371961a7b24SJohn Baldwin 
372961a7b24SJohn Baldwin 	tc = TC_LOOKUP(lock);
373961a7b24SJohn Baldwin 	mtx_lock_spin(&tc->tc_lock);
374961a7b24SJohn Baldwin 	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
375961a7b24SJohn Baldwin 		if (ts->ts_lockobj == lock)
376961a7b24SJohn Baldwin 			return (ts);
377961a7b24SJohn Baldwin 	return (NULL);
378961a7b24SJohn Baldwin }
379961a7b24SJohn Baldwin 
380961a7b24SJohn Baldwin /*
381961a7b24SJohn Baldwin  * Unlock the turnstile chain associated with a given lock.
382961a7b24SJohn Baldwin  */
void
turnstile_release(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	/* Drop the chain lock acquired earlier by turnstile_lookup(). */
	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}
391961a7b24SJohn Baldwin 
392961a7b24SJohn Baldwin /*
393961a7b24SJohn Baldwin  * Take ownership of a turnstile and adjust the priority of the new
394961a7b24SJohn Baldwin  * owner appropriately.
395961a7b24SJohn Baldwin  */
void
turnstile_claim(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct thread *td, *owner;

	/* Caller must hold the chain lock (from turnstile_lookup()). */
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	owner = curthread;
	mtx_lock_spin(&td_contested_lock);
	turnstile_setowner(ts, owner);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * ts_blocked is sorted, so the head is the highest-priority
	 * waiter; grab it before dropping the chain lock.
	 */
	td = TAILQ_FIRST(&ts->ts_blocked);
	MPASS(td != NULL);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	/* Chain lock is released before taking sched_lock (lock order). */
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Update the priority of the new owner if needed.
	 */
	mtx_lock_spin(&sched_lock);
	if (td->td_priority < owner->td_priority)
		owner->td_priority = td->td_priority;
	mtx_unlock_spin(&sched_lock);
}
423961a7b24SJohn Baldwin 
424961a7b24SJohn Baldwin /*
425961a7b24SJohn Baldwin  * Block the current thread on the turnstile ts.  This function will context
426961a7b24SJohn Baldwin  * switch and not return until this thread has been woken back up.  This
427961a7b24SJohn Baldwin  * function must be called with the appropriate turnstile chain locked and
428961a7b24SJohn Baldwin  * will return with it unlocked.
429961a7b24SJohn Baldwin  */
void
turnstile_wait(struct turnstile *ts, struct lock_object *lock,
    struct thread *owner)
{
	struct turnstile_chain *tc;
	struct thread *td, *td1;

	td = curthread;
	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(td->td_turnstile != NULL);
	MPASS(owner != NULL);
	MPASS(owner->td_proc->p_magic == P_MAGIC);

	/* If the passed in turnstile is NULL, use this thread's turnstile. */
	if (ts == NULL) {
		/*
		 * We are the first waiter: donate our turnstile to the
		 * lock and hang it on the hash chain.
		 */
		ts = td->td_turnstile;
		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
		    ("thread's turnstile has pending threads"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked),
		    ("thread's turnstile has a non-empty queue"));
		KASSERT(LIST_EMPTY(&ts->ts_free),
		    ("thread's turnstile has a non-empty free list"));
		KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
		ts->ts_lockobj = lock;
		mtx_lock_spin(&td_contested_lock);
		TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		turnstile_setowner(ts, owner);
		mtx_unlock_spin(&td_contested_lock);
	} else {
		/*
		 * The lock already has a turnstile: insert ourselves into
		 * ts_blocked sorted by priority (head = highest priority)
		 * and donate our own turnstile to the free list.
		 */
		TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq)
			if (td1->td_priority > td->td_priority)
				break;
		mtx_lock_spin(&td_contested_lock);
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		else
			TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		MPASS(td->td_turnstile != NULL);
		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
		MPASS(owner == ts->ts_owner);
	}
	td->td_turnstile = NULL;
	mtx_unlock_spin(&tc->tc_lock);

	mtx_lock_spin(&sched_lock);
	/*
	 * Handle race condition where a thread on another CPU that owns
	 * lock 'lock' could have woken us in between us dropping the
	 * turnstile chain lock and acquiring the sched_lock.
	 */
	if (td->td_flags & TDF_TSNOBLOCK) {
		td->td_flags &= ~TDF_TSNOBLOCK;
		mtx_unlock_spin(&sched_lock);
		return;
	}

#ifdef notyet
	/*
	 * If we're borrowing an interrupted thread's VM context, we
	 * must clean up before going to sleep.
	 */
	if (td->td_ithd != NULL) {
		struct ithd *it = td->td_ithd;

		if (it->it_interrupted) {
			if (LOCK_LOG_TEST(lock, 0))
				CTR3(KTR_LOCK, "%s: %p interrupted %p",
				    __func__, it, it->it_interrupted);
			intr_thd_fixup(it);
		}
	}
#endif

	/* Save who we are blocked on and switch. */
	td->td_blocked = ts;
	td->td_lockname = lock->lo_name;
	TD_SET_LOCK(td);
	/* Lend our priority down the chain of lock owners before switching. */
	propagate_priority(td);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %p blocked on [%p] %s", __func__, td,
		    lock, lock->lo_name);

	/* Context switch away; we return here once woken back up. */
	mi_switch(SW_VOL);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %p free from blocked on [%p] %s",
		    __func__, td, lock, lock->lo_name);

	mtx_unlock_spin(&sched_lock);
}
5249ed346baSBosko Milekic 
525961a7b24SJohn Baldwin /*
526961a7b24SJohn Baldwin  * Pick the highest priority thread on this turnstile and put it on the
527961a7b24SJohn Baldwin  * pending list.  This must be called with the turnstile chain locked.
528961a7b24SJohn Baldwin  */
/*
 * Returns non-zero when the last waiter was moved to the pending list,
 * i.e. the turnstile is now empty and the caller should mark the lock
 * uncontested.
 */
int
turnstile_signal(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct thread *td;
	int empty;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	/* Only the lock owner may hand off the turnstile. */
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	/*
	 * Pick the highest priority thread blocked on this lock and
	 * move it to the pending list.
	 */
	td = TAILQ_FIRST(&ts->ts_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_lock_spin(&td_contested_lock);
	TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
	mtx_unlock_spin(&td_contested_lock);
	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

	/*
	 * If the turnstile is now empty, remove it from its chain and
	 * give it to the about-to-be-woken thread.  Otherwise take a
	 * turnstile from the free list and give it to the thread.
	 */
	empty = TAILQ_EMPTY(&ts->ts_blocked);
	if (empty)
		MPASS(LIST_EMPTY(&ts->ts_free));
	else
		ts = LIST_FIRST(&ts->ts_free);
	MPASS(ts != NULL);
	/* Works for both cases: chain list and free list share ts_hash. */
	LIST_REMOVE(ts, ts_hash);
	td->td_turnstile = ts;

	return (empty);
}
569961a7b24SJohn Baldwin 
570961a7b24SJohn Baldwin /*
571961a7b24SJohn Baldwin  * Put all blocked threads on the pending list.  This must be called with
572961a7b24SJohn Baldwin  * the turnstile chain locked.
573961a7b24SJohn Baldwin  */
574961a7b24SJohn Baldwin void
575961a7b24SJohn Baldwin turnstile_wakeup(struct turnstile *ts)
576961a7b24SJohn Baldwin {
577961a7b24SJohn Baldwin 	struct turnstile_chain *tc;
578961a7b24SJohn Baldwin 	struct turnstile *ts1;
579961a7b24SJohn Baldwin 	struct thread *td;
580961a7b24SJohn Baldwin 
581961a7b24SJohn Baldwin 	MPASS(ts != NULL);
582961a7b24SJohn Baldwin 	MPASS(curthread->td_proc->p_magic == P_MAGIC);
583961a7b24SJohn Baldwin 	MPASS(ts->ts_owner == curthread);
584961a7b24SJohn Baldwin 	tc = TC_LOOKUP(ts->ts_lockobj);
585961a7b24SJohn Baldwin 	mtx_assert(&tc->tc_lock, MA_OWNED);
586961a7b24SJohn Baldwin 
587961a7b24SJohn Baldwin 	/*
588961a7b24SJohn Baldwin 	 * Transfer the blocked list to the pending list.
589961a7b24SJohn Baldwin 	 */
590961a7b24SJohn Baldwin 	mtx_lock_spin(&td_contested_lock);
591961a7b24SJohn Baldwin 	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked, td_lockq);
592961a7b24SJohn Baldwin 	mtx_unlock_spin(&td_contested_lock);
593961a7b24SJohn Baldwin 
594961a7b24SJohn Baldwin 	/*
595961a7b24SJohn Baldwin 	 * Give a turnstile to each thread.  The last thread gets
596961a7b24SJohn Baldwin 	 * this turnstile.
597961a7b24SJohn Baldwin 	 */
598961a7b24SJohn Baldwin 	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
599961a7b24SJohn Baldwin 		if (LIST_EMPTY(&ts->ts_free)) {
600961a7b24SJohn Baldwin 			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
601961a7b24SJohn Baldwin 			ts1 = ts;
60236412d79SJohn Baldwin 		} else
603961a7b24SJohn Baldwin 			ts1 = LIST_FIRST(&ts->ts_free);
604da1d503bSJohn Baldwin 		MPASS(ts1 != NULL);
605961a7b24SJohn Baldwin 		LIST_REMOVE(ts1, ts_hash);
606961a7b24SJohn Baldwin 		td->td_turnstile = ts1;
607961a7b24SJohn Baldwin 	}
608961a7b24SJohn Baldwin }
6099ed346baSBosko Milekic 
610961a7b24SJohn Baldwin /*
611961a7b24SJohn Baldwin  * Wakeup all threads on the pending list and adjust the priority of the
612961a7b24SJohn Baldwin  * current thread appropriately.  This must be called with the turnstile
613961a7b24SJohn Baldwin  * chain locked.
614961a7b24SJohn Baldwin  */
615961a7b24SJohn Baldwin void
616961a7b24SJohn Baldwin turnstile_unpend(struct turnstile *ts)
617961a7b24SJohn Baldwin {
618961a7b24SJohn Baldwin 	TAILQ_HEAD( ,thread) pending_threads;
619961a7b24SJohn Baldwin 	struct turnstile_chain *tc;
620961a7b24SJohn Baldwin 	struct thread *td;
621961a7b24SJohn Baldwin 	int cp, pri;
622961a7b24SJohn Baldwin 
623961a7b24SJohn Baldwin 	MPASS(ts != NULL);
624961a7b24SJohn Baldwin 	MPASS(ts->ts_owner == curthread);
625961a7b24SJohn Baldwin 	tc = TC_LOOKUP(ts->ts_lockobj);
626961a7b24SJohn Baldwin 	mtx_assert(&tc->tc_lock, MA_OWNED);
627961a7b24SJohn Baldwin 	MPASS(!TAILQ_EMPTY(&ts->ts_pending));
628961a7b24SJohn Baldwin 
629961a7b24SJohn Baldwin 	/*
630961a7b24SJohn Baldwin 	 * Move the list of pending threads out of the turnstile and
631961a7b24SJohn Baldwin 	 * into a local variable.
632961a7b24SJohn Baldwin 	 */
633961a7b24SJohn Baldwin 	TAILQ_INIT(&pending_threads);
634961a7b24SJohn Baldwin 	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
635961a7b24SJohn Baldwin #ifdef INVARIANTS
636961a7b24SJohn Baldwin 	if (TAILQ_EMPTY(&ts->ts_blocked))
637961a7b24SJohn Baldwin 		ts->ts_lockobj = NULL;
638961a7b24SJohn Baldwin #endif
639961a7b24SJohn Baldwin 
640961a7b24SJohn Baldwin 	/*
641961a7b24SJohn Baldwin 	 * Remove the turnstile from this thread's list of contested locks
642961a7b24SJohn Baldwin 	 * since this thread doesn't own it anymore.  New threads will
643961a7b24SJohn Baldwin 	 * not be blocking on the turnstile until it is claimed by a new
644961a7b24SJohn Baldwin 	 * owner.
645961a7b24SJohn Baldwin 	 */
646961a7b24SJohn Baldwin 	mtx_lock_spin(&td_contested_lock);
647961a7b24SJohn Baldwin 	ts->ts_owner = NULL;
648961a7b24SJohn Baldwin 	LIST_REMOVE(ts, ts_link);
649961a7b24SJohn Baldwin 	mtx_unlock_spin(&td_contested_lock);
650961a7b24SJohn Baldwin 	mtx_unlock_spin(&tc->tc_lock);
651961a7b24SJohn Baldwin 
652961a7b24SJohn Baldwin 	/*
653961a7b24SJohn Baldwin 	 * Adjust the priority of curthread based on other contested
654961a7b24SJohn Baldwin 	 * locks it owns.  Don't lower the priority below the base
655961a7b24SJohn Baldwin 	 * priority however.
656961a7b24SJohn Baldwin 	 */
657961a7b24SJohn Baldwin 	td = curthread;
658d5a08a60SJake Burkholder 	pri = PRI_MAX;
659961a7b24SJohn Baldwin 	mtx_lock_spin(&sched_lock);
660961a7b24SJohn Baldwin 	mtx_lock_spin(&td_contested_lock);
661961a7b24SJohn Baldwin 	LIST_FOREACH(ts, &td->td_contested, ts_link) {
662961a7b24SJohn Baldwin 		cp = TAILQ_FIRST(&ts->ts_blocked)->td_priority;
66336412d79SJohn Baldwin 		if (cp < pri)
66436412d79SJohn Baldwin 			pri = cp;
66536412d79SJohn Baldwin 	}
666961a7b24SJohn Baldwin 	mtx_unlock_spin(&td_contested_lock);
6672c100766SJulian Elischer 	if (pri > td->td_base_pri)
6682c100766SJulian Elischer 		pri = td->td_base_pri;
6692c100766SJulian Elischer 	td->td_priority = pri;
6709ed346baSBosko Milekic 
671961a7b24SJohn Baldwin 	/*
672961a7b24SJohn Baldwin 	 * Wake up all the pending threads.  If a thread is not blocked
673961a7b24SJohn Baldwin 	 * on a lock, then it is currently executing on another CPU in
67467ba8678SJohn Baldwin 	 * turnstile_wait() or sitting on a run queue waiting to resume
67567ba8678SJohn Baldwin 	 * in turnstile_wait().  Set a flag to force it to try to acquire
676961a7b24SJohn Baldwin 	 * the lock again instead of blocking.
677961a7b24SJohn Baldwin 	 */
678961a7b24SJohn Baldwin 	while (!TAILQ_EMPTY(&pending_threads)) {
679961a7b24SJohn Baldwin 		td = TAILQ_FIRST(&pending_threads);
680961a7b24SJohn Baldwin 		TAILQ_REMOVE(&pending_threads, td, td_lockq);
681961a7b24SJohn Baldwin 		MPASS(td->td_proc->p_magic == P_MAGIC);
682961a7b24SJohn Baldwin 		if (TD_ON_LOCK(td)) {
683961a7b24SJohn Baldwin 			td->td_blocked = NULL;
684961a7b24SJohn Baldwin 			td->td_lockname = NULL;
685961a7b24SJohn Baldwin 			TD_CLR_LOCK(td);
686961a7b24SJohn Baldwin 			MPASS(TD_CAN_RUN(td));
687961a7b24SJohn Baldwin 			setrunqueue(td);
688961a7b24SJohn Baldwin 		} else {
689961a7b24SJohn Baldwin 			td->td_flags |= TDF_TSNOBLOCK;
69067ba8678SJohn Baldwin 			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
691961a7b24SJohn Baldwin 		}
692961a7b24SJohn Baldwin 	}
693e0817317SJulian Elischer 	mtx_unlock_spin(&sched_lock);
6949ed346baSBosko Milekic }
6959ed346baSBosko Milekic 
6969ed346baSBosko Milekic /*
697961a7b24SJohn Baldwin  * Return the first thread in a turnstile.
6989ed346baSBosko Milekic  */
699961a7b24SJohn Baldwin struct thread *
700961a7b24SJohn Baldwin turnstile_head(struct turnstile *ts)
7010cde2e34SJason Evans {
702961a7b24SJohn Baldwin #ifdef INVARIANTS
703961a7b24SJohn Baldwin 	struct turnstile_chain *tc;
7045cb0fbe4SJohn Baldwin 
705961a7b24SJohn Baldwin 	MPASS(ts != NULL);
706961a7b24SJohn Baldwin 	tc = TC_LOOKUP(ts->ts_lockobj);
707961a7b24SJohn Baldwin 	mtx_assert(&tc->tc_lock, MA_OWNED);
7080cde2e34SJason Evans #endif
709961a7b24SJohn Baldwin 	return (TAILQ_FIRST(&ts->ts_blocked));
710961a7b24SJohn Baldwin }
7110cde2e34SJason Evans 
7129ed346baSBosko Milekic /*
713961a7b24SJohn Baldwin  * Returns true if a turnstile is empty.
7149ed346baSBosko Milekic  */
715961a7b24SJohn Baldwin int
716961a7b24SJohn Baldwin turnstile_empty(struct turnstile *ts)
71736412d79SJohn Baldwin {
718961a7b24SJohn Baldwin #ifdef INVARIANTS
719961a7b24SJohn Baldwin 	struct turnstile_chain *tc;
72036412d79SJohn Baldwin 
721961a7b24SJohn Baldwin 	MPASS(ts != NULL);
722961a7b24SJohn Baldwin 	tc = TC_LOOKUP(ts->ts_lockobj);
723961a7b24SJohn Baldwin 	mtx_assert(&tc->tc_lock, MA_OWNED);
72436412d79SJohn Baldwin #endif
725961a7b24SJohn Baldwin 	return (TAILQ_EMPTY(&ts->ts_blocked));
726c53c013bSJohn Baldwin }
727