10384fff8SJason Evans /*- 20384fff8SJason Evans * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. 30384fff8SJason Evans * 40384fff8SJason Evans * Redistribution and use in source and binary forms, with or without 50384fff8SJason Evans * modification, are permitted provided that the following conditions 60384fff8SJason Evans * are met: 70384fff8SJason Evans * 1. Redistributions of source code must retain the above copyright 80384fff8SJason Evans * notice, this list of conditions and the following disclaimer. 90384fff8SJason Evans * 2. Redistributions in binary form must reproduce the above copyright 100384fff8SJason Evans * notice, this list of conditions and the following disclaimer in the 110384fff8SJason Evans * documentation and/or other materials provided with the distribution. 120384fff8SJason Evans * 3. Berkeley Software Design Inc's name may not be used to endorse or 130384fff8SJason Evans * promote products derived from this software without specific prior 140384fff8SJason Evans * written permission. 150384fff8SJason Evans * 160384fff8SJason Evans * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND 170384fff8SJason Evans * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 180384fff8SJason Evans * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 190384fff8SJason Evans * ARE DISCLAIMED. 
IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE 200384fff8SJason Evans * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 210384fff8SJason Evans * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 220384fff8SJason Evans * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 230384fff8SJason Evans * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 240384fff8SJason Evans * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 250384fff8SJason Evans * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 260384fff8SJason Evans * SUCH DAMAGE. 270384fff8SJason Evans * 280384fff8SJason Evans * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $ 2936412d79SJohn Baldwin * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ 300384fff8SJason Evans */ 310384fff8SJason Evans 320384fff8SJason Evans /* 33961a7b24SJohn Baldwin * Implementation of turnstiles used to hold queue of threads blocked on 34961a7b24SJohn Baldwin * non-sleepable locks. Sleepable locks use condition variables to 35961a7b24SJohn Baldwin * implement their queues. Turnstiles differ from a sleep queue in that 36961a7b24SJohn Baldwin * turnstile queue's are assigned to a lock held by an owning thread. Thus, 37961a7b24SJohn Baldwin * when one thread is enqueued onto a turnstile, it can lend its priority 38961a7b24SJohn Baldwin * to the owning thread. 39961a7b24SJohn Baldwin * 40961a7b24SJohn Baldwin * We wish to avoid bloating locks with an embedded turnstile and we do not 41961a7b24SJohn Baldwin * want to use back-pointers in the locks for the same reason. Thus, we 42961a7b24SJohn Baldwin * use a similar approach to that of Solaris 7 as described in Solaris 43961a7b24SJohn Baldwin * Internals by Jim Mauro and Richard McDougall. Turnstiles are looked up 44961a7b24SJohn Baldwin * in a hash table based on the address of the lock. 
Each entry in the
 * hash table is a linked list of turnstiles and is called a turnstile
 * chain.  Each chain contains a spin mutex that protects all of the
 * turnstiles in the chain.
 *
 * Each time a thread is created, a turnstile is malloc'd and attached to
 * that thread.  When a thread blocks on a lock, if it is the first thread
 * to block, it lends its turnstile to the lock.  If the lock already has
 * a turnstile, then it gives its turnstile to the lock's turnstile's free
 * list.  When a thread is woken up, it takes a turnstile from the free list
 * if there are any other waiters.  If it is the only thread blocked on the
 * lock, then it reclaims the turnstile associated with the lock and removes
 * it from the hash table.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/turnstile.h>
#include <sys/sched.h>

/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queues use the same value for the
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define	TC_TABLESIZE	128			/* Must be power of 2. */
#define	TC_MASK		(TC_TABLESIZE - 1)
#define	TC_SHIFT	8
#define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]

/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fixup our priority when
 * a lock is released.  The other two lists use the ts_hash entries.  The
 * first of these two is the turnstile chain list that a turnstile is on
 * when it is attached to a lock.  The second list to use ts_hash is the
 * free list hung off of a turnstile that is attached to a lock.
 *
 * Each turnstile contains two lists of threads.  The ts_blocked list is
 * a linked list of threads blocked on the turnstile's lock.  The
 * ts_pending list is a linked list of threads previously awakened by
 * turnstile_signal() or turnstile_wait() that are waiting to be put on
 * the run queue.
 *
 * Locking key:
 *  c - turnstile chain lock
 *  q - td_contested lock
 */
struct turnstile {
	TAILQ_HEAD(, thread) ts_blocked;	/* (c + q) Blocked threads. */
	TAILQ_HEAD(, thread) ts_pending;	/* (c) Pending threads. */
	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
	struct lock_object *ts_lockobj;		/* (c) Lock we reference. */
	struct thread *ts_owner;		/* (c + q) Who owns the lock. */
};

/*
 * One hash bucket: the list of turnstiles currently attached to locks that
 * hash here, plus the spin mutex (locking key 'c') protecting them all.
 */
struct turnstile_chain {
	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
	struct mtx tc_lock;			/* Spin lock for this chain. */
};

/* Locking key 'q': protects turnstile ownership and td_contested lists. */
static struct mtx td_contested_lock;

/* Hash table of turnstile chains, indexed via TC_HASH() of the lock. */
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];

MALLOC_DEFINE(M_TURNSTILE, "turnstiles", "turnstiles");

/*
 * Prototypes for non-exported routines.
 */
static void	init_turnstile0(void *dummy);
static void	propagate_priority(struct thread *);
static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);

/*
 * Walks the chain of turnstiles and their owners to propagate the priority
 * of the thread being blocked to all the threads holding locks that have to
 * release their locks before this thread can run again.
 *
 * td is the thread that is blocking; the caller must hold sched_lock.
 */
static void
propagate_priority(struct thread *td)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td1;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	pri = td->td_priority;
	ts = td->td_blocked;
	for (;;) {
		/* Walk up one link: td becomes the owner of the lock. */
		td = ts->ts_owner;

		if (td == NULL) {
			/*
			 * This really isn't quite right.  Really
			 * ought to bump priority of thread that
			 * next acquires the lock.
			 */
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		/*
		 * XXX: The owner of a turnstile can be stale if it is the
		 * first thread to grab a slock of a sx lock.  In that case
		 * it is possible for us to be at SSLEEP or some other
		 * weird state.  We should probably just return if the state
		 * isn't SRUN or SLOCK.
		 */
		KASSERT(!TD_IS_SLEEPING(td),
		    ("sleeping thread (pid %d) owns a non-sleepable lock",
		    td->td_proc->p_pid));

		/*
		 * If this thread already has higher priority than the
		 * thread that is being blocked, we are finished.
		 */
		if (td->td_priority <= pri)
			return;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (TD_IS_RUNNING(td)) {
			td->td_priority = pri;
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen however as it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			sched_prio(td, pri);
			return;
		}

		/*
		 * Bump this thread's priority.
		 */
		td->td_priority = pri;

		/*
		 * If we aren't blocked on a lock, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "process %d(%s):%d holds %s but isn't blocked on a lock\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
		    ts->ts_lockobj->lo_name));

		/*
		 * Pick up the lock that td is blocked on.
		 */
		ts = td->td_blocked;
		MPASS(ts != NULL);
		tc = TC_LOOKUP(ts->ts_lockobj);
		mtx_lock_spin(&tc->tc_lock);

		/*
		 * This thread may not be blocked on this turnstile anymore
		 * but instead might already be woken up on another CPU
		 * that is waiting on sched_lock in turnstile_unpend() to
		 * finish waking this thread up.  We can detect this case
		 * by checking to see if this thread has been given a
		 * turnstile by either turnstile_signal() or
		 * turnstile_wakeup().  In this case, treat the thread as
		 * if it was already running.
		 */
		if (td->td_turnstile != NULL) {
			mtx_unlock_spin(&tc->tc_lock);
			return;
		}

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.  It doesn't need to be moved
		 * if it is already at the head of the list or if
		 * the item in front of it still has a higher priority.
		 */
		if (td == TAILQ_FIRST(&ts->ts_blocked)) {
			mtx_unlock_spin(&tc->tc_lock);
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_lockq);
		if (td1->td_priority <= pri) {
			mtx_unlock_spin(&tc->tc_lock);
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		mtx_lock_spin(&td_contested_lock);
		TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
		TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		CTR4(KTR_LOCK,
		    "propagate_priority: td %p moved before %p on [%p] %s",
		    td, td1, ts->ts_lockobj, ts->ts_lockobj->lo_name);
		mtx_unlock_spin(&tc->tc_lock);
	}
}

/*
 * Early initialization of turnstiles.  This is not done via a SYSINIT()
 * since this needs to be initialized very early when mutexes are first
 * initialized.
2916283b7d0SJohn Baldwin */ 2926283b7d0SJohn Baldwin void 293961a7b24SJohn Baldwin init_turnstiles(void) 2946283b7d0SJohn Baldwin { 295961a7b24SJohn Baldwin int i; 2966283b7d0SJohn Baldwin 297961a7b24SJohn Baldwin for (i = 0; i < TC_TABLESIZE; i++) { 298961a7b24SJohn Baldwin LIST_INIT(&turnstile_chains[i].tc_turnstiles); 299961a7b24SJohn Baldwin mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain", 300961a7b24SJohn Baldwin NULL, MTX_SPIN); 3016c35e809SDag-Erling Smørgrav } 302961a7b24SJohn Baldwin mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN); 303961a7b24SJohn Baldwin thread0.td_turnstile = NULL; 3046283b7d0SJohn Baldwin } 3056283b7d0SJohn Baldwin 306961a7b24SJohn Baldwin static void 307961a7b24SJohn Baldwin init_turnstile0(void *dummy) 3086283b7d0SJohn Baldwin { 3096283b7d0SJohn Baldwin 310961a7b24SJohn Baldwin thread0.td_turnstile = turnstile_alloc(); 311961a7b24SJohn Baldwin } 312961a7b24SJohn Baldwin SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL); 3136c35e809SDag-Erling Smørgrav 314961a7b24SJohn Baldwin /* 315961a7b24SJohn Baldwin * Set the owner of the lock this turnstile is attached to. 316961a7b24SJohn Baldwin */ 317961a7b24SJohn Baldwin static void 318961a7b24SJohn Baldwin turnstile_setowner(struct turnstile *ts, struct thread *owner) 319961a7b24SJohn Baldwin { 320961a7b24SJohn Baldwin 321961a7b24SJohn Baldwin mtx_assert(&td_contested_lock, MA_OWNED); 322961a7b24SJohn Baldwin MPASS(owner->td_proc->p_magic == P_MAGIC); 323961a7b24SJohn Baldwin MPASS(ts->ts_owner == NULL); 324961a7b24SJohn Baldwin ts->ts_owner = owner; 325961a7b24SJohn Baldwin LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link); 326961a7b24SJohn Baldwin } 327961a7b24SJohn Baldwin 328961a7b24SJohn Baldwin /* 329961a7b24SJohn Baldwin * Malloc a turnstile for a new thread, initialize it and return it. 
330961a7b24SJohn Baldwin */ 331961a7b24SJohn Baldwin struct turnstile * 332961a7b24SJohn Baldwin turnstile_alloc(void) 333961a7b24SJohn Baldwin { 334961a7b24SJohn Baldwin struct turnstile *ts; 335961a7b24SJohn Baldwin 336961a7b24SJohn Baldwin ts = malloc(sizeof(struct turnstile), M_TURNSTILE, M_WAITOK | M_ZERO); 337961a7b24SJohn Baldwin TAILQ_INIT(&ts->ts_blocked); 338961a7b24SJohn Baldwin TAILQ_INIT(&ts->ts_pending); 339961a7b24SJohn Baldwin LIST_INIT(&ts->ts_free); 340961a7b24SJohn Baldwin return (ts); 341961a7b24SJohn Baldwin } 342961a7b24SJohn Baldwin 343961a7b24SJohn Baldwin /* 344961a7b24SJohn Baldwin * Free a turnstile when a thread is destroyed. 345961a7b24SJohn Baldwin */ 346961a7b24SJohn Baldwin void 347961a7b24SJohn Baldwin turnstile_free(struct turnstile *ts) 348961a7b24SJohn Baldwin { 349961a7b24SJohn Baldwin 350961a7b24SJohn Baldwin MPASS(ts != NULL); 351961a7b24SJohn Baldwin MPASS(TAILQ_EMPTY(&ts->ts_blocked)); 352961a7b24SJohn Baldwin MPASS(TAILQ_EMPTY(&ts->ts_pending)); 353961a7b24SJohn Baldwin free(ts, M_TURNSTILE); 354961a7b24SJohn Baldwin } 355961a7b24SJohn Baldwin 356961a7b24SJohn Baldwin /* 357961a7b24SJohn Baldwin * Look up the turnstile for a lock in the hash table locking the associated 358961a7b24SJohn Baldwin * turnstile chain along the way. Return with the turnstile chain locked. 359961a7b24SJohn Baldwin * If no turnstile is found in the hash table, NULL is returned. 
360961a7b24SJohn Baldwin */ 361961a7b24SJohn Baldwin struct turnstile * 362961a7b24SJohn Baldwin turnstile_lookup(struct lock_object *lock) 363961a7b24SJohn Baldwin { 364961a7b24SJohn Baldwin struct turnstile_chain *tc; 365961a7b24SJohn Baldwin struct turnstile *ts; 366961a7b24SJohn Baldwin 367961a7b24SJohn Baldwin tc = TC_LOOKUP(lock); 368961a7b24SJohn Baldwin mtx_lock_spin(&tc->tc_lock); 369961a7b24SJohn Baldwin LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash) 370961a7b24SJohn Baldwin if (ts->ts_lockobj == lock) 371961a7b24SJohn Baldwin return (ts); 372961a7b24SJohn Baldwin return (NULL); 373961a7b24SJohn Baldwin } 374961a7b24SJohn Baldwin 375961a7b24SJohn Baldwin /* 376961a7b24SJohn Baldwin * Unlock the turnstile chain associated with a given lock. 377961a7b24SJohn Baldwin */ 378961a7b24SJohn Baldwin void 379961a7b24SJohn Baldwin turnstile_release(struct lock_object *lock) 380961a7b24SJohn Baldwin { 381961a7b24SJohn Baldwin struct turnstile_chain *tc; 382961a7b24SJohn Baldwin 383961a7b24SJohn Baldwin tc = TC_LOOKUP(lock); 384961a7b24SJohn Baldwin mtx_unlock_spin(&tc->tc_lock); 385961a7b24SJohn Baldwin } 386961a7b24SJohn Baldwin 387961a7b24SJohn Baldwin /* 388961a7b24SJohn Baldwin * Take ownership of a turnstile and adjust the priority of the new 389961a7b24SJohn Baldwin * owner appropriately. 
 */
void
turnstile_claim(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct thread *td, *owner;

	/* Caller must already hold this turnstile's chain lock. */
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	owner = curthread;
	mtx_lock_spin(&td_contested_lock);
	turnstile_setowner(ts, owner);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * The head of ts_blocked is the highest priority waiter
	 * (turnstile_wait() inserts in priority order).  Note that the
	 * chain lock is dropped before we touch sched_lock.
	 */
	td = TAILQ_FIRST(&ts->ts_blocked);
	MPASS(td != NULL);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Update the priority of the new owner if needed.
	 */
	mtx_lock_spin(&sched_lock);
	if (td->td_priority < owner->td_priority)
		owner->td_priority = td->td_priority;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread on the turnstile ts.  This function will context
 * switch and not return until this thread has been woken back up.  This
 * function must be called with the appropriate turnstile chain locked and
 * will return with it unlocked.
 */
void
turnstile_wait(struct turnstile *ts, struct lock_object *lock,
    struct thread *owner)
{
	struct turnstile_chain *tc;
	struct thread *td, *td1;

	td = curthread;
	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(td->td_turnstile != NULL);
	MPASS(owner != NULL);
	MPASS(owner->td_proc->p_magic == P_MAGIC);

	/* If the passed in turnstile is NULL, use this thread's turnstile. */
	if (ts == NULL) {
		/*
		 * We are the first waiter: lend our turnstile to the lock
		 * and hash it on the chain.
		 */
		ts = td->td_turnstile;
		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
		    ("thread's turnstile has pending threads"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked),
		    ("thread's turnstile has a non-empty queue"));
		KASSERT(LIST_EMPTY(&ts->ts_free),
		    ("thread's turnstile has a non-empty free list"));
		KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
		ts->ts_lockobj = lock;
		mtx_lock_spin(&td_contested_lock);
		TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		turnstile_setowner(ts, owner);
		mtx_unlock_spin(&td_contested_lock);
	} else {
		/*
		 * Insert ourselves into ts_blocked in priority order and
		 * donate our spare turnstile to the lock's free list.
		 */
		TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq)
			if (td1->td_priority > td->td_priority)
				break;
		mtx_lock_spin(&td_contested_lock);
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		else
			TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		MPASS(td->td_turnstile != NULL);
		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
		MPASS(owner == ts->ts_owner);
	}
	td->td_turnstile = NULL;
	mtx_unlock_spin(&tc->tc_lock);

	mtx_lock_spin(&sched_lock);
	/*
	 * Handle race condition where a thread on another CPU that owns
	 * lock 'lock' could have woken us in between us dropping the
	 * turnstile chain lock and acquiring the sched_lock.
	 */
	if (td->td_flags & TDF_TSNOBLOCK) {
		td->td_flags &= ~TDF_TSNOBLOCK;
		mtx_unlock_spin(&sched_lock);
		return;
	}

#ifdef notyet
	/*
	 * If we're borrowing an interrupted thread's VM context, we
	 * must clean up before going to sleep.
	 */
	if (td->td_ithd != NULL) {
		struct ithd *it = td->td_ithd;

		if (it->it_interrupted) {
			if (LOCK_LOG_TEST(lock, 0))
				CTR3(KTR_LOCK, "%s: %p interrupted %p",
				    __func__, it, it->it_interrupted);
			intr_thd_fixup(it);
		}
	}
#endif

	/* Save who we are blocked on and switch. */
	td->td_blocked = ts;
	td->td_lockname = lock->lo_name;
	TD_SET_LOCK(td);
	propagate_priority(td);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %p blocked on [%p] %s", __func__, td,
		    lock, lock->lo_name);

	/* Voluntary context switch; we resume here once woken. */
	mi_switch(SW_VOL);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %p free from blocked on [%p] %s",
		    __func__, td, lock, lock->lo_name);

	mtx_unlock_spin(&sched_lock);
}

/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct thread *td;
	int empty;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	/*
	 * Pick the highest priority thread blocked on this lock and
	 * move it to the pending list.  (ts_blocked is kept in priority
	 * order by turnstile_wait(), so the head is the highest.)
	 */
	td = TAILQ_FIRST(&ts->ts_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_lock_spin(&td_contested_lock);
	TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
	mtx_unlock_spin(&td_contested_lock);
	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

	/*
	 * If the turnstile is now empty, remove it from its chain and
	 * give it to the about-to-be-woken thread.  Otherwise take a
	 * turnstile from the free list and give it to the thread.
	 */
	empty = TAILQ_EMPTY(&ts->ts_blocked);
	if (empty)
		MPASS(LIST_EMPTY(&ts->ts_free));
	else
		ts = LIST_FIRST(&ts->ts_free);
	MPASS(ts != NULL);
	LIST_REMOVE(ts, ts_hash);
	td->td_turnstile = ts;

	/* Non-zero means the woken thread was the last waiter. */
	return (empty);
}

/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_wakeup(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct turnstile *ts1;
	struct thread *td;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	/*
	 * Transfer the blocked list to the pending list.
	 */
	mtx_lock_spin(&td_contested_lock);
	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked, td_lockq);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Give a turnstile to each thread.  The last thread gets
	 * this turnstile.
592961a7b24SJohn Baldwin */ 593961a7b24SJohn Baldwin TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) { 594961a7b24SJohn Baldwin if (LIST_EMPTY(&ts->ts_free)) { 595961a7b24SJohn Baldwin MPASS(TAILQ_NEXT(td, td_lockq) == NULL); 596961a7b24SJohn Baldwin ts1 = ts; 59736412d79SJohn Baldwin } else 598961a7b24SJohn Baldwin ts1 = LIST_FIRST(&ts->ts_free); 599da1d503bSJohn Baldwin MPASS(ts1 != NULL); 600961a7b24SJohn Baldwin LIST_REMOVE(ts1, ts_hash); 601961a7b24SJohn Baldwin td->td_turnstile = ts1; 602961a7b24SJohn Baldwin } 603961a7b24SJohn Baldwin } 6049ed346baSBosko Milekic 605961a7b24SJohn Baldwin /* 606961a7b24SJohn Baldwin * Wakeup all threads on the pending list and adjust the priority of the 607961a7b24SJohn Baldwin * current thread appropriately. This must be called with the turnstile 608961a7b24SJohn Baldwin * chain locked. 609961a7b24SJohn Baldwin */ 610961a7b24SJohn Baldwin void 611961a7b24SJohn Baldwin turnstile_unpend(struct turnstile *ts) 612961a7b24SJohn Baldwin { 613961a7b24SJohn Baldwin TAILQ_HEAD( ,thread) pending_threads; 614961a7b24SJohn Baldwin struct turnstile_chain *tc; 615961a7b24SJohn Baldwin struct thread *td; 616961a7b24SJohn Baldwin int cp, pri; 617961a7b24SJohn Baldwin 618961a7b24SJohn Baldwin MPASS(ts != NULL); 619961a7b24SJohn Baldwin MPASS(ts->ts_owner == curthread); 620961a7b24SJohn Baldwin tc = TC_LOOKUP(ts->ts_lockobj); 621961a7b24SJohn Baldwin mtx_assert(&tc->tc_lock, MA_OWNED); 622961a7b24SJohn Baldwin MPASS(!TAILQ_EMPTY(&ts->ts_pending)); 623961a7b24SJohn Baldwin 624961a7b24SJohn Baldwin /* 625961a7b24SJohn Baldwin * Move the list of pending threads out of the turnstile and 626961a7b24SJohn Baldwin * into a local variable. 
/*
 * Wakeup all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked; note that the chain lock is released before this
 * function returns.
 */
void
turnstile_unpend(struct turnstile *ts)
{
	TAILQ_HEAD( ,thread) pending_threads;
	struct turnstile_chain *tc;
	struct thread *td;
	int cp, pri;

	MPASS(ts != NULL);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(!TAILQ_EMPTY(&ts->ts_pending));

	/*
	 * Move the list of pending threads out of the turnstile and
	 * into a local variable.
	 */
	TAILQ_INIT(&pending_threads);
	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
	/*
	 * NOTE(review): with no blocked threads left, ts_lockobj is
	 * stale; clearing it here presumably lets the INVARIANTS
	 * checks catch a reuse of the pointer -- confirm intent.
	 */
	if (TAILQ_EMPTY(&ts->ts_blocked))
		ts->ts_lockobj = NULL;
#endif

	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.
	 */
	mtx_lock_spin(&td_contested_lock);
	ts->ts_owner = NULL;
	LIST_REMOVE(ts, ts_link);
	mtx_unlock_spin(&td_contested_lock);
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority however.  Lower numeric values are higher priority
	 * (the comparison with td_base_pri below relies on this), so
	 * we take the minimum td_priority over the heads of the
	 * remaining contested turnstiles, starting from PRI_MAX.
	 */
	td = curthread;
	pri = PRI_MAX;
	mtx_lock_spin(&sched_lock);
	mtx_lock_spin(&td_contested_lock);
	LIST_FOREACH(ts, &td->td_contested, ts_link) {
		cp = TAILQ_FIRST(&ts->ts_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}
	mtx_unlock_spin(&td_contested_lock);
	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	/*
	 * Wake up all the pending threads.  If a thread is not blocked
	 * on a lock, then it is currently executing on another CPU in
	 * turnstile_wait() or sitting on a run queue waiting to resume
	 * in turnstile_wait().  Set a flag to force it to try to acquire
	 * the lock again instead of blocking.
	 */
	while (!TAILQ_EMPTY(&pending_threads)) {
		td = TAILQ_FIRST(&pending_threads);
		TAILQ_REMOVE(&pending_threads, td, td_lockq);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		if (TD_ON_LOCK(td)) {
			/* Clear the lock-blocked state and make runnable. */
			td->td_blocked = NULL;
			td->td_lockname = NULL;
			TD_CLR_LOCK(td);
			MPASS(TD_CAN_RUN(td));
			setrunqueue(td);
		} else {
			/* Racing in turnstile_wait(); tell it not to block. */
			td->td_flags |= TDF_TSNOBLOCK;
			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
		}
	}
	mtx_unlock_spin(&sched_lock);
}
672961a7b24SJohn Baldwin */ 673961a7b24SJohn Baldwin while (!TAILQ_EMPTY(&pending_threads)) { 674961a7b24SJohn Baldwin td = TAILQ_FIRST(&pending_threads); 675961a7b24SJohn Baldwin TAILQ_REMOVE(&pending_threads, td, td_lockq); 676961a7b24SJohn Baldwin MPASS(td->td_proc->p_magic == P_MAGIC); 677961a7b24SJohn Baldwin if (TD_ON_LOCK(td)) { 678961a7b24SJohn Baldwin td->td_blocked = NULL; 679961a7b24SJohn Baldwin td->td_lockname = NULL; 680961a7b24SJohn Baldwin TD_CLR_LOCK(td); 681961a7b24SJohn Baldwin MPASS(TD_CAN_RUN(td)); 682961a7b24SJohn Baldwin setrunqueue(td); 683961a7b24SJohn Baldwin } else { 684961a7b24SJohn Baldwin td->td_flags |= TDF_TSNOBLOCK; 68567ba8678SJohn Baldwin MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td)); 686961a7b24SJohn Baldwin } 687961a7b24SJohn Baldwin } 688e0817317SJulian Elischer mtx_unlock_spin(&sched_lock); 6899ed346baSBosko Milekic } 6909ed346baSBosko Milekic 6919ed346baSBosko Milekic /* 692961a7b24SJohn Baldwin * Return the first thread in a turnstile. 6939ed346baSBosko Milekic */ 694961a7b24SJohn Baldwin struct thread * 695961a7b24SJohn Baldwin turnstile_head(struct turnstile *ts) 6960cde2e34SJason Evans { 697961a7b24SJohn Baldwin #ifdef INVARIANTS 698961a7b24SJohn Baldwin struct turnstile_chain *tc; 6995cb0fbe4SJohn Baldwin 700961a7b24SJohn Baldwin MPASS(ts != NULL); 701961a7b24SJohn Baldwin tc = TC_LOOKUP(ts->ts_lockobj); 702961a7b24SJohn Baldwin mtx_assert(&tc->tc_lock, MA_OWNED); 7030cde2e34SJason Evans #endif 704961a7b24SJohn Baldwin return (TAILQ_FIRST(&ts->ts_blocked)); 705961a7b24SJohn Baldwin } 7060cde2e34SJason Evans 7079ed346baSBosko Milekic /* 708961a7b24SJohn Baldwin * Returns true if a turnstile is empty. 
7099ed346baSBosko Milekic */ 710961a7b24SJohn Baldwin int 711961a7b24SJohn Baldwin turnstile_empty(struct turnstile *ts) 71236412d79SJohn Baldwin { 713961a7b24SJohn Baldwin #ifdef INVARIANTS 714961a7b24SJohn Baldwin struct turnstile_chain *tc; 71536412d79SJohn Baldwin 716961a7b24SJohn Baldwin MPASS(ts != NULL); 717961a7b24SJohn Baldwin tc = TC_LOOKUP(ts->ts_lockobj); 718961a7b24SJohn Baldwin mtx_assert(&tc->tc_lock, MA_OWNED); 71936412d79SJohn Baldwin #endif 720961a7b24SJohn Baldwin return (TAILQ_EMPTY(&ts->ts_blocked)); 721c53c013bSJohn Baldwin } 722