10384fff8SJason Evans /*- 20384fff8SJason Evans * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. 30384fff8SJason Evans * 40384fff8SJason Evans * Redistribution and use in source and binary forms, with or without 50384fff8SJason Evans * modification, are permitted provided that the following conditions 60384fff8SJason Evans * are met: 70384fff8SJason Evans * 1. Redistributions of source code must retain the above copyright 80384fff8SJason Evans * notice, this list of conditions and the following disclaimer. 90384fff8SJason Evans * 2. Redistributions in binary form must reproduce the above copyright 100384fff8SJason Evans * notice, this list of conditions and the following disclaimer in the 110384fff8SJason Evans * documentation and/or other materials provided with the distribution. 120384fff8SJason Evans * 3. Berkeley Software Design Inc's name may not be used to endorse or 130384fff8SJason Evans * promote products derived from this software without specific prior 140384fff8SJason Evans * written permission. 150384fff8SJason Evans * 160384fff8SJason Evans * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND 170384fff8SJason Evans * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 180384fff8SJason Evans * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 190384fff8SJason Evans * ARE DISCLAIMED. 
IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE 200384fff8SJason Evans * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 210384fff8SJason Evans * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 220384fff8SJason Evans * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 230384fff8SJason Evans * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 240384fff8SJason Evans * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 250384fff8SJason Evans * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 260384fff8SJason Evans * SUCH DAMAGE. 270384fff8SJason Evans * 280384fff8SJason Evans * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $ 2936412d79SJohn Baldwin * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ 300384fff8SJason Evans */ 310384fff8SJason Evans 320384fff8SJason Evans /* 33961a7b24SJohn Baldwin * Implementation of turnstiles used to hold queue of threads blocked on 34961a7b24SJohn Baldwin * non-sleepable locks. Sleepable locks use condition variables to 35961a7b24SJohn Baldwin * implement their queues. Turnstiles differ from a sleep queue in that 36961a7b24SJohn Baldwin * turnstile queue's are assigned to a lock held by an owning thread. Thus, 37961a7b24SJohn Baldwin * when one thread is enqueued onto a turnstile, it can lend its priority 38961a7b24SJohn Baldwin * to the owning thread. 39961a7b24SJohn Baldwin * 40961a7b24SJohn Baldwin * We wish to avoid bloating locks with an embedded turnstile and we do not 41961a7b24SJohn Baldwin * want to use back-pointers in the locks for the same reason. Thus, we 42961a7b24SJohn Baldwin * use a similar approach to that of Solaris 7 as described in Solaris 43961a7b24SJohn Baldwin * Internals by Jim Mauro and Richard McDougall. Turnstiles are looked up 44961a7b24SJohn Baldwin * in a hash table based on the address of the lock. 
Each entry in the 45961a7b24SJohn Baldwin * hash table is a linked-lists of turnstiles and is called a turnstile 46961a7b24SJohn Baldwin * chain. Each chain contains a spin mutex that protects all of the 47961a7b24SJohn Baldwin * turnstiles in the chain. 48961a7b24SJohn Baldwin * 49961a7b24SJohn Baldwin * Each time a thread is created, a turnstile is malloc'd and attached to 50961a7b24SJohn Baldwin * that thread. When a thread blocks on a lock, if it is the first thread 51961a7b24SJohn Baldwin * to block, it lends its turnstile to the lock. If the lock already has 52961a7b24SJohn Baldwin * a turnstile, then it gives its turnstile to the lock's turnstile's free 53861a7db5SJohn Baldwin * list. When a thread is woken up, it takes a turnstile from the free list 54961a7b24SJohn Baldwin * if there are any other waiters. If it is the only thread blocked on the 55961a7b24SJohn Baldwin * lock, then it reclaims the turnstile associated with the lock and removes 56961a7b24SJohn Baldwin * it from the hash table. 570384fff8SJason Evans */ 580384fff8SJason Evans 59ef0ebfc3SJohn Baldwin #include "opt_turnstile_profiling.h" 60ef0ebfc3SJohn Baldwin 61677b542eSDavid E. O'Brien #include <sys/cdefs.h> 62677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 63677b542eSDavid E. 
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queue's use the same value for the
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define	TC_TABLESIZE	128			/* Must be power of 2. */
#define	TC_MASK		(TC_TABLESIZE - 1)
#define	TC_SHIFT	8
/* Hash a lock address into a chain index by dropping the low TC_SHIFT bits. */
#define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]

/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fixup our priority when
 * a lock is released.  The other two lists use the ts_hash entries.
The 955b7de7e1SJohn Baldwin * first of these two is the turnstile chain list that a turnstile is on 965b7de7e1SJohn Baldwin * when it is attached to a lock. The second list to use ts_hash is the 975b7de7e1SJohn Baldwin * free list hung off of a turnstile that is attached to a lock. 98961a7b24SJohn Baldwin * 99961a7b24SJohn Baldwin * Each turnstile contains two lists of threads. The ts_blocked list is 100961a7b24SJohn Baldwin * a linked list of threads blocked on the turnstile's lock. The 101595bc82aSJohn Baldwin * ts_pending list is a linked list of threads previously awakened by 102961a7b24SJohn Baldwin * turnstile_signal() or turnstile_wait() that are waiting to be put on 103961a7b24SJohn Baldwin * the run queue. 104961a7b24SJohn Baldwin * 105961a7b24SJohn Baldwin * Locking key: 106961a7b24SJohn Baldwin * c - turnstile chain lock 107961a7b24SJohn Baldwin * q - td_contested lock 1080cde2e34SJason Evans */ 109961a7b24SJohn Baldwin struct turnstile { 110961a7b24SJohn Baldwin TAILQ_HEAD(, thread) ts_blocked; /* (c + q) Blocked threads. */ 111961a7b24SJohn Baldwin TAILQ_HEAD(, thread) ts_pending; /* (c) Pending threads. */ 112961a7b24SJohn Baldwin LIST_ENTRY(turnstile) ts_hash; /* (c) Chain and free list. */ 113961a7b24SJohn Baldwin LIST_ENTRY(turnstile) ts_link; /* (q) Contested locks. */ 114961a7b24SJohn Baldwin LIST_HEAD(, turnstile) ts_free; /* (c) Free turnstiles. */ 115961a7b24SJohn Baldwin struct lock_object *ts_lockobj; /* (c) Lock we reference. */ 11679a13d01SJohn Baldwin struct thread *ts_owner; /* (c + q) Who owns the lock. */ 1178484de75SJohn Baldwin }; 1188484de75SJohn Baldwin 119961a7b24SJohn Baldwin struct turnstile_chain { 120961a7b24SJohn Baldwin LIST_HEAD(, turnstile) tc_turnstiles; /* List of turnstiles. */ 121961a7b24SJohn Baldwin struct mtx tc_lock; /* Spin lock for this chain. */ 122ef0ebfc3SJohn Baldwin #ifdef TURNSTILE_PROFILING 123ef0ebfc3SJohn Baldwin u_int tc_depth; /* Length of tc_queues. 
*/ 124ef0ebfc3SJohn Baldwin u_int tc_max_depth; /* Max length of tc_queues. */ 125ef0ebfc3SJohn Baldwin #endif 126961a7b24SJohn Baldwin }; 127961a7b24SJohn Baldwin 128ef0ebfc3SJohn Baldwin #ifdef TURNSTILE_PROFILING 129ef0ebfc3SJohn Baldwin u_int turnstile_max_depth; 130ef0ebfc3SJohn Baldwin SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD, 0, "turnstile profiling"); 131ef0ebfc3SJohn Baldwin SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains, CTLFLAG_RD, 0, 132ef0ebfc3SJohn Baldwin "turnstile chain stats"); 133ef0ebfc3SJohn Baldwin SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD, 134ef0ebfc3SJohn Baldwin &turnstile_max_depth, 0, "maxmimum depth achieved of a single chain"); 135ef0ebfc3SJohn Baldwin #endif 136961a7b24SJohn Baldwin static struct mtx td_contested_lock; 137961a7b24SJohn Baldwin static struct turnstile_chain turnstile_chains[TC_TABLESIZE]; 138961a7b24SJohn Baldwin 139961a7b24SJohn Baldwin MALLOC_DEFINE(M_TURNSTILE, "turnstiles", "turnstiles"); 140c53c013bSJohn Baldwin 141c53c013bSJohn Baldwin /* 1429ed346baSBosko Milekic * Prototypes for non-exported routines. 1439ed346baSBosko Milekic */ 144961a7b24SJohn Baldwin static void init_turnstile0(void *dummy); 14501bd10e1SJohn Baldwin #ifdef TURNSTILE_PROFILING 14601bd10e1SJohn Baldwin static void init_turnstile_profiling(void *arg); 14701bd10e1SJohn Baldwin #endif 148b40ce416SJulian Elischer static void propagate_priority(struct thread *); 149961a7b24SJohn Baldwin static void turnstile_setowner(struct turnstile *ts, struct thread *owner); 15036412d79SJohn Baldwin 151961a7b24SJohn Baldwin /* 152961a7b24SJohn Baldwin * Walks the chain of turnstiles and their owners to propagate the priority 153961a7b24SJohn Baldwin * of the thread being blocked to all the threads holding locks that have to 154961a7b24SJohn Baldwin * release their locks before this thread can run again. 
 */
static void
propagate_priority(struct thread *td)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td1;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	/* The priority being propagated; lower numeric value == higher priority. */
	pri = td->td_priority;
	ts = td->td_blocked;
	/* Walk up the chain of turnstiles: each iteration moves to the owner. */
	for (;;) {
		td = ts->ts_owner;

		if (td == NULL) {
			/*
			 * This really isn't quite right. Really
			 * ought to bump priority of thread that
			 * next acquires the lock.
			 */
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		/*
		 * XXX: The owner of a turnstile can be stale if it is the
		 * first thread to grab a slock of a sx lock.  In that case
		 * it is possible for us to be at SSLEEP or some other
		 * weird state.  We should probably just return if the state
		 * isn't SRUN or SLOCK.
		 */
		KASSERT(!TD_IS_SLEEPING(td),
		    ("sleeping thread (pid %d) owns a non-sleepable lock",
		    td->td_proc->p_pid));

		/*
		 * If this thread already has higher priority than the
		 * thread that is being blocked, we are finished.
		 */
		if (td->td_priority <= pri)
			return;

		/*
		 * If lock holder is actually running, just bump priority.
		 */
		if (TD_IS_RUNNING(td)) {
			td->td_priority = pri;
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen however as it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If on run queue move to new run queue, and quit.
		 * XXXKSE this gets a lot more complicated under threads
		 * but try anyhow.
		 */
		if (TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			sched_prio(td, pri);
			return;
		}

		/*
		 * Bump this thread's priority.
		 */
		td->td_priority = pri;

		/*
		 * If we aren't blocked on a lock, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "process %d(%s):%d holds %s but isn't blocked on a lock\n",
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
		    ts->ts_lockobj->lo_name));

		/*
		 * Pick up the lock that td is blocked on.
		 */
		ts = td->td_blocked;
		MPASS(ts != NULL);
		tc = TC_LOOKUP(ts->ts_lockobj);
		mtx_lock_spin(&tc->tc_lock);

		/*
		 * This thread may not be blocked on this turnstile anymore
		 * but instead might already be woken up on another CPU
		 * that is waiting on sched_lock in turnstile_unpend() to
		 * finish waking this thread up.  We can detect this case
		 * by checking to see if this thread has been given a
		 * turnstile by either turnstile_signal() or
		 * turnstile_broadcast().  In this case, treat the thread as
		 * if it was already running.
		 */
		if (td->td_turnstile != NULL) {
			mtx_unlock_spin(&tc->tc_lock);
			return;
		}

		/*
		 * Check if the thread needs to be moved up on
		 * the blocked chain.  It doesn't need to be moved
		 * if it is already at the head of the list or if
		 * the item in front of it still has a higher priority.
		 */
		if (td == TAILQ_FIRST(&ts->ts_blocked)) {
			mtx_unlock_spin(&tc->tc_lock);
			continue;
		}

		td1 = TAILQ_PREV(td, threadqueue, td_lockq);
		if (td1->td_priority <= pri) {
			mtx_unlock_spin(&tc->tc_lock);
			continue;
		}

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved up to.  Since we know that td1 has
		 * a lower priority than td, we know that at least one
		 * thread in the chain has a lower priority and that
		 * td1 will thus not be NULL after the loop.
		 */
		mtx_lock_spin(&td_contested_lock);
		TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
		TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > pri)
				break;
		}

		MPASS(td1 != NULL);
		TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		CTR4(KTR_LOCK,
		    "propagate_priority: td %p moved before %p on [%p] %s",
		    td, td1, ts->ts_lockobj, ts->ts_lockobj->lo_name);
		mtx_unlock_spin(&tc->tc_lock);
	}
}

/*
 * Early initialization of turnstiles.
 * This is not done via a SYSINIT() since this needs to be initialized
 * very early when mutexes are first initialized.
 */
void
init_turnstiles(void)
{
	int i;

	/* Initialize every chain's list and its protecting spin mutex. */
	for (i = 0; i < TC_TABLESIZE; i++) {
		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
		    NULL, MTX_SPIN);
	}
	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
	/* thread0's turnstile is allocated later by init_turnstile0(). */
	thread0.td_turnstile = NULL;
}

#ifdef TURNSTILE_PROFILING
/*
 * Register per-chain depth statistics under debug.turnstile.chains.<i>.
 */
static void
init_turnstile_profiling(void *arg)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "turnstile chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
		    NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
		    0, NULL);
	}
}
SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_turnstile_profiling, NULL);
#endif

/*
 * Allocate thread0's turnstile via SYSINIT, once turnstile_alloc() can
 * be called (init_turnstiles() left it NULL).
 */
static void
init_turnstile0(void *dummy)
{

	thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);

/*
 * Set the owner of the lock this turnstile is attached to.
 */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

	mtx_assert(&td_contested_lock, MA_OWNED);
	MPASS(owner->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == NULL);
	ts->ts_owner = owner;
	/* Link onto the owner's per-thread list of contested locks. */
	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}

/*
 * Malloc a turnstile for a new thread, initialize it and return it.
373961a7b24SJohn Baldwin */ 374961a7b24SJohn Baldwin struct turnstile * 375961a7b24SJohn Baldwin turnstile_alloc(void) 376961a7b24SJohn Baldwin { 377961a7b24SJohn Baldwin struct turnstile *ts; 378961a7b24SJohn Baldwin 379961a7b24SJohn Baldwin ts = malloc(sizeof(struct turnstile), M_TURNSTILE, M_WAITOK | M_ZERO); 380961a7b24SJohn Baldwin TAILQ_INIT(&ts->ts_blocked); 381961a7b24SJohn Baldwin TAILQ_INIT(&ts->ts_pending); 382961a7b24SJohn Baldwin LIST_INIT(&ts->ts_free); 383961a7b24SJohn Baldwin return (ts); 384961a7b24SJohn Baldwin } 385961a7b24SJohn Baldwin 386961a7b24SJohn Baldwin /* 387961a7b24SJohn Baldwin * Free a turnstile when a thread is destroyed. 388961a7b24SJohn Baldwin */ 389961a7b24SJohn Baldwin void 390961a7b24SJohn Baldwin turnstile_free(struct turnstile *ts) 391961a7b24SJohn Baldwin { 392961a7b24SJohn Baldwin 393961a7b24SJohn Baldwin MPASS(ts != NULL); 394961a7b24SJohn Baldwin MPASS(TAILQ_EMPTY(&ts->ts_blocked)); 395961a7b24SJohn Baldwin MPASS(TAILQ_EMPTY(&ts->ts_pending)); 396961a7b24SJohn Baldwin free(ts, M_TURNSTILE); 397961a7b24SJohn Baldwin } 398961a7b24SJohn Baldwin 399961a7b24SJohn Baldwin /* 4002ff0e645SJohn Baldwin * Lock the turnstile chain associated with the specified lock. 4012ff0e645SJohn Baldwin */ 4022ff0e645SJohn Baldwin void 4032ff0e645SJohn Baldwin turnstile_lock(struct lock_object *lock) 4042ff0e645SJohn Baldwin { 4052ff0e645SJohn Baldwin struct turnstile_chain *tc; 4062ff0e645SJohn Baldwin 4072ff0e645SJohn Baldwin tc = TC_LOOKUP(lock); 4082ff0e645SJohn Baldwin mtx_lock_spin(&tc->tc_lock); 4092ff0e645SJohn Baldwin } 4102ff0e645SJohn Baldwin 4112ff0e645SJohn Baldwin /* 412961a7b24SJohn Baldwin * Look up the turnstile for a lock in the hash table locking the associated 4132ff0e645SJohn Baldwin * turnstile chain along the way. If no turnstile is found in the hash 4142ff0e645SJohn Baldwin * table, NULL is returned. 
415961a7b24SJohn Baldwin */ 416961a7b24SJohn Baldwin struct turnstile * 417961a7b24SJohn Baldwin turnstile_lookup(struct lock_object *lock) 418961a7b24SJohn Baldwin { 419961a7b24SJohn Baldwin struct turnstile_chain *tc; 420961a7b24SJohn Baldwin struct turnstile *ts; 421961a7b24SJohn Baldwin 422961a7b24SJohn Baldwin tc = TC_LOOKUP(lock); 4232ff0e645SJohn Baldwin mtx_assert(&tc->tc_lock, MA_OWNED); 424961a7b24SJohn Baldwin LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash) 425961a7b24SJohn Baldwin if (ts->ts_lockobj == lock) 426961a7b24SJohn Baldwin return (ts); 427961a7b24SJohn Baldwin return (NULL); 428961a7b24SJohn Baldwin } 429961a7b24SJohn Baldwin 430961a7b24SJohn Baldwin /* 431961a7b24SJohn Baldwin * Unlock the turnstile chain associated with a given lock. 432961a7b24SJohn Baldwin */ 433961a7b24SJohn Baldwin void 434961a7b24SJohn Baldwin turnstile_release(struct lock_object *lock) 435961a7b24SJohn Baldwin { 436961a7b24SJohn Baldwin struct turnstile_chain *tc; 437961a7b24SJohn Baldwin 438961a7b24SJohn Baldwin tc = TC_LOOKUP(lock); 439961a7b24SJohn Baldwin mtx_unlock_spin(&tc->tc_lock); 440961a7b24SJohn Baldwin } 441961a7b24SJohn Baldwin 442961a7b24SJohn Baldwin /* 443961a7b24SJohn Baldwin * Take ownership of a turnstile and adjust the priority of the new 444961a7b24SJohn Baldwin * owner appropriately. 
 */
void
turnstile_claim(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td, *owner;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	ts = turnstile_lookup(lock);
	MPASS(ts != NULL);

	owner = curthread;
	/* td_contested_lock nests inside the chain lock. */
	mtx_lock_spin(&td_contested_lock);
	turnstile_setowner(ts, owner);
	mtx_unlock_spin(&td_contested_lock);

	/* ts_blocked is kept priority sorted; the head is the top waiter. */
	td = TAILQ_FIRST(&ts->ts_blocked);
	MPASS(td != NULL);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Update the priority of the new owner if needed.
	 */
	mtx_lock_spin(&sched_lock);
	if (td->td_priority < owner->td_priority)
		owner->td_priority = td->td_priority;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread on the turnstile associated with 'lock'.  This
 * function will context switch and not return until this thread has been
 * woken back up.  This function must be called with the appropriate
 * turnstile chain locked and will return with it unlocked.
 */
void
turnstile_wait(struct lock_object *lock, struct thread *owner)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td, *td1;

	td = curthread;
	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(td->td_turnstile != NULL);
	MPASS(owner != NULL);
	MPASS(owner->td_proc->p_magic == P_MAGIC);

	/* Look up the turnstile associated with the lock 'lock'. */
	ts = turnstile_lookup(lock);

	/*
	 * If the lock does not already have a turnstile, use this thread's
	 * turnstile.  Otherwise insert the current thread into the
	 * turnstile already in use by this lock.
	 */
	if (ts == NULL) {
#ifdef TURNSTILE_PROFILING
		tc->tc_depth++;
		if (tc->tc_depth > tc->tc_max_depth) {
			tc->tc_max_depth = tc->tc_depth;
			if (tc->tc_max_depth > turnstile_max_depth)
				turnstile_max_depth = tc->tc_max_depth;
		}
#endif
		ts = td->td_turnstile;
		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
		    ("thread's turnstile has pending threads"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked),
		    ("thread's turnstile has a non-empty queue"));
		KASSERT(LIST_EMPTY(&ts->ts_free),
		    ("thread's turnstile has a non-empty free list"));
		KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
		ts->ts_lockobj = lock;
		mtx_lock_spin(&td_contested_lock);
		TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		turnstile_setowner(ts, owner);
		mtx_unlock_spin(&td_contested_lock);
	} else {
		/* Keep ts_blocked sorted: highest priority (lowest value) first. */
		TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq)
			if (td1->td_priority > td->td_priority)
				break;
		mtx_lock_spin(&td_contested_lock);
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		else
			TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		/* Donate our spare turnstile to the lock's free list. */
		MPASS(td->td_turnstile != NULL);
		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
		MPASS(owner == ts->ts_owner);
	}
	td->td_turnstile = NULL;
	mtx_unlock_spin(&tc->tc_lock);

	mtx_lock_spin(&sched_lock);
	/*
	 * Handle race condition where a thread on another CPU that owns
	 * lock 'lock' could have woken us in between us dropping the
	 * turnstile chain lock and acquiring the sched_lock.
	 */
	if (td->td_flags & TDF_TSNOBLOCK) {
		td->td_flags &= ~TDF_TSNOBLOCK;
		mtx_unlock_spin(&sched_lock);
		return;
	}

#ifdef notyet
	/*
	 * If we're borrowing an interrupted thread's VM context, we
	 * must clean up before going to sleep.
	 */
	if (td->td_ithd != NULL) {
		struct ithd *it = td->td_ithd;

		if (it->it_interrupted) {
			if (LOCK_LOG_TEST(lock, 0))
				CTR3(KTR_LOCK, "%s: %p interrupted %p",
				    __func__, it, it->it_interrupted);
			intr_thd_fixup(it);
		}
	}
#endif

	/* Save who we are blocked on and switch. */
	td->td_blocked = ts;
	td->td_lockname = lock->lo_name;
	TD_SET_LOCK(td);
	propagate_priority(td);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %p blocked on [%p] %s", __func__, td,
		    lock, lock->lo_name);

	mi_switch(SW_VOL, NULL);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %p free from blocked on [%p] %s",
		    __func__, td, lock, lock->lo_name);

	mtx_unlock_spin(&sched_lock);
}

/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct thread *td;
	int empty;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	/*
	 * Pick the highest priority thread blocked on this lock and
	 * move it to the pending list.
	 */
	td = TAILQ_FIRST(&ts->ts_blocked);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_lock_spin(&td_contested_lock);
	TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
	mtx_unlock_spin(&td_contested_lock);
	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

	/*
	 * If the turnstile is now empty, remove it from its chain and
	 * give it to the about-to-be-woken thread.  Otherwise take a
	 * turnstile from the free list and give it to the thread.
	 */
	empty = TAILQ_EMPTY(&ts->ts_blocked);
	if (empty) {
		MPASS(LIST_EMPTY(&ts->ts_free));
#ifdef TURNSTILE_PROFILING
		tc->tc_depth--;
#endif
	} else
		ts = LIST_FIRST(&ts->ts_free);
	MPASS(ts != NULL);
	LIST_REMOVE(ts, ts_hash);
	/* The woken thread leaves with a turnstile of its own again. */
	td->td_turnstile = ts;

	/* Return non-zero if this was the last blocked thread. */
	return (empty);
}

/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_broadcast(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct turnstile *ts1;
	struct thread *td;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);

	/*
	 * Transfer the blocked list to the pending list.
	 */
	mtx_lock_spin(&td_contested_lock);
	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked, td_lockq);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Give a turnstile to each thread.  The last thread gets
	 * this turnstile.
	 */
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
		if (LIST_EMPTY(&ts->ts_free)) {
			/* Free list exhausted: this must be the last thread. */
			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
			ts1 = ts;
#ifdef TURNSTILE_PROFILING
			tc->tc_depth--;
#endif
		} else
			ts1 = LIST_FIRST(&ts->ts_free);
		MPASS(ts1 != NULL);
		LIST_REMOVE(ts1, ts_hash);
		td->td_turnstile = ts1;
	}
}

/*
 * Wakeup all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked.
 */
void
turnstile_unpend(struct turnstile *ts)
{
	/* Local (anonymous-struct) tail-queue head for the threads to wake. */
	TAILQ_HEAD( ,thread) pending_threads;
	struct turnstile_chain *tc;
	struct thread *td;
	int cp, pri;

	MPASS(ts != NULL);
	MPASS(ts->ts_owner == curthread);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(!TAILQ_EMPTY(&ts->ts_pending));

	/*
	 * Move the list of pending threads out of the turnstile and
	 * into a local variable.
	 */
	TAILQ_INIT(&pending_threads);
	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
	/*
	 * Poison the lock pointer once no threads remain blocked so
	 * that stale use of this turnstile trips assertions (debug
	 * kernels only).
	 */
	if (TAILQ_EMPTY(&ts->ts_blocked))
		ts->ts_lockobj = NULL;
#endif

	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.
	 */
	mtx_lock_spin(&td_contested_lock);
	ts->ts_owner = NULL;
	LIST_REMOVE(ts, ts_link);
	mtx_unlock_spin(&td_contested_lock);
	/*
	 * NOTE(review): the explicit critical section (paired with
	 * critical_exit() below) appears to cover the window between
	 * releasing the chain lock here and the wakeups completing —
	 * confirm intent against critical(9).
	 */
	critical_enter();
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority however.  (Numerically smaller values are better
	 * priorities, hence starting at PRI_MAX and taking minima.)
	 */
	td = curthread;
	pri = PRI_MAX;
	mtx_lock_spin(&sched_lock);
	mtx_lock_spin(&td_contested_lock);
	LIST_FOREACH(ts, &td->td_contested, ts_link) {
		/* The highest-priority waiter sits at the head of ts_blocked. */
		cp = TAILQ_FIRST(&ts->ts_blocked)->td_priority;
		if (cp < pri)
			pri = cp;
	}
	mtx_unlock_spin(&td_contested_lock);
	if (pri > td->td_base_pri)
		pri = td->td_base_pri;
	td->td_priority = pri;

	/*
	 * Wake up all the pending threads.  If a thread is not blocked
	 * on a lock, then it is currently executing on another CPU in
	 * turnstile_wait() or sitting on a run queue waiting to resume
	 * in turnstile_wait().  Set a flag to force it to try to acquire
	 * the lock again instead of blocking.
	 */
	while (!TAILQ_EMPTY(&pending_threads)) {
		td = TAILQ_FIRST(&pending_threads);
		TAILQ_REMOVE(&pending_threads, td, td_lockq);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		if (TD_ON_LOCK(td)) {
			td->td_blocked = NULL;
			td->td_lockname = NULL;
			TD_CLR_LOCK(td);
			MPASS(TD_CAN_RUN(td));
			setrunqueue(td, SRQ_BORING);
		} else {
			td->td_flags |= TDF_TSNOBLOCK;
			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
		}
	}
	critical_exit();
	mtx_unlock_spin(&sched_lock);
}

/*
 * Return the first (highest priority) thread in a turnstile.  The
 * turnstile chain must be locked (asserted on INVARIANTS kernels).
 */
struct thread *
turnstile_head(struct turnstile *ts)
{
#ifdef INVARIANTS
	struct turnstile_chain *tc;

	MPASS(ts != NULL);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
#endif
	return (TAILQ_FIRST(&ts->ts_blocked));
}

/*
 * Returns true if a turnstile is empty.
7909ed346baSBosko Milekic */ 791961a7b24SJohn Baldwin int 792961a7b24SJohn Baldwin turnstile_empty(struct turnstile *ts) 79336412d79SJohn Baldwin { 794961a7b24SJohn Baldwin #ifdef INVARIANTS 795961a7b24SJohn Baldwin struct turnstile_chain *tc; 79636412d79SJohn Baldwin 797961a7b24SJohn Baldwin MPASS(ts != NULL); 798961a7b24SJohn Baldwin tc = TC_LOOKUP(ts->ts_lockobj); 799961a7b24SJohn Baldwin mtx_assert(&tc->tc_lock, MA_OWNED); 80036412d79SJohn Baldwin #endif 801961a7b24SJohn Baldwin return (TAILQ_EMPTY(&ts->ts_blocked)); 802c53c013bSJohn Baldwin } 803