/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Big Theory Statement for mutual exclusion locking primitives.
 *
 * A mutex serializes multiple threads so that only one thread
 * (the "owner" of the mutex) is active at a time.  See mutex(9F)
 * for a full description of the interfaces and programming model.
 * The rest of this comment describes the implementation.
 *
 * Mutexes come in two flavors: adaptive and spin.  mutex_init(9F)
 * determines the type based solely on the iblock cookie (PIL) argument.
 * PIL > LOCK_LEVEL implies a spin lock; everything else is adaptive.
 *
 * Spin mutexes block interrupts and spin until the lock becomes available.
 * A thread may not sleep, or call any function that might sleep, while
 * holding a spin mutex.  With few exceptions, spin mutexes should only
 * be used to synchronize with interrupt handlers.
 *
 * Adaptive mutexes (the default type) spin if the owner is running on
 * another CPU and block otherwise.  This policy is based on the assumption
 * that mutex hold times are typically short enough that the time spent
 * spinning is less than the time it takes to block.  If you need mutual
 * exclusion semantics with long hold times, consider an rwlock(9F) as
 * RW_WRITER.  Better still, reconsider the algorithm: if it requires
 * mutual exclusion for long periods of time, it's probably not scalable.
 *
 * Adaptive mutexes are overwhelmingly more common than spin mutexes,
 * so mutex_enter() assumes that the lock is adaptive.  We get away
 * with this by structuring mutexes so that an attempt to acquire a
 * spin mutex as adaptive always fails.  When mutex_enter() fails
 * it punts to mutex_vector_enter(), which does all the hard stuff.
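 *
 * As an illustrative sketch only (the real fast path is platform
 * assembly), on a cas-capable platform mutex_enter() is logically:
 *
 *	if (casptr(&lp->m_owner, NULL, curthread) == NULL)
 *		return;			(acquired; owner = curthread)
 *	mutex_vector_enter(lp);		(miss: do the hard stuff)
 *
 * Spin mutexes are initialized with m_dummylock held (see mutex_init()
 * below), which overlaps the adaptive owner field, so the cas always
 * fails for them and mutex_enter() punts as required.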
 *
 * mutex_vector_enter() first checks the type.  If it's a spin mutex,
 * we just call lock_set_spl() and return.  If it's an adaptive mutex,
 * we check to see what the owner is doing.  If the owner is running,
 * we spin until the lock becomes available; if not, we mark the lock
 * as having waiters and block.
 *
 * Blocking on a mutex is a surprisingly delicate dance because, for speed,
 * mutex_exit() doesn't use an atomic instruction.  Thus we have to work
 * a little harder in the (rarely-executed) blocking path to make sure
 * we don't block on a mutex that's just been released -- otherwise we
 * might never be woken up.
 *
 * The logic for synchronizing mutex_vector_enter() with mutex_exit()
 * in the face of preemption and relaxed memory ordering is as follows:
 *
 * (1) Preemption in the middle of mutex_exit() must cause mutex_exit()
 *     to restart.  Each platform must enforce this by checking the
 *     interrupted PC in the interrupt handler (or on return from trap --
 *     whichever is more convenient for the platform).  If the PC
 *     lies within the critical region of mutex_exit(), the interrupt
 *     handler must reset the PC back to the beginning of mutex_exit().
 *     The critical region consists of all instructions up to, but not
 *     including, the store that clears the lock (which, of course,
 *     must never be executed twice.)
 *
 *     This ensures that the owner will always check for waiters after
 *     resuming from a previous preemption.
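 *
 *     As a sketch (illustrative only; symbol names and the exact check
 *     vary by platform), the trap-return fixup looks like:
 *
 *	if (rp->r_pc - (uintptr_t)mutex_exit_critical_start <
 *	    mutex_exit_critical_size)
 *		rp->r_pc = (uintptr_t)mutex_exit_critical_start;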
 *
 * (2) A thread resuming in mutex_exit() does (at least) the following:
 *
 *	when resuming:	set CPU_THREAD = owner
 *			membar #StoreLoad
 *
 *	in mutex_exit:	check waiters bit; do wakeup if set
 *			membar #LoadStore|#StoreStore
 *			clear owner
 *			(at this point, other threads may or may not grab
 *			the lock, and we may or may not reacquire it)
 *
 *	when blocking:	membar #StoreStore (due to disp_lock_enter())
 *			set CPU_THREAD = (possibly) someone else
 *
 * (3) A thread blocking in mutex_vector_enter() does the following:
 *
 *	set waiters bit
 *	membar #StoreLoad (via membar_enter())
 *	check CPU_THREAD for each CPU; abort if owner running
 *	membar #LoadLoad (via membar_consumer())
 *	check owner and waiters bit; abort if either changed
 *	block
 *
 * Thus the global memory orderings for (2) and (3) are as follows:
 *
 * (2M) mutex_exit() memory order:
 *
 *			STORE	CPU_THREAD = owner
 *			LOAD	waiters bit
 *			STORE	owner = NULL
 *			STORE	CPU_THREAD = (possibly) someone else
 *
 * (3M) mutex_vector_enter() memory order:
 *
 *			STORE	waiters bit = 1
 *			LOAD	CPU_THREAD for each CPU
 *			LOAD	owner and waiters bit
 *
 * It has been verified by exhaustive simulation that all possible global
 * memory orderings of (2M) interleaved with (3M) result in correct
 * behavior.  Moreover, these ordering constraints are minimal: changing
 * the ordering of anything in (2M) or (3M) breaks the algorithm, creating
 * windows for missed wakeups.  Note: the possibility that other threads
 * may grab the lock after the owner drops it can be factored out of the
 * memory ordering analysis because mutex_vector_enter() won't block
 * if the lock isn't still owned by the same thread.
 *
 * The only requirements of code outside the mutex implementation are
 * (1) mutex_exit() preemption fixup in interrupt handlers or trap return,
 * and (2) a membar #StoreLoad after setting CPU_THREAD in resume().
 * Note: idle threads cannot grab adaptive locks (since they cannot block),
 * so the membar may be safely omitted when resuming an idle thread.
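 *
 * As an illustrative sketch (resume() itself is platform assembly),
 * requirement (2) amounts to:
 *
 *	cpup->cpu_thread = t;		(STORE CPU_THREAD = owner)
 *	membar #StoreLoad		(or stronger, e.g. membar_enter())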
 *
 * When a mutex has waiters, mutex_vector_exit() has several options:
 *
 * (1) Choose a waiter and make that thread the owner before waking it;
 *     this is known as "direct handoff" of ownership.
 *
 * (2) Drop the lock and wake one waiter.
 *
 * (3) Drop the lock, clear the waiters bit, and wake all waiters.
 *
 * In many ways (1) is the cleanest solution, but if a lock is moderately
 * contended it defeats the adaptive spin logic.  If we make some other
 * thread the owner, but he's not ONPROC yet, then all other threads on
 * other cpus that try to get the lock will conclude that the owner is
 * blocked, so they'll block too.  And so on -- it escalates quickly,
 * with every thread taking the blocking path rather than the spin path.
 * Thus, direct handoff is *not* a good idea for adaptive mutexes.
 *
 * Option (2) is the next most natural-seeming option, but it has several
 * annoying properties.  If there's more than one waiter, we must preserve
 * the waiters bit on an unheld lock.  On cas-capable platforms, where
 * the waiters bit is part of the lock word, this means that both 0x0
 * and 0x1 represent unheld locks, so we have to cas against *both*.
 * Priority inheritance also gets more complicated, because a lock can
 * have waiters but no owner to whom priority can be willed.  So while
 * it is possible to make option (2) work, it's surprisingly vile.
 *
 * Option (3), the least-intuitive at first glance, is what we actually do.
 * It has the advantage that because you always wake all waiters, you
 * never have to preserve the waiters bit.  Waking all waiters seems like
 * begging for a thundering herd problem, but consider: under option (2),
 * every thread that grabs and drops the lock will wake one waiter -- so
 * if the lock is fairly active, all waiters will be awakened very quickly
 * anyway.  Moreover, this is how adaptive locks are *supposed* to work.
 * The blocking case is rare; the more common case (by 3-4 orders of
 * magnitude) is that one or more threads spin waiting to get the lock.
 * Only direct handoff can prevent the thundering herd problem, but as
 * mentioned earlier, that would tend to defeat the adaptive spin logic.
 * In practice, option (3) works well because the blocking case is rare.
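 *
 * (mutex_vector_exit() below implements option (3): it clears the lock
 * and waiters bit with MUTEX_CLEAR_LOCK_AND_WAITERS() and then wakes
 * every waiter via turnstile_wakeup().)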
 */

/*
 * delayed lock retry with exponential delay for spin locks
 *
 * It is noted above that for both the spin locks and the adaptive locks,
 * spinning is the dominant mode of operation.  So long as there is only
 * one thread waiting on a lock, the naive spin loop works very well in
 * cache-based architectures.  The lock data structure is pulled into the
 * cache of the processor with the waiting/spinning thread and no further
 * memory traffic is generated until the lock is released.  Unfortunately,
 * once two or more threads are waiting on a lock, the naive spin has
 * the property of generating maximum memory traffic from each spinning
 * thread as the spinning threads contend for the lock data structure.
 *
 * By executing a delay loop before retrying a lock, a waiting thread
 * can reduce its memory traffic by a large factor, depending on the
 * size of the delay loop.  A large delay loop greatly reduces the memory
 * traffic, but has the drawback of having a period of time when
 * no thread is attempting to gain the lock even though several threads
 * might be waiting.  A small delay loop has the drawback of not
 * much reduction in memory traffic, but reduces the potential idle time.
 * The theory of the exponential delay code is to start with a short
 * delay loop and double the waiting time on each iteration, up to
 * a preselected maximum.  The BACKOFF_BASE provides the equivalent
 * of 2 to 3 memory references of delay for US-III+ and US-IV architectures.
 * The BACKOFF_CAP is the equivalent of 50 to 100 memory references of
 * time (less than 12 microseconds for a 1000 MHz system).
 *
 * To determine appropriate BACKOFF_BASE and BACKOFF_CAP values,
 * studies on US-III+ and US-IV systems using 1 to 66 threads were
 * done.  A range of possible values was studied.
 * Performance differences below 10 threads were not large.  For
 * systems with more threads, substantial increases in total lock
 * throughput were observed with the given values.  For cases where
 * more than 20 threads were waiting on the same lock, lock throughput
 * increased by a factor of 5 or more using the backoff algorithm.
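 *
 * As a minimal sketch, the default (non-platform) backoff policy coded
 * below is:
 *
 *	backoff = BACKOFF_BASE;
 *	while (the lock is missed) {
 *		for (backctr = backoff; backctr; backctr--)
 *			;			(delay; no lock references)
 *		backoff = MIN(backoff << 1, BACKOFF_CAP);
 *		retry the lock;
 *	}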
 *
 * Some platforms may provide their own platform-specific delay code,
 * using plat_lock_delay(backoff).  If it is available, plat_lock_delay
 * is executed instead of the default delay code.
 */

#pragma weak plat_lock_delay

#include <sys/param.h>
#include <sys/time.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/sobject.h>
#include <sys/turnstile.h>
#include <sys/systm.h>
#include <sys/mutex_impl.h>
#include <sys/spl.h>
#include <sys/lockstat.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/stack.h>

#define	BACKOFF_BASE	50
#define	BACKOFF_CAP	1600

/*
 * The sobj_ops vector exports a set of functions needed when a thread
 * is asleep on a synchronization object of this type.
 */
static sobj_ops_t mutex_sobj_ops = {
	SOBJ_MUTEX, mutex_owner, turnstile_stay_asleep, turnstile_change_pri
};

/*
 * If the system panics on a mutex, save the address of the offending
 * mutex in panic_mutex_addr, and save the contents in panic_mutex.
 */
static mutex_impl_t panic_mutex;
static mutex_impl_t *panic_mutex_addr;

static void
mutex_panic(char *msg, mutex_impl_t *lp)
{
	if (panicstr)
		return;

	if (casptr(&panic_mutex_addr, NULL, lp) == NULL)
		panic_mutex = *lp;

	panic("%s, lp=%p owner=%p thread=%p",
	    msg, lp, MUTEX_OWNER(&panic_mutex), curthread);
}

/*
 * mutex_vector_enter() is called from the assembly mutex_enter() routine
 * if the lock is held or is not of type MUTEX_ADAPTIVE.
 */
void
mutex_vector_enter(mutex_impl_t *lp)
{
	kthread_id_t	owner;
	hrtime_t	sleep_time = 0;	/* how long we slept */
	uint_t		spin_count = 0;	/* how many times we spun */
	cpu_t		*cpup, *last_cpu;
	extern cpu_t	*cpu_list;
	turnstile_t	*ts;
	volatile mutex_impl_t *vlp = (volatile mutex_impl_t *)lp;
	int		backoff;	/* current backoff */
	int		backctr;	/* ctr for backoff */
	int		sleep_count = 0;

	ASSERT_STACK_ALIGNED();

	if (MUTEX_TYPE_SPIN(lp)) {
		lock_set_spl(&lp->m_spin.m_spinlock, lp->m_spin.m_minspl,
		    &lp->m_spin.m_oldspl);
		return;
	}

	if (!MUTEX_TYPE_ADAPTIVE(lp)) {
		mutex_panic("mutex_enter: bad mutex", lp);
		return;
	}

	/*
	 * Adaptive mutexes must not be acquired from above LOCK_LEVEL.
	 * We can migrate after loading CPU but before checking CPU_ON_INTR,
	 * so we must verify by disabling preemption and loading CPU again.
	 */
	cpup = CPU;
	if (CPU_ON_INTR(cpup) && !panicstr) {
		kpreempt_disable();
		if (CPU_ON_INTR(CPU))
			mutex_panic("mutex_enter: adaptive at high PIL", lp);
		kpreempt_enable();
	}

	CPU_STATS_ADDQ(cpup, sys, mutex_adenters, 1);

	if (&plat_lock_delay) {
		backoff = 0;
	} else {
		backoff = BACKOFF_BASE;
	}

	for (;;) {
spin:
		spin_count++;
		/*
		 * Add an exponential backoff delay before trying again
		 * to touch the mutex data structure.
		 * The spin_count test and call to nulldev are to prevent
		 * the compiler optimizer from eliminating the delay loop.
		 */
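		/*
		 * plat_lock_delay is declared with #pragma weak (above), so
		 * its address is non-NULL only when the platform supplies
		 * an implementation; the test below selects it when present.
		 */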
		if (&plat_lock_delay) {
			plat_lock_delay(&backoff);
		} else {
			for (backctr = backoff; backctr; backctr--) {
				if (!spin_count) (void) nulldev();
			}	/* delay */
			backoff = backoff << 1;	/* double it */
			if (backoff > BACKOFF_CAP) {
				backoff = BACKOFF_CAP;
			}

			SMT_PAUSE();
		}

		if (panicstr)
			return;

		if ((owner = MUTEX_OWNER(vlp)) == NULL) {
			if (mutex_adaptive_tryenter(lp))
				break;
			continue;
		}

		if (owner == curthread)
			mutex_panic("recursive mutex_enter", lp);

		/*
		 * If lock is held but owner is not yet set, spin.
		 * (Only relevant for platforms that don't have cas.)
		 */
		if (owner == MUTEX_NO_OWNER)
			continue;

		/*
		 * When searching the other CPUs, start with the one where
		 * we last saw the owner thread.  If owner is running, spin.
		 *
		 * We must disable preemption at this point to guarantee
		 * that the list doesn't change while we traverse it
		 * without the cpu_lock mutex.  While preemption is
		 * disabled, we must revalidate our cached cpu pointer.
		 */
		kpreempt_disable();
		if (cpup->cpu_next == NULL)
			cpup = cpu_list;
		last_cpu = cpup;	/* mark end of search */
		do {
			if (cpup->cpu_thread == owner) {
				kpreempt_enable();
				goto spin;
			}
		} while ((cpup = cpup->cpu_next) != last_cpu);
		kpreempt_enable();

		/*
		 * The owner appears not to be running, so block.
		 * See the Big Theory Statement for memory ordering issues.
		 */
		ts = turnstile_lookup(lp);
		MUTEX_SET_WAITERS(lp);
		membar_enter();

		/*
		 * Recheck whether owner is running after waiters bit hits
		 * global visibility (above).  If owner is running, spin.
		 *
		 * Since we are at ipl DISP_LEVEL, kernel preemption is
		 * disabled; however, we still need to revalidate our cached
		 * cpu pointer to make sure the cpu hasn't been deleted.
		 */
		if (cpup->cpu_next == NULL)
			last_cpu = cpup = cpu_list;
		do {
			if (cpup->cpu_thread == owner) {
				turnstile_exit(lp);
				goto spin;
			}
		} while ((cpup = cpup->cpu_next) != last_cpu);
		membar_consumer();

		/*
		 * If owner and waiters bit are unchanged, block.
		 */
		if (MUTEX_OWNER(vlp) == owner && MUTEX_HAS_WAITERS(vlp)) {
			sleep_time -= gethrtime();
			(void) turnstile_block(ts, TS_WRITER_Q, lp,
			    &mutex_sobj_ops, NULL, NULL);
			sleep_time += gethrtime();
			sleep_count++;
		} else {
			turnstile_exit(lp);
		}
	}

	ASSERT(MUTEX_OWNER(lp) == curthread);

	if (sleep_time != 0) {
		/*
		 * Note, sleep time is the sum of all the sleeping we
		 * did.
		 */
		LOCKSTAT_RECORD(LS_MUTEX_ENTER_BLOCK, lp, sleep_time);
	}

	/*
	 * We do not count a sleep as a spin.
	 */
	if (spin_count > sleep_count)
		LOCKSTAT_RECORD(LS_MUTEX_ENTER_SPIN, lp,
		    spin_count - sleep_count);

	LOCKSTAT_RECORD0(LS_MUTEX_ENTER_ACQUIRE, lp);
}
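
/*
 * Sketch of typical mutex_tryenter() usage, per mutex(9F) (illustrative
 * only; xs is a hypothetical driver soft-state pointer):
 *
 *	if (!mutex_tryenter(&xs->lock))
 *		return (EBUSY);		(would rather not block here)
 *	...
 *	mutex_exit(&xs->lock);
 */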

/*
 * mutex_vector_tryenter() is called from the assembly mutex_tryenter()
 * routine if the lock is held or is not of type MUTEX_ADAPTIVE.
 */
int
mutex_vector_tryenter(mutex_impl_t *lp)
{
	int s;

	if (MUTEX_TYPE_ADAPTIVE(lp))
		return (0);		/* we already tried in assembly */

	if (!MUTEX_TYPE_SPIN(lp)) {
		mutex_panic("mutex_tryenter: bad mutex", lp);
		return (0);
	}

	s = splr(lp->m_spin.m_minspl);
	if (lock_try(&lp->m_spin.m_spinlock)) {
		lp->m_spin.m_oldspl = (ushort_t)s;
		return (1);
	}
	splx(s);
	return (0);
}

/*
 * mutex_vector_exit() is called from mutex_exit() if the lock is not
 * adaptive, has waiters, or is not owned by the current thread (panic).
 */
void
mutex_vector_exit(mutex_impl_t *lp)
{
	turnstile_t *ts;

	if (MUTEX_TYPE_SPIN(lp)) {
		lock_clear_splx(&lp->m_spin.m_spinlock, lp->m_spin.m_oldspl);
		return;
	}

	if (MUTEX_OWNER(lp) != curthread) {
		mutex_panic("mutex_exit: not owner", lp);
		return;
	}

	ts = turnstile_lookup(lp);
	MUTEX_CLEAR_LOCK_AND_WAITERS(lp);
	if (ts == NULL)
		turnstile_exit(lp);
	else
		turnstile_wakeup(ts, TS_WRITER_Q, ts->ts_waiters, NULL);
	LOCKSTAT_RECORD0(LS_MUTEX_EXIT_RELEASE, lp);
}

int
mutex_owned(kmutex_t *mp)
{
	mutex_impl_t *lp = (mutex_impl_t *)mp;

	if (panicstr)
		return (1);

	if (MUTEX_TYPE_ADAPTIVE(lp))
		return (MUTEX_OWNER(lp) == curthread);
	return (LOCK_HELD(&lp->m_spin.m_spinlock));
}

kthread_t *
mutex_owner(kmutex_t *mp)
{
	mutex_impl_t *lp = (mutex_impl_t *)mp;
	kthread_id_t t;

	if (MUTEX_TYPE_ADAPTIVE(lp) && (t = MUTEX_OWNER(lp)) != MUTEX_NO_OWNER)
		return (t);
	return (NULL);
}
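
/*
 * A common mutex_owned() idiom, per mutex(9F), is lock-assertion
 * checking (sketch; foo_lock is a hypothetical lock):
 *
 *	ASSERT(mutex_owned(&foo_lock));
 */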

/*
 * The iblock cookie 'ibc' is the spl level associated with the lock;
 * this alone determines whether the lock will be ADAPTIVE or SPIN.
 *
 * Adaptive mutexes created in zeroed memory do not need to call
 * mutex_init() as their allocation in this fashion guarantees
 * their initialization, e.g. adaptive mutexes created as static
 * within the BSS or allocated by kmem_zalloc().
 */
/* ARGSUSED */
void
mutex_init(kmutex_t *mp, char *name, kmutex_type_t type, void *ibc)
{
	mutex_impl_t *lp = (mutex_impl_t *)mp;

	ASSERT(ibc < (void *)KERNELBASE);	/* see 1215173 */

	if ((intptr_t)ibc > ipltospl(LOCK_LEVEL) && ibc < (void *)KERNELBASE) {
		ASSERT(type != MUTEX_ADAPTIVE && type != MUTEX_DEFAULT);
		MUTEX_SET_TYPE(lp, MUTEX_SPIN);
		LOCK_INIT_CLEAR(&lp->m_spin.m_spinlock);
		LOCK_INIT_HELD(&lp->m_spin.m_dummylock);
		lp->m_spin.m_minspl = (int)(intptr_t)ibc;
	} else {
		ASSERT(type != MUTEX_SPIN);
		MUTEX_SET_TYPE(lp, MUTEX_ADAPTIVE);
		MUTEX_CLEAR_LOCK_AND_WAITERS(lp);
	}
}

void
mutex_destroy(kmutex_t *mp)
{
	mutex_impl_t *lp = (mutex_impl_t *)mp;

	if (lp->m_owner == 0 && !MUTEX_HAS_WAITERS(lp)) {
		MUTEX_DESTROY(lp);
	} else if (MUTEX_TYPE_SPIN(lp)) {
		LOCKSTAT_RECORD0(LS_MUTEX_DESTROY_RELEASE, lp);
		MUTEX_DESTROY(lp);
	} else if (MUTEX_TYPE_ADAPTIVE(lp)) {
		LOCKSTAT_RECORD0(LS_MUTEX_DESTROY_RELEASE, lp);
		if (MUTEX_OWNER(lp) != curthread)
			mutex_panic("mutex_destroy: not owner", lp);
		if (MUTEX_HAS_WAITERS(lp)) {
			turnstile_t *ts = turnstile_lookup(lp);
			turnstile_exit(lp);
			if (ts != NULL)
				mutex_panic("mutex_destroy: has waiters", lp);
		}
		MUTEX_DESTROY(lp);
	} else {
		mutex_panic("mutex_destroy: bad mutex", lp);
	}
}

/*
 * Simple C support for the cases where spin locks miss on the first try.
 */
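/*
 * Illustrative sketch only (the fast path is platform assembly):
 * the lock_set() entry point that leads here is logically
 *
 *	if (!lock_try(lp))		(single atomic test-and-set)
 *		lock_set_spin(lp);	(miss: back off and spin)
 */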
void
lock_set_spin(lock_t *lp)
{
	int spin_count = 1;
	int backoff;	/* current backoff */
	int backctr;	/* ctr for backoff */

	if (panicstr)
		return;

	if (ncpus == 1)
		panic("lock_set: %p lock held and only one CPU", lp);

	if (&plat_lock_delay) {
		backoff = 0;
	} else {
		backoff = BACKOFF_BASE;
	}

	while (LOCK_HELD(lp) || !lock_spin_try(lp)) {
		if (panicstr)
			return;
		spin_count++;
		/*
		 * Add an exponential backoff delay before trying again
		 * to touch the mutex data structure.
		 * The spin_count test and call to nulldev are to prevent
		 * the compiler optimizer from eliminating the delay loop.
		 */
		if (&plat_lock_delay) {
			plat_lock_delay(&backoff);
		} else {
			/* delay */
			for (backctr = backoff; backctr; backctr--) {
				if (!spin_count) (void) nulldev();
			}

			backoff = backoff << 1;	/* double it */
			if (backoff > BACKOFF_CAP) {
				backoff = BACKOFF_CAP;
			}
			SMT_PAUSE();
		}
	}

	if (spin_count) {
		LOCKSTAT_RECORD(LS_LOCK_SET_SPIN, lp, spin_count);
	}

	LOCKSTAT_RECORD0(LS_LOCK_SET_ACQUIRE, lp);
}

void
lock_set_spl_spin(lock_t *lp, int new_pil, ushort_t *old_pil_addr, int old_pil)
{
	int spin_count = 1;
	int backoff;	/* current backoff */
	int backctr;	/* ctr for backoff */

	if (panicstr)
		return;

	if (ncpus == 1)
		panic("lock_set_spl: %p lock held and only one CPU", lp);

	ASSERT(new_pil > LOCK_LEVEL);

	if (&plat_lock_delay) {
		backoff = 0;
	} else {
		backoff = BACKOFF_BASE;
	}
	do {
		splx(old_pil);
		while (LOCK_HELD(lp)) {
			if (panicstr) {
				*old_pil_addr = (ushort_t)splr(new_pil);
				return;
			}
			spin_count++;
			/*
			 * Add an exponential backoff delay before trying
			 * again to touch the mutex data structure.
			 * The spin_count test and call to nulldev are to
			 * prevent the compiler optimizer from eliminating
			 * the delay loop.
			 */
			if (&plat_lock_delay) {
				plat_lock_delay(&backoff);
			} else {
				for (backctr = backoff; backctr; backctr--) {
					if (!spin_count) (void) nulldev();
				}
				backoff = backoff << 1;	/* double it */
				if (backoff > BACKOFF_CAP) {
					backoff = BACKOFF_CAP;
				}

				SMT_PAUSE();
			}
		}
		old_pil = splr(new_pil);
	} while (!lock_spin_try(lp));

	*old_pil_addr = (ushort_t)old_pil;

	if (spin_count) {
		LOCKSTAT_RECORD(LS_LOCK_SET_SPL_SPIN, lp, spin_count);
	}

	LOCKSTAT_RECORD(LS_LOCK_SET_SPL_ACQUIRE, lp, spin_count);
}