/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)
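/*
 * Illustrative sketch (not live code): the GIANT_* macros above bracket the
 * points where a thread may sleep while holding Giant, possibly recursively.
 * The expected caller pattern, as used by sleeplk() below, is:
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();		// fully drop Giant, remembering the depth
 *	error = sleepq_wait_sig(&lk->lock_object, pri);	// safe to sleep now
 *	GIANT_RESTORE();	// re-acquire Giant to the saved depth
 */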
#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}
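/*
 * Illustrative sketch (not live code): lk_lock is a single word that either
 * encodes the count of shared holders (when LK_SHARE is set) or the owning
 * thread pointer, alongside the waiters/spinners flag bits.  A reader can
 * decode it roughly as follows:
 *
 *	x = lk->lk_lock;
 *	if (x & LK_SHARE)
 *		nholders = LK_SHARERS(x);		// shared: holder count
 *	else
 *		owner = (struct thread *)LK_HOLDER(x);	// exclusive owner
 */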
/*
 * This function is called with the sleepqueue chain lock held and returns
 * with it unheld.  It also assumes the generic interlock is sane and has
 * previously been checked.  If LK_INTERLOCK is specified, the interlock is
 * not reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decide which flavor of sleep to actually perform.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
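/*
 * Illustrative caller pattern for sleeplk() (a sketch, not live code): the
 * acquisition paths below take the sleepqueue chain lock, re-validate the
 * lock word, set a waiters bit and only then sleep:
 *
 *	sleepq_lock(&lk->lock_object);
 *	x = lk->lk_lock;
 *	... recheck state, set LK_SHARED_WAITERS/LK_EXCLUSIVE_WAITERS ...
 *	error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
 *	    SQ_SHARED_QUEUE);
 *
 * Note that sleeplk() consumes the sleepqueue chain lock in all cases.
 */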
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that the lk_exslpfail count may be lying about
		 * the real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps, so lk_exslpfail might be considered an 'upper limit'
		 * bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}

		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	return (wakeup_swapper);
}
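/*
 * Condensed sketch of the wakeup policy implemented above (illustrative
 * pseudocode only, not live code):
 *
 *	if (exclusive waiters exist &&
 *	    lk_exslpfail < number of exclusive sleepers)
 *		wake the exclusive queue;	// writers get preference
 *	else
 *		wake everyone;	// the exclusive queue holds only
 *				// LK_SLEEPFAIL sleepers that will fail the
 *				// acquisition anyway, so drain it and hand
 *				// the lock to the shared queue
 *
 * The lk_exslpfail accounting thus prevents a queue consisting solely of
 * LK_SLEEPFAIL sleepers from starving the shared waiters.
 */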
"shared" : 332047dd67eSAttilio Rao "exclusive"); 3332028867dSAttilio Rao wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 334da7bbd2cSJohn Baldwin 0, queue); 335047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 336047dd67eSAttilio Rao break; 337047dd67eSAttilio Rao } 338047dd67eSAttilio Rao 339047dd67eSAttilio Rao lock_profile_release_lock(&lk->lock_object); 340*b5fb43e5SJohn Baldwin TD_LOCKS_DEC(curthread); 341*b5fb43e5SJohn Baldwin TD_SLOCKS_DEC(curthread); 342da7bbd2cSJohn Baldwin return (wakeup_swapper); 343047dd67eSAttilio Rao } 344047dd67eSAttilio Rao 345047dd67eSAttilio Rao static void 346d576deedSPawel Jakub Dawidek assert_lockmgr(const struct lock_object *lock, int what) 347f9721b43SAttilio Rao { 348f9721b43SAttilio Rao 349f9721b43SAttilio Rao panic("lockmgr locks do not support assertions"); 350f9721b43SAttilio Rao } 351f9721b43SAttilio Rao 352047dd67eSAttilio Rao static void 3536e21afd4SJohn Baldwin lock_lockmgr(struct lock_object *lock, int how) 3546e21afd4SJohn Baldwin { 3556e21afd4SJohn Baldwin 3566e21afd4SJohn Baldwin panic("lockmgr locks do not support sleep interlocking"); 3576e21afd4SJohn Baldwin } 3586e21afd4SJohn Baldwin 359047dd67eSAttilio Rao static int 3606e21afd4SJohn Baldwin unlock_lockmgr(struct lock_object *lock) 3616e21afd4SJohn Baldwin { 3626e21afd4SJohn Baldwin 3636e21afd4SJohn Baldwin panic("lockmgr locks do not support sleep interlocking"); 3646e21afd4SJohn Baldwin } 3656e21afd4SJohn Baldwin 366a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 367a5aedd68SStacey Son static int 368d576deedSPawel Jakub Dawidek owner_lockmgr(const struct lock_object *lock, struct thread **owner) 369a5aedd68SStacey Son { 370a5aedd68SStacey Son 371a5aedd68SStacey Son panic("lockmgr locks do not support owner inquiring"); 372a5aedd68SStacey Son } 373a5aedd68SStacey Son #endif 374a5aedd68SStacey Son 37599448ed1SJohn Dyson void 376047dd67eSAttilio Rao lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags) 37799448ed1SJohn Dyson { 3786efc8a16SAttilio Rao int iflags; 3796efc8a16SAttilio Rao 380047dd67eSAttilio Rao MPASS((flags & ~LK_INIT_MASK) == 0); 381353998acSAttilio Rao ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock, 382353998acSAttilio Rao ("%s: lockmgr not aligned for %s: %p", __func__, wmesg, 383353998acSAttilio Rao &lk->lk_lock)); 38499448ed1SJohn Dyson 385f0830182SAttilio Rao iflags = LO_SLEEPABLE | LO_UPGRADABLE; 386f0830182SAttilio Rao if (flags & LK_CANRECURSE) 387f0830182SAttilio Rao iflags |= LO_RECURSABLE; 388047dd67eSAttilio Rao if ((flags & LK_NODUP) == 0) 3896efc8a16SAttilio Rao iflags |= LO_DUPOK; 3907fbfba7bSAttilio Rao if (flags & LK_NOPROFILE) 3917fbfba7bSAttilio Rao iflags |= LO_NOPROFILE; 392047dd67eSAttilio Rao if ((flags & LK_NOWITNESS) == 0) 3936efc8a16SAttilio Rao iflags |= LO_WITNESS; 3947fbfba7bSAttilio Rao if (flags & LK_QUIET) 3957fbfba7bSAttilio Rao iflags |= LO_QUIET; 396e63091eaSMarcel Moolenaar if (flags & LK_IS_VNODE) 397e63091eaSMarcel Moolenaar iflags |= LO_IS_VNODE; 398651175c9SAttilio Rao iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE); 399047dd67eSAttilio Rao 400*b5fb43e5SJohn Baldwin lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags); 401047dd67eSAttilio Rao lk->lk_lock = LK_UNLOCKED; 402047dd67eSAttilio Rao lk->lk_recurse = 0; 4032028867dSAttilio Rao lk->lk_exslpfail = 0; 404047dd67eSAttilio Rao lk->lk_timo = timo; 405047dd67eSAttilio Rao lk->lk_pri = pri; 406047dd67eSAttilio Rao STACK_ZERO(lk); 40799448ed1SJohn Dyson } 40899448ed1SJohn Dyson 4093634d5b2SJohn Baldwin /* 4103634d5b2SJohn Baldwin * XXX: Gross 
/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}
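/*
 * A note on the entry point below: callers do not normally invoke
 * __lockmgr_args() directly.  The lockmgr(9) interface is expected to wrap it
 * in macros along these lines (a sketch of the convention; see
 * <sys/lockmgr.h> for the real definitions):
 *
 *	#define	lockmgr(lk, flags, ilk)					\
 *		_lockmgr_args((lk), (flags), (ilk), LK_WMESG_DEFAULT,	\
 *		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE)
 *
 * so that the default wmesg/priority/timeout sentinels below resolve to the
 * values stored at lockinit() time.
 */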
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the lock is expected not to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need a double-state handle here
			 * because for a failed acquisition the lock can be
			 * either held in exclusive mode or shared mode
			 * (for the writer starvation avoidance technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the turnstile
			 * chain lock.  If so, drop the turnstile lock and try
			 * again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to succeed in upgrading, so just
		 * give up the shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If the lock is a try operation, just give
				 * up and return instead of panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected not to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the turnstile
			 * chain lock.  If so, drop the turnstile lock and try
			 * again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		/*
		 * Panic if the lock is recursed.
		 */
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it has no
			 * waiters.
			 * Fix up the tid var if the lock has been disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starvation of the threads sleeping
			 * on the shared queue by giving them precedence
			 * and cleaning up the exclusive waiters bit anyway.
			 * Please note that the lk_exslpfail count may be
			 * lying about the real number of waiters with the
			 * LK_SLEEPFAIL flag on because they may be used in
			 * conjunction with interruptible sleeps, so
			 * lk_exslpfail might be considered an 'upper limit'
			 * bound, including the edge cases.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
				} else {
					lk->lk_exslpfail = 0;
					LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
					    __func__, lk);
					LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
					    __func__, lk);
					wakeup_swapper =
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;
				}
			} else {

				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * on, so clean it up anyway.
				 */
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected not to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starvation of the
				 * threads sleeping on the shared queue by
				 * giving them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Please note that the lk_exslpfail count may
				 * be lying about the real number of waiters
				 * with the LK_SLEEPFAIL flag on because they
				 * may be used in conjunction with
				 * interruptible sleeps, so lk_exslpfail might
				 * be considered an 'upper limit' bound,
				 * including the edge cases.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {

					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
			"%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up, we
				 * need to wait for one of them to acquire
				 * the lock before setting the exclusive
				 * waiters bit, in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			/*
			 * As long as we are unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}
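
/*
 * Usage sketch (illustrative only; `fp' and its foo_lock field are
 * hypothetical): disowning hands an exclusively held lockmgr lock over to
 * LK_KERNPROC, so that a context other than the locking thread may release
 * it later, e.g. once an asynchronous operation completes.
 *
 *	lockmgr(&fp->foo_lock, LK_EXCLUSIVE, NULL);
 *	... start the asynchronous work ...
 *	lockmgr_disown(&fp->foo_lock);
 *	// Any context may now do lockmgr(&fp->foo_lock, LK_RELEASE, NULL).
 */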
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	/*
	 * Panic if the lock is recursed.
	 */
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__, file, line);

	/*
	 * If the owner is already LK_KERNPROC, just skip the whole
	 * operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}

void
lockmgr_printinfo(const struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p "
		    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}

int
lockstatus(const struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}
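
/*
 * Usage sketch (illustrative only; the vnode lock is just one possible
 * consumer): interpreting lockstatus() return values.
 *
 *	switch (lockstatus(&vp->v_lock)) {
 *	case LK_EXCLUSIVE:	// curthread (or LK_KERNPROC) holds it
 *		...
 *	case LK_SHARED:		// share-locked by one or more threads
 *		...
 *	case LK_EXCLOTHER:	// held exclusively by another thread
 *		...
 *	case 0:			// unlocked
 *		...
 *	}
 */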
#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
		/* FALLTHROUGH */
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is
		 * held in exclusive mode, even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif
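
/*
 * Usage sketch (illustrative only, and assuming the lockmgr_assert()
 * convenience macro from <sys/lockmgr.h>, which supplies the file and line
 * arguments under INVARIANTS):
 *
 *	lockmgr_assert(&vp->v_lock, KA_XLOCKED);
 *	lockmgr_assert(&vp->v_lock, KA_LOCKED | KA_NOTRECURSED);
 */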
#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}

static void
db_show_lockmgr(const struct lock_object *lock)
{
	struct thread *td;
	const struct lock *lk;

	lk = (const struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
	else
		db_printf("none\n");
}
#endif
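
/*
 * End-to-end usage sketch (illustrative only; `struct foo', its foo_lock
 * field, M_FOO and the function names are all hypothetical):
 *
 *	static struct foo *
 *	foo_create(void)
 *	{
 *		struct foo *fp;
 *
 *		fp = malloc(sizeof(*fp), M_FOO, M_WAITOK | M_ZERO);
 *		lockinit(&fp->foo_lock, PVFS, "foolk", 0, 0);
 *		return (fp);
 *	}
 *
 *	static void
 *	foo_destroy(struct foo *fp)
 *	{
 *
 *		// Wait for all other consumers, leaving the lock held
 *		// exclusively, then release and tear it down.
 *		lockmgr(&fp->foo_lock, LK_DRAIN, NULL);
 *		lockmgr(&fp->foo_lock, LK_RELEASE, NULL);
 *		lockdestroy(&fp->foo_lock);
 *		free(fp, M_FOO);
 *	}
 */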