19454b2d8SWarner Losh /*- 2047dd67eSAttilio Rao * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org> 3047dd67eSAttilio Rao * All rights reserved. 453bf4bb2SPeter Wemm * 553bf4bb2SPeter Wemm * Redistribution and use in source and binary forms, with or without 653bf4bb2SPeter Wemm * modification, are permitted provided that the following conditions 753bf4bb2SPeter Wemm * are met: 853bf4bb2SPeter Wemm * 1. Redistributions of source code must retain the above copyright 9047dd67eSAttilio Rao * notice(s), this list of conditions and the following disclaimer as 10047dd67eSAttilio Rao * the first lines of this file unmodified other than the possible 11047dd67eSAttilio Rao * addition of one or more copyright notices. 1253bf4bb2SPeter Wemm * 2. Redistributions in binary form must reproduce the above copyright 13047dd67eSAttilio Rao * notice(s), this list of conditions and the following disclaimer in the 1453bf4bb2SPeter Wemm * documentation and/or other materials provided with the distribution. 1553bf4bb2SPeter Wemm * 16047dd67eSAttilio Rao * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY 17047dd67eSAttilio Rao * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18047dd67eSAttilio Rao * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19047dd67eSAttilio Rao * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY 20047dd67eSAttilio Rao * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21047dd67eSAttilio Rao * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22047dd67eSAttilio Rao * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23047dd67eSAttilio Rao * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2453bf4bb2SPeter Wemm * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25047dd67eSAttilio Rao * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 26047dd67eSAttilio Rao * DAMAGE. 2753bf4bb2SPeter Wemm */ 2853bf4bb2SPeter Wemm 29651175c9SAttilio Rao #include "opt_adaptive_lockmgrs.h" 30047dd67eSAttilio Rao #include "opt_ddb.h" 31f5f9340bSFabien Thomas #include "opt_hwpmc_hooks.h" 32047dd67eSAttilio Rao 33677b542eSDavid E. O'Brien #include <sys/cdefs.h> 34677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 35677b542eSDavid E. O'Brien 3653bf4bb2SPeter Wemm #include <sys/param.h> 37cd2fe4e6SAttilio Rao #include <sys/kdb.h> 3861d80e90SJohn Baldwin #include <sys/ktr.h> 3953bf4bb2SPeter Wemm #include <sys/lock.h> 40047dd67eSAttilio Rao #include <sys/lock_profile.h> 418302d183SBruce Evans #include <sys/lockmgr.h> 42d8881ca3SJohn Baldwin #include <sys/mutex.h> 438302d183SBruce Evans #include <sys/proc.h> 44047dd67eSAttilio Rao #include <sys/sleepqueue.h> 45e8ddb61dSJeff Roberson #ifdef DEBUG_LOCKS 46e8ddb61dSJeff Roberson #include <sys/stack.h> 47e8ddb61dSJeff Roberson #endif 48651175c9SAttilio Rao #include <sys/sysctl.h> 49047dd67eSAttilio Rao #include <sys/systm.h> 5053bf4bb2SPeter Wemm 51047dd67eSAttilio Rao #include <machine/cpu.h> 526efc8a16SAttilio Rao 53be6847d7SJohn Baldwin #ifdef DDB 54be6847d7SJohn Baldwin #include <ddb/ddb.h> 55047dd67eSAttilio Rao #endif 56047dd67eSAttilio Rao 57f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS 58f5f9340bSFabien Thomas #include <sys/pmckern.h> 59f5f9340bSFabien Thomas PMC_SOFT_DECLARE( , , lock, failed); 60f5f9340bSFabien Thomas #endif 61f5f9340bSFabien Thomas 62651175c9SAttilio Rao CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) == 63651175c9SAttilio Rao (LK_ADAPTIVE | LK_NOSHARE)); 64651175c9SAttilio Rao CTASSERT(LK_UNLOCKED == (LK_UNLOCKED & 65651175c9SAttilio Rao ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS))); 66047dd67eSAttilio Rao 67047dd67eSAttilio Rao #define SQ_EXCLUSIVE_QUEUE 0 68047dd67eSAttilio Rao #define SQ_SHARED_QUEUE 1 69047dd67eSAttilio Rao 70047dd67eSAttilio Rao #ifndef INVARIANTS 71047dd67eSAttilio Rao #define _lockmgr_assert(lk, what, file, line) 72047dd67eSAttilio Rao #endif 73*ce1c953eSMark Johnston 74047dd67eSAttilio Rao #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++) 75047dd67eSAttilio Rao #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--) 76047dd67eSAttilio Rao 77047dd67eSAttilio Rao #ifndef DEBUG_LOCKS 78047dd67eSAttilio Rao #define STACK_PRINT(lk) 79047dd67eSAttilio Rao #define STACK_SAVE(lk) 80047dd67eSAttilio Rao #define STACK_ZERO(lk) 81047dd67eSAttilio Rao #else 82047dd67eSAttilio Rao #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack) 83047dd67eSAttilio Rao #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack) 84047dd67eSAttilio Rao #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack) 85047dd67eSAttilio Rao #endif 86047dd67eSAttilio Rao 87047dd67eSAttilio Rao #define LOCK_LOG2(lk, string, arg1, arg2) \ 88047dd67eSAttilio Rao if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \ 89047dd67eSAttilio Rao CTR2(KTR_LOCK, (string), (arg1), (arg2)) 90047dd67eSAttilio Rao #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \ 91047dd67eSAttilio Rao if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \ 92047dd67eSAttilio Rao CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3)) 93047dd67eSAttilio Rao 94e5f94314SAttilio Rao #define GIANT_DECLARE \ 95e5f94314SAttilio Rao int _i = 0; \ 96e5f94314SAttilio Rao WITNESS_SAVE_DECL(Giant) 97e5f94314SAttilio Rao #define GIANT_RESTORE() do { \ 98e5f94314SAttilio Rao if (_i > 0) { \ 99e5f94314SAttilio Rao while (_i--) \ 100e5f94314SAttilio Rao mtx_lock(&Giant); \ 101e5f94314SAttilio Rao WITNESS_RESTORE(&Giant.lock_object, Giant); \ 102e5f94314SAttilio Rao } \ 103e5f94314SAttilio Rao } while (0) 104e5f94314SAttilio Rao #define GIANT_SAVE() do { \ 105e5f94314SAttilio Rao if (mtx_owned(&Giant)) { \ 106e5f94314SAttilio Rao WITNESS_SAVE(&Giant.lock_object, Giant); \ 107e5f94314SAttilio Rao while (mtx_owned(&Giant)) { \ 108e5f94314SAttilio Rao _i++; \ 109e5f94314SAttilio Rao mtx_unlock(&Giant); \ 110e5f94314SAttilio Rao } \ 111e5f94314SAttilio Rao } \ 112e5f94314SAttilio Rao } while (0) 113e5f94314SAttilio Rao 11472ba3c08SKonstantin Belousov #define LK_CAN_SHARE(x, flags) \ 11572ba3c08SKonstantin Belousov (((x) & LK_SHARE) && \ 11672ba3c08SKonstantin Belousov (((x) & (LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 0 || \ 11772ba3c08SKonstantin Belousov (curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) || \ 11872ba3c08SKonstantin Belousov (curthread->td_pflags & TDP_DEADLKTREAT))) 119e5f94314SAttilio Rao #define LK_TRYOP(x) \ 120e5f94314SAttilio Rao ((x) & LK_NOWAIT) 121e5f94314SAttilio Rao 122e5f94314SAttilio Rao #define LK_CAN_WITNESS(x) \ 123e5f94314SAttilio Rao (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x)) 124e5f94314SAttilio Rao #define LK_TRYWIT(x) \ 125e5f94314SAttilio Rao (LK_TRYOP(x) ? LOP_TRYLOCK : 0) 126047dd67eSAttilio Rao 127651175c9SAttilio Rao #define LK_CAN_ADAPT(lk, f) \ 128651175c9SAttilio Rao (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 && \ 129651175c9SAttilio Rao ((f) & LK_SLEEPFAIL) == 0) 130651175c9SAttilio Rao 131047dd67eSAttilio Rao #define lockmgr_disowned(lk) \ 132047dd67eSAttilio Rao (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC) 133047dd67eSAttilio Rao 134047dd67eSAttilio Rao #define lockmgr_xlocked(lk) \ 135047dd67eSAttilio Rao (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread) 136047dd67eSAttilio Rao 137d576deedSPawel Jakub Dawidek static void assert_lockmgr(const struct lock_object *lock, int how); 138047dd67eSAttilio Rao #ifdef DDB 139d576deedSPawel Jakub Dawidek static void db_show_lockmgr(const struct lock_object *lock); 140be6847d7SJohn Baldwin #endif 1417faf4d90SDavide Italiano static void lock_lockmgr(struct lock_object *lock, uintptr_t how); 142a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 143d576deedSPawel Jakub Dawidek static int owner_lockmgr(const struct lock_object *lock, 144d576deedSPawel Jakub Dawidek struct thread **owner); 145a5aedd68SStacey Son #endif 1467faf4d90SDavide Italiano static uintptr_t unlock_lockmgr(struct lock_object *lock); 14761bd5e21SKip Macy 14861bd5e21SKip Macy struct lock_class lock_class_lockmgr = { 1493ff6d229SJohn Baldwin .lc_name = "lockmgr", 150047dd67eSAttilio Rao .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE, 151f9721b43SAttilio Rao .lc_assert = assert_lockmgr, 15261bd5e21SKip Macy #ifdef DDB 1536e21afd4SJohn Baldwin .lc_ddb_show = db_show_lockmgr, 15461bd5e21SKip Macy #endif 1556e21afd4SJohn Baldwin .lc_lock = lock_lockmgr, 156a5aedd68SStacey Son .lc_unlock = unlock_lockmgr, 157a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 158a5aedd68SStacey Son .lc_owner = owner_lockmgr, 159a5aedd68SStacey Son #endif 16061bd5e21SKip Macy }; 16161bd5e21SKip Macy 162651175c9SAttilio Rao #ifdef ADAPTIVE_LOCKMGRS 163651175c9SAttilio Rao static u_int alk_retries = 10; 164651175c9SAttilio Rao static u_int alk_loops = 10000; 1656472ac3dSEd Schouten static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, 1666472ac3dSEd Schouten "lockmgr debugging"); 167651175c9SAttilio Rao SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, ""); 168651175c9SAttilio Rao SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, ""); 169651175c9SAttilio Rao #endif 170651175c9SAttilio Rao 171047dd67eSAttilio Rao static __inline struct thread * 172d576deedSPawel Jakub Dawidek lockmgr_xholder(const struct lock *lk) 173047dd67eSAttilio Rao { 174047dd67eSAttilio Rao uintptr_t x; 175047dd67eSAttilio Rao 176047dd67eSAttilio Rao x = lk->lk_lock; 177047dd67eSAttilio Rao return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x)); 178047dd67eSAttilio Rao } 17984887fa3SAttilio Rao 18053bf4bb2SPeter Wemm /* 181047dd67eSAttilio Rao * It assumes sleepq_lock held and returns with this one unheld. 182047dd67eSAttilio Rao * It also assumes the generic interlock is sane and previously checked. 183047dd67eSAttilio Rao * If LK_INTERLOCK is specified the interlock is not reacquired after the 184047dd67eSAttilio Rao * sleep. 18553bf4bb2SPeter Wemm */ 186047dd67eSAttilio Rao static __inline int 187047dd67eSAttilio Rao sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk, 188047dd67eSAttilio Rao const char *wmesg, int pri, int timo, int queue) 189047dd67eSAttilio Rao { 190e5f94314SAttilio Rao GIANT_DECLARE; 191047dd67eSAttilio Rao struct lock_class *class; 192047dd67eSAttilio Rao int catch, error; 19353bf4bb2SPeter Wemm 194047dd67eSAttilio Rao class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL; 1955047a8fdSAttilio Rao catch = pri & PCATCH; 196047dd67eSAttilio Rao pri &= PRIMASK; 197047dd67eSAttilio Rao error = 0; 198047dd67eSAttilio Rao 199047dd67eSAttilio Rao LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk, 200047dd67eSAttilio Rao (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared"); 201047dd67eSAttilio Rao 202047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 203047dd67eSAttilio Rao class->lc_unlock(ilk); 2042028867dSAttilio Rao if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) 2052028867dSAttilio Rao lk->lk_exslpfail++; 206e5f94314SAttilio Rao GIANT_SAVE(); 207047dd67eSAttilio Rao sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ? 208047dd67eSAttilio Rao SLEEPQ_INTERRUPTIBLE : 0), queue); 209a115fb62SHans Petter Selasky if ((flags & LK_TIMELOCK) && timo) 210047dd67eSAttilio Rao sleepq_set_timeout(&lk->lock_object, timo); 211a115fb62SHans Petter Selasky 212047dd67eSAttilio Rao /* 213047dd67eSAttilio Rao * Decisional switch for real sleeping. 214047dd67eSAttilio Rao */ 215047dd67eSAttilio Rao if ((flags & LK_TIMELOCK) && timo && catch) 216047dd67eSAttilio Rao error = sleepq_timedwait_sig(&lk->lock_object, pri); 217047dd67eSAttilio Rao else if ((flags & LK_TIMELOCK) && timo) 218047dd67eSAttilio Rao error = sleepq_timedwait(&lk->lock_object, pri); 219047dd67eSAttilio Rao else if (catch) 220047dd67eSAttilio Rao error = sleepq_wait_sig(&lk->lock_object, pri); 221047dd67eSAttilio Rao else 222047dd67eSAttilio Rao sleepq_wait(&lk->lock_object, pri); 223e5f94314SAttilio Rao GIANT_RESTORE(); 224047dd67eSAttilio Rao if ((flags & LK_SLEEPFAIL) && error == 0) 225047dd67eSAttilio Rao error = ENOLCK; 226047dd67eSAttilio Rao 227047dd67eSAttilio Rao return (error); 228047dd67eSAttilio Rao } 229047dd67eSAttilio Rao 230da7bbd2cSJohn Baldwin static __inline int 231047dd67eSAttilio Rao wakeupshlk(struct lock *lk, const char *file, int line) 232047dd67eSAttilio Rao { 233047dd67eSAttilio Rao uintptr_t v, x; 2342028867dSAttilio Rao u_int realexslp; 235da7bbd2cSJohn Baldwin int queue, wakeup_swapper; 236047dd67eSAttilio Rao 237e5f94314SAttilio Rao WITNESS_UNLOCK(&lk->lock_object, 0, file, line); 238047dd67eSAttilio Rao LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line); 239047dd67eSAttilio Rao 240da7bbd2cSJohn Baldwin wakeup_swapper = 0; 241047dd67eSAttilio Rao for (;;) { 242047dd67eSAttilio Rao x = lk->lk_lock; 243047dd67eSAttilio Rao 244047dd67eSAttilio Rao /* 245047dd67eSAttilio Rao * If there is more than one shared lock held, just drop one 246047dd67eSAttilio Rao * and return. 247047dd67eSAttilio Rao */ 248047dd67eSAttilio Rao if (LK_SHARERS(x) > 1) { 2497f9f80ceSAttilio Rao if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, 250047dd67eSAttilio Rao x - LK_ONE_SHARER)) 251047dd67eSAttilio Rao break; 252047dd67eSAttilio Rao continue; 253047dd67eSAttilio Rao } 254047dd67eSAttilio Rao 255047dd67eSAttilio Rao /* 256047dd67eSAttilio Rao * If there are not waiters on the exclusive queue, drop the 257047dd67eSAttilio Rao * lock quickly. 258047dd67eSAttilio Rao */ 259047dd67eSAttilio Rao if ((x & LK_ALL_WAITERS) == 0) { 260651175c9SAttilio Rao MPASS((x & ~LK_EXCLUSIVE_SPINNERS) == 261651175c9SAttilio Rao LK_SHARERS_LOCK(1)); 2627f9f80ceSAttilio Rao if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED)) 263047dd67eSAttilio Rao break; 264047dd67eSAttilio Rao continue; 265047dd67eSAttilio Rao } 266047dd67eSAttilio Rao 267047dd67eSAttilio Rao /* 268047dd67eSAttilio Rao * We should have a sharer with waiters, so enter the hard 269047dd67eSAttilio Rao * path in order to handle wakeups correctly. 270047dd67eSAttilio Rao */ 271047dd67eSAttilio Rao sleepq_lock(&lk->lock_object); 272651175c9SAttilio Rao x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS); 273047dd67eSAttilio Rao v = LK_UNLOCKED; 274047dd67eSAttilio Rao 275047dd67eSAttilio Rao /* 276047dd67eSAttilio Rao * If the lock has exclusive waiters, give them preference in 277047dd67eSAttilio Rao * order to avoid deadlock with shared runners up. 2782028867dSAttilio Rao * If interruptible sleeps left the exclusive queue empty 2792028867dSAttilio Rao * avoid a starvation for the threads sleeping on the shared 2802028867dSAttilio Rao * queue by giving them precedence and cleaning up the 2812028867dSAttilio Rao * exclusive waiters bit anyway. 282c636ba83SAttilio Rao * Please note that lk_exslpfail count may be lying about 283c636ba83SAttilio Rao * the real number of waiters with the LK_SLEEPFAIL flag on 284c636ba83SAttilio Rao * because they may be used in conjuction with interruptible 285aab9c8c2SAttilio Rao * sleeps so lk_exslpfail might be considered an 'upper limit' 286aab9c8c2SAttilio Rao * bound, including the edge cases. 287047dd67eSAttilio Rao */ 2882028867dSAttilio Rao realexslp = sleepq_sleepcnt(&lk->lock_object, 2892028867dSAttilio Rao SQ_EXCLUSIVE_QUEUE); 2902028867dSAttilio Rao if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) { 2912028867dSAttilio Rao if (lk->lk_exslpfail < realexslp) { 2922028867dSAttilio Rao lk->lk_exslpfail = 0; 293047dd67eSAttilio Rao queue = SQ_EXCLUSIVE_QUEUE; 294047dd67eSAttilio Rao v |= (x & LK_SHARED_WAITERS); 295047dd67eSAttilio Rao } else { 2962028867dSAttilio Rao lk->lk_exslpfail = 0; 2972028867dSAttilio Rao LOCK_LOG2(lk, 2982028867dSAttilio Rao "%s: %p has only LK_SLEEPFAIL sleepers", 2992028867dSAttilio Rao __func__, lk); 3002028867dSAttilio Rao LOCK_LOG2(lk, 3012028867dSAttilio Rao "%s: %p waking up threads on the exclusive queue", 3022028867dSAttilio Rao __func__, lk); 3032028867dSAttilio Rao wakeup_swapper = 3042028867dSAttilio Rao sleepq_broadcast(&lk->lock_object, 3052028867dSAttilio Rao SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE); 3062028867dSAttilio Rao queue = SQ_SHARED_QUEUE; 3072028867dSAttilio Rao } 3082028867dSAttilio Rao 3092028867dSAttilio Rao } else { 3109dbf7a62SAttilio Rao 3119dbf7a62SAttilio Rao /* 3129dbf7a62SAttilio Rao * Exclusive waiters sleeping with LK_SLEEPFAIL on 3139dbf7a62SAttilio Rao * and using interruptible sleeps/timeout may have 3149dbf7a62SAttilio Rao * left spourious lk_exslpfail counts on, so clean 3159dbf7a62SAttilio Rao * it up anyway. 3169dbf7a62SAttilio Rao */ 3179dbf7a62SAttilio Rao lk->lk_exslpfail = 0; 318047dd67eSAttilio Rao queue = SQ_SHARED_QUEUE; 319047dd67eSAttilio Rao } 320047dd67eSAttilio Rao 3217f9f80ceSAttilio Rao if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x, 322047dd67eSAttilio Rao v)) { 323047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 324047dd67eSAttilio Rao continue; 325047dd67eSAttilio Rao } 326047dd67eSAttilio Rao LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue", 327047dd67eSAttilio Rao __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" : 328047dd67eSAttilio Rao "exclusive"); 3292028867dSAttilio Rao wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 330da7bbd2cSJohn Baldwin 0, queue); 331047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 332047dd67eSAttilio Rao break; 333047dd67eSAttilio Rao } 334047dd67eSAttilio Rao 335047dd67eSAttilio Rao lock_profile_release_lock(&lk->lock_object); 336b5fb43e5SJohn Baldwin TD_LOCKS_DEC(curthread); 337b5fb43e5SJohn Baldwin TD_SLOCKS_DEC(curthread); 338da7bbd2cSJohn Baldwin return (wakeup_swapper); 339047dd67eSAttilio Rao } 340047dd67eSAttilio Rao 341047dd67eSAttilio Rao static void 342d576deedSPawel Jakub Dawidek assert_lockmgr(const struct lock_object *lock, int what) 343f9721b43SAttilio Rao { 344f9721b43SAttilio Rao 345f9721b43SAttilio Rao panic("lockmgr locks do not support assertions"); 346f9721b43SAttilio Rao } 347f9721b43SAttilio Rao 348047dd67eSAttilio Rao static void 3497faf4d90SDavide Italiano lock_lockmgr(struct lock_object *lock, uintptr_t how) 3506e21afd4SJohn Baldwin { 3516e21afd4SJohn Baldwin 3526e21afd4SJohn Baldwin panic("lockmgr locks do not support sleep interlocking"); 3536e21afd4SJohn Baldwin } 3546e21afd4SJohn Baldwin 3557faf4d90SDavide Italiano static uintptr_t 3566e21afd4SJohn Baldwin unlock_lockmgr(struct lock_object *lock) 3576e21afd4SJohn Baldwin { 3586e21afd4SJohn Baldwin 3596e21afd4SJohn Baldwin panic("lockmgr locks do not support sleep interlocking"); 3606e21afd4SJohn Baldwin } 3616e21afd4SJohn Baldwin 362a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 363a5aedd68SStacey Son static int 364d576deedSPawel Jakub Dawidek owner_lockmgr(const struct lock_object *lock, struct thread **owner) 365a5aedd68SStacey Son { 366a5aedd68SStacey Son 367a5aedd68SStacey Son panic("lockmgr locks do not support owner inquiring"); 368a5aedd68SStacey Son } 369a5aedd68SStacey Son #endif 370a5aedd68SStacey Son 37199448ed1SJohn Dyson void 372047dd67eSAttilio Rao lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags) 37399448ed1SJohn Dyson { 3746efc8a16SAttilio Rao int iflags; 3756efc8a16SAttilio Rao 376047dd67eSAttilio Rao MPASS((flags & ~LK_INIT_MASK) == 0); 377353998acSAttilio Rao ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock, 378353998acSAttilio Rao ("%s: lockmgr not aligned for %s: %p", __func__, wmesg, 379353998acSAttilio Rao &lk->lk_lock)); 38099448ed1SJohn Dyson 381f0830182SAttilio Rao iflags = LO_SLEEPABLE | LO_UPGRADABLE; 382f0830182SAttilio Rao if (flags & LK_CANRECURSE) 383f0830182SAttilio Rao iflags |= LO_RECURSABLE; 384047dd67eSAttilio Rao if ((flags & LK_NODUP) == 0) 3856efc8a16SAttilio Rao iflags |= LO_DUPOK; 3867fbfba7bSAttilio Rao if (flags & LK_NOPROFILE) 3877fbfba7bSAttilio Rao iflags |= LO_NOPROFILE; 388047dd67eSAttilio Rao if ((flags & LK_NOWITNESS) == 0) 3896efc8a16SAttilio Rao iflags |= LO_WITNESS; 3907fbfba7bSAttilio Rao if (flags & LK_QUIET) 3917fbfba7bSAttilio Rao iflags |= LO_QUIET; 392e63091eaSMarcel Moolenaar if (flags & LK_IS_VNODE) 393e63091eaSMarcel Moolenaar iflags |= LO_IS_VNODE; 394651175c9SAttilio Rao iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE); 395047dd67eSAttilio Rao 396b5fb43e5SJohn Baldwin lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags); 397047dd67eSAttilio Rao lk->lk_lock = LK_UNLOCKED; 398047dd67eSAttilio Rao lk->lk_recurse = 0; 3992028867dSAttilio Rao lk->lk_exslpfail = 0; 400047dd67eSAttilio Rao lk->lk_timo = timo; 401047dd67eSAttilio Rao lk->lk_pri = pri; 402047dd67eSAttilio Rao STACK_ZERO(lk); 40399448ed1SJohn Dyson } 40499448ed1SJohn Dyson 4053634d5b2SJohn Baldwin /* 4063634d5b2SJohn Baldwin * XXX: Gross hacks to manipulate external lock flags after 4073634d5b2SJohn Baldwin * initialization. Used for certain vnode and buf locks. 4083634d5b2SJohn Baldwin */ 4093634d5b2SJohn Baldwin void 4103634d5b2SJohn Baldwin lockallowshare(struct lock *lk) 4113634d5b2SJohn Baldwin { 4123634d5b2SJohn Baldwin 4133634d5b2SJohn Baldwin lockmgr_assert(lk, KA_XLOCKED); 4143634d5b2SJohn Baldwin lk->lock_object.lo_flags &= ~LK_NOSHARE; 4153634d5b2SJohn Baldwin } 4163634d5b2SJohn Baldwin 4173634d5b2SJohn Baldwin void 418575e02d9SKonstantin Belousov lockdisableshare(struct lock *lk) 419575e02d9SKonstantin Belousov { 420575e02d9SKonstantin Belousov 421575e02d9SKonstantin Belousov lockmgr_assert(lk, KA_XLOCKED); 422575e02d9SKonstantin Belousov lk->lock_object.lo_flags |= LK_NOSHARE; 423575e02d9SKonstantin Belousov } 424575e02d9SKonstantin Belousov 425575e02d9SKonstantin Belousov void 4263634d5b2SJohn Baldwin lockallowrecurse(struct lock *lk) 4273634d5b2SJohn Baldwin { 4283634d5b2SJohn Baldwin 4293634d5b2SJohn Baldwin lockmgr_assert(lk, KA_XLOCKED); 4303634d5b2SJohn Baldwin lk->lock_object.lo_flags |= LO_RECURSABLE; 4313634d5b2SJohn Baldwin } 4323634d5b2SJohn Baldwin 4333634d5b2SJohn Baldwin void 4343634d5b2SJohn Baldwin lockdisablerecurse(struct lock *lk) 4353634d5b2SJohn Baldwin { 4363634d5b2SJohn Baldwin 4373634d5b2SJohn Baldwin lockmgr_assert(lk, KA_XLOCKED); 4383634d5b2SJohn Baldwin lk->lock_object.lo_flags &= ~LO_RECURSABLE; 4393634d5b2SJohn Baldwin } 4403634d5b2SJohn Baldwin 441a18b1f1dSJason Evans void 442047dd67eSAttilio Rao lockdestroy(struct lock *lk) 443a18b1f1dSJason Evans { 444c91fcee7SJohn Baldwin 445047dd67eSAttilio Rao KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held")); 446047dd67eSAttilio Rao KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed")); 4472028867dSAttilio Rao KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters")); 448047dd67eSAttilio Rao lock_destroy(&lk->lock_object); 449047dd67eSAttilio Rao } 450047dd67eSAttilio Rao 451047dd67eSAttilio Rao int 452047dd67eSAttilio Rao __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk, 453047dd67eSAttilio Rao const char *wmesg, int pri, int timo, const char *file, int line) 454047dd67eSAttilio Rao { 455e5f94314SAttilio Rao GIANT_DECLARE; 456047dd67eSAttilio Rao struct lock_class *class; 457047dd67eSAttilio Rao const char *iwmesg; 458047dd67eSAttilio Rao uintptr_t tid, v, x; 4592028867dSAttilio Rao u_int op, realexslp; 4601723a064SJeff Roberson int error, ipri, itimo, queue, wakeup_swapper; 4611723a064SJeff Roberson #ifdef LOCK_PROFILING 4621723a064SJeff Roberson uint64_t waittime = 0; 4631723a064SJeff Roberson int contested = 0; 4641723a064SJeff Roberson #endif 465651175c9SAttilio Rao #ifdef ADAPTIVE_LOCKMGRS 466651175c9SAttilio Rao volatile struct thread *owner; 467651175c9SAttilio Rao u_int i, spintries = 0; 468651175c9SAttilio Rao #endif 469047dd67eSAttilio Rao 470047dd67eSAttilio Rao error = 0; 471047dd67eSAttilio Rao tid = (uintptr_t)curthread; 472047dd67eSAttilio Rao op = (flags & LK_TYPE_MASK); 473047dd67eSAttilio Rao iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg; 474047dd67eSAttilio Rao ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri; 475047dd67eSAttilio Rao itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo; 476047dd67eSAttilio Rao 477047dd67eSAttilio Rao MPASS((flags & ~LK_TOTAL_MASK) == 0); 478872b7289SAttilio Rao KASSERT((op & (op - 1)) == 0, 479872b7289SAttilio Rao ("%s: Invalid requested operation @ %s:%d", __func__, file, line)); 480047dd67eSAttilio Rao KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 || 481047dd67eSAttilio Rao (op != LK_DOWNGRADE && op != LK_RELEASE), 482047dd67eSAttilio Rao ("%s: Invalid flags in regard of the operation desired @ %s:%d", 483047dd67eSAttilio Rao __func__, file, line)); 484047dd67eSAttilio Rao KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL, 485047dd67eSAttilio Rao ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d", 486047dd67eSAttilio Rao __func__, file, line)); 487cd2fe4e6SAttilio Rao KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 488e3ae0dfeSAttilio Rao ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread, 489e3ae0dfeSAttilio Rao lk->lock_object.lo_name, file, line)); 490047dd67eSAttilio Rao 491047dd67eSAttilio Rao class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL; 492047dd67eSAttilio Rao if (panicstr != NULL) { 493047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 494047dd67eSAttilio Rao class->lc_unlock(ilk); 495047dd67eSAttilio Rao return (0); 496047dd67eSAttilio Rao } 497047dd67eSAttilio Rao 498d0a724c5SKonstantin Belousov if (lk->lock_object.lo_flags & LK_NOSHARE) { 499d0a724c5SKonstantin Belousov switch (op) { 500d0a724c5SKonstantin Belousov case LK_SHARED: 501047dd67eSAttilio Rao op = LK_EXCLUSIVE; 502d0a724c5SKonstantin Belousov break; 503d0a724c5SKonstantin Belousov case LK_UPGRADE: 5047c6fe803SKonstantin Belousov case LK_TRYUPGRADE: 505d0a724c5SKonstantin Belousov case LK_DOWNGRADE: 506d0a724c5SKonstantin Belousov _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, 507d0a724c5SKonstantin Belousov file, line); 50843287e27SMateusz Guzik if (flags & LK_INTERLOCK) 50943287e27SMateusz Guzik class->lc_unlock(ilk); 510d0a724c5SKonstantin Belousov return (0); 511d0a724c5SKonstantin Belousov } 512d0a724c5SKonstantin Belousov } 513047dd67eSAttilio Rao 514da7bbd2cSJohn Baldwin wakeup_swapper = 0; 515047dd67eSAttilio Rao switch (op) { 516047dd67eSAttilio Rao case LK_SHARED: 517e5f94314SAttilio Rao if (LK_CAN_WITNESS(flags)) 518e5f94314SAttilio Rao WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER, 51924150d37SJohn Baldwin file, line, flags & LK_INTERLOCK ? ilk : NULL); 520047dd67eSAttilio Rao for (;;) { 521047dd67eSAttilio Rao x = lk->lk_lock; 522047dd67eSAttilio Rao 523047dd67eSAttilio Rao /* 524047dd67eSAttilio Rao * If no other thread has an exclusive lock, or 525047dd67eSAttilio Rao * no exclusive waiter is present, bump the count of 526047dd67eSAttilio Rao * sharers. Since we have to preserve the state of 527047dd67eSAttilio Rao * waiters, if we fail to acquire the shared lock 528047dd67eSAttilio Rao * loop back and retry. 529047dd67eSAttilio Rao */ 53072ba3c08SKonstantin Belousov if (LK_CAN_SHARE(x, flags)) { 531047dd67eSAttilio Rao if (atomic_cmpset_acq_ptr(&lk->lk_lock, x, 532047dd67eSAttilio Rao x + LK_ONE_SHARER)) 533047dd67eSAttilio Rao break; 534047dd67eSAttilio Rao continue; 535047dd67eSAttilio Rao } 536f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS 537f5f9340bSFabien Thomas PMC_SOFT_CALL( , , lock, failed); 538f5f9340bSFabien Thomas #endif 539047dd67eSAttilio Rao lock_profile_obtain_lock_failed(&lk->lock_object, 540047dd67eSAttilio Rao &contested, &waittime); 541047dd67eSAttilio Rao 542047dd67eSAttilio Rao /* 54396f1567fSKonstantin Belousov * If the lock is already held by curthread in 544047dd67eSAttilio Rao * exclusive way avoid a deadlock. 545047dd67eSAttilio Rao */ 546047dd67eSAttilio Rao if (LK_HOLDER(x) == tid) { 547047dd67eSAttilio Rao LOCK_LOG2(lk, 54896f1567fSKonstantin Belousov "%s: %p already held in exclusive mode", 549047dd67eSAttilio Rao __func__, lk); 550047dd67eSAttilio Rao error = EDEADLK; 551047dd67eSAttilio Rao break; 552a18b1f1dSJason Evans } 553a18b1f1dSJason Evans 554a18b1f1dSJason Evans /* 555047dd67eSAttilio Rao * If the lock is expected to not sleep just give up 556047dd67eSAttilio Rao * and return. 557d7a7e179SAttilio Rao */ 558047dd67eSAttilio Rao if (LK_TRYOP(flags)) { 559047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p fails the try operation", 560047dd67eSAttilio Rao __func__, lk); 561047dd67eSAttilio Rao error = EBUSY; 562047dd67eSAttilio Rao break; 563047dd67eSAttilio Rao } 564047dd67eSAttilio Rao 565651175c9SAttilio Rao #ifdef ADAPTIVE_LOCKMGRS 566651175c9SAttilio Rao /* 567651175c9SAttilio Rao * If the owner is running on another CPU, spin until 568651175c9SAttilio Rao * the owner stops running or the state of the lock 5698d3635c4SAttilio Rao * changes. We need a double-state handle here 5708d3635c4SAttilio Rao * because for a failed acquisition the lock can be 5718d3635c4SAttilio Rao * either held in exclusive mode or shared mode 5728d3635c4SAttilio Rao * (for the writer starvation avoidance technique). 573651175c9SAttilio Rao */ 574651175c9SAttilio Rao if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 && 575651175c9SAttilio Rao LK_HOLDER(x) != LK_KERNPROC) { 576651175c9SAttilio Rao owner = (struct thread *)LK_HOLDER(x); 577651175c9SAttilio Rao if (LOCK_LOG_TEST(&lk->lock_object, 0)) 578651175c9SAttilio Rao CTR3(KTR_LOCK, 579651175c9SAttilio Rao "%s: spinning on %p held by %p", 580651175c9SAttilio Rao __func__, lk, owner); 5812cba8dd3SJohn Baldwin KTR_STATE1(KTR_SCHED, "thread", 5822cba8dd3SJohn Baldwin sched_tdname(td), "spinning", 5832cba8dd3SJohn Baldwin "lockname:\"%s\"", lk->lock_object.lo_name); 584651175c9SAttilio Rao 585651175c9SAttilio Rao /* 586651175c9SAttilio Rao * If we are holding also an interlock drop it 587651175c9SAttilio Rao * in order to avoid a deadlock if the lockmgr 588651175c9SAttilio Rao * owner is adaptively spinning on the 589651175c9SAttilio Rao * interlock itself. 590651175c9SAttilio Rao */ 591651175c9SAttilio Rao if (flags & LK_INTERLOCK) { 592651175c9SAttilio Rao class->lc_unlock(ilk); 593651175c9SAttilio Rao flags &= ~LK_INTERLOCK; 594651175c9SAttilio Rao } 595651175c9SAttilio Rao GIANT_SAVE(); 596651175c9SAttilio Rao while (LK_HOLDER(lk->lk_lock) == 597651175c9SAttilio Rao (uintptr_t)owner && TD_IS_RUNNING(owner)) 598651175c9SAttilio Rao cpu_spinwait(); 5992cba8dd3SJohn Baldwin KTR_STATE0(KTR_SCHED, "thread", 6002cba8dd3SJohn Baldwin sched_tdname(td), "running"); 6018d3635c4SAttilio Rao GIANT_RESTORE(); 6028d3635c4SAttilio Rao continue; 603651175c9SAttilio Rao } else if (LK_CAN_ADAPT(lk, flags) && 604651175c9SAttilio Rao (x & LK_SHARE) != 0 && LK_SHARERS(x) && 605651175c9SAttilio Rao spintries < alk_retries) { 6062cba8dd3SJohn Baldwin KTR_STATE1(KTR_SCHED, "thread", 6072cba8dd3SJohn Baldwin sched_tdname(td), "spinning", 6082cba8dd3SJohn Baldwin "lockname:\"%s\"", lk->lock_object.lo_name); 609651175c9SAttilio Rao if (flags & LK_INTERLOCK) { 610651175c9SAttilio Rao class->lc_unlock(ilk); 611651175c9SAttilio Rao flags &= ~LK_INTERLOCK; 612651175c9SAttilio Rao } 613651175c9SAttilio Rao GIANT_SAVE(); 614651175c9SAttilio Rao spintries++; 615651175c9SAttilio Rao for (i = 0; i < alk_loops; i++) { 616651175c9SAttilio Rao if (LOCK_LOG_TEST(&lk->lock_object, 0)) 617651175c9SAttilio Rao CTR4(KTR_LOCK, 618651175c9SAttilio Rao "%s: shared spinning on %p with %u and %u", 619651175c9SAttilio Rao __func__, lk, spintries, i); 620651175c9SAttilio Rao x = lk->lk_lock; 621651175c9SAttilio Rao if ((x & LK_SHARE) == 0 || 622cc246667SKonstantin Belousov LK_CAN_SHARE(x, flags) != 0) 623651175c9SAttilio Rao break; 624651175c9SAttilio Rao cpu_spinwait(); 625651175c9SAttilio Rao } 6262cba8dd3SJohn Baldwin KTR_STATE0(KTR_SCHED, "thread", 6272cba8dd3SJohn Baldwin sched_tdname(td), "running"); 6288d3635c4SAttilio Rao GIANT_RESTORE(); 629651175c9SAttilio Rao if (i != alk_loops) 630651175c9SAttilio Rao continue; 631651175c9SAttilio Rao } 632651175c9SAttilio Rao #endif 633651175c9SAttilio Rao 634047dd67eSAttilio Rao /* 635047dd67eSAttilio Rao * Acquire the sleepqueue chain lock because we 636047dd67eSAttilio Rao * probabilly will need to manipulate waiters flags. 637047dd67eSAttilio Rao */ 638047dd67eSAttilio Rao sleepq_lock(&lk->lock_object); 639047dd67eSAttilio Rao x = lk->lk_lock; 640047dd67eSAttilio Rao 641047dd67eSAttilio Rao /* 642047dd67eSAttilio Rao * if the lock can be acquired in shared mode, try 643047dd67eSAttilio Rao * again. 644047dd67eSAttilio Rao */ 64572ba3c08SKonstantin Belousov if (LK_CAN_SHARE(x, flags)) { 646047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 647047dd67eSAttilio Rao continue; 648047dd67eSAttilio Rao } 649047dd67eSAttilio Rao 650651175c9SAttilio Rao #ifdef ADAPTIVE_LOCKMGRS 651651175c9SAttilio Rao /* 652651175c9SAttilio Rao * The current lock owner might have started executing 653651175c9SAttilio Rao * on another CPU (or the lock could have changed 654651175c9SAttilio Rao * owner) while we were waiting on the turnstile 655651175c9SAttilio Rao * chain lock. If so, drop the turnstile lock and try 656651175c9SAttilio Rao * again. 657651175c9SAttilio Rao */ 658651175c9SAttilio Rao if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 && 659651175c9SAttilio Rao LK_HOLDER(x) != LK_KERNPROC) { 660651175c9SAttilio Rao owner = (struct thread *)LK_HOLDER(x); 661651175c9SAttilio Rao if (TD_IS_RUNNING(owner)) { 662651175c9SAttilio Rao sleepq_release(&lk->lock_object); 663651175c9SAttilio Rao continue; 664651175c9SAttilio Rao } 665651175c9SAttilio Rao } 666651175c9SAttilio Rao #endif 667651175c9SAttilio Rao 668047dd67eSAttilio Rao /* 669047dd67eSAttilio Rao * Try to set the LK_SHARED_WAITERS flag. If we fail, 670047dd67eSAttilio Rao * loop back and retry. 671047dd67eSAttilio Rao */ 672047dd67eSAttilio Rao if ((x & LK_SHARED_WAITERS) == 0) { 673047dd67eSAttilio Rao if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x, 674047dd67eSAttilio Rao x | LK_SHARED_WAITERS)) { 675047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 676047dd67eSAttilio Rao continue; 677047dd67eSAttilio Rao } 678047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p set shared waiters flag", 679047dd67eSAttilio Rao __func__, lk); 680047dd67eSAttilio Rao } 681047dd67eSAttilio Rao 682047dd67eSAttilio Rao /* 683047dd67eSAttilio Rao * As far as we have been unable to acquire the 684047dd67eSAttilio Rao * shared lock and the shared waiters flag is set, 685047dd67eSAttilio Rao * we will sleep. 686047dd67eSAttilio Rao */ 687047dd67eSAttilio Rao error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, 688047dd67eSAttilio Rao SQ_SHARED_QUEUE); 689047dd67eSAttilio Rao flags &= ~LK_INTERLOCK; 690047dd67eSAttilio Rao if (error) { 691047dd67eSAttilio Rao LOCK_LOG3(lk, 692047dd67eSAttilio Rao "%s: interrupted sleep for %p with %d", 693047dd67eSAttilio Rao __func__, lk, error); 694047dd67eSAttilio Rao break; 695047dd67eSAttilio Rao } 696047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 697047dd67eSAttilio Rao __func__, lk); 698047dd67eSAttilio Rao } 699047dd67eSAttilio Rao if (error == 0) { 700047dd67eSAttilio Rao lock_profile_obtain_lock_success(&lk->lock_object, 701047dd67eSAttilio Rao contested, waittime, file, line); 702047dd67eSAttilio Rao LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, 703047dd67eSAttilio Rao line); 704e5f94314SAttilio Rao WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, 705e5f94314SAttilio Rao line); 706047dd67eSAttilio Rao TD_LOCKS_INC(curthread); 707047dd67eSAttilio Rao TD_SLOCKS_INC(curthread); 708047dd67eSAttilio Rao STACK_SAVE(lk); 709047dd67eSAttilio Rao } 710047dd67eSAttilio Rao break; 711047dd67eSAttilio Rao case LK_UPGRADE: 7127c6fe803SKonstantin Belousov case LK_TRYUPGRADE: 713047dd67eSAttilio Rao _lockmgr_assert(lk, KA_SLOCKED, file, line); 714651175c9SAttilio Rao v = lk->lk_lock; 715651175c9SAttilio Rao x = v & LK_ALL_WAITERS; 716651175c9SAttilio Rao v &= LK_EXCLUSIVE_SPINNERS; 717047dd67eSAttilio Rao 718047dd67eSAttilio Rao /* 719047dd67eSAttilio Rao * Try to switch from one shared lock to an exclusive one. 720047dd67eSAttilio Rao * We need to preserve waiters flags during the operation. 721047dd67eSAttilio Rao */ 722651175c9SAttilio Rao if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v, 723047dd67eSAttilio Rao tid | x)) { 724047dd67eSAttilio Rao LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file, 725047dd67eSAttilio Rao line); 726e5f94314SAttilio Rao WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE | 727e5f94314SAttilio Rao LK_TRYWIT(flags), file, line); 728047dd67eSAttilio Rao TD_SLOCKS_DEC(curthread); 729047dd67eSAttilio Rao break; 730047dd67eSAttilio Rao } 731047dd67eSAttilio Rao 732047dd67eSAttilio Rao /* 7337c6fe803SKonstantin Belousov * In LK_TRYUPGRADE mode, do not drop the lock, 7347c6fe803SKonstantin Belousov * returning EBUSY instead. 7357c6fe803SKonstantin Belousov */ 7367c6fe803SKonstantin Belousov if (op == LK_TRYUPGRADE) { 7377c6fe803SKonstantin Belousov LOCK_LOG2(lk, "%s: %p failed the nowait upgrade", 7387c6fe803SKonstantin Belousov __func__, lk); 7397c6fe803SKonstantin Belousov error = EBUSY; 7407c6fe803SKonstantin Belousov break; 7417c6fe803SKonstantin Belousov } 7427c6fe803SKonstantin Belousov 7437c6fe803SKonstantin Belousov /* 744047dd67eSAttilio Rao * We have been unable to succeed in upgrading, so just 745047dd67eSAttilio Rao * give up the shared lock. 746047dd67eSAttilio Rao */ 747814f26daSJohn Baldwin wakeup_swapper |= wakeupshlk(lk, file, line); 748047dd67eSAttilio Rao 749047dd67eSAttilio Rao /* FALLTHROUGH */ 750047dd67eSAttilio Rao case LK_EXCLUSIVE: 751e5f94314SAttilio Rao if (LK_CAN_WITNESS(flags)) 752e5f94314SAttilio Rao WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER | 75324150d37SJohn Baldwin LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ? 75424150d37SJohn Baldwin ilk : NULL); 755047dd67eSAttilio Rao 756047dd67eSAttilio Rao /* 75796f1567fSKonstantin Belousov * If curthread already holds the lock and this one is 758047dd67eSAttilio Rao * allowed to recurse, simply recurse on it. 759047dd67eSAttilio Rao */ 760047dd67eSAttilio Rao if (lockmgr_xlocked(lk)) { 761047dd67eSAttilio Rao if ((flags & LK_CANRECURSE) == 0 && 762f0830182SAttilio Rao (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) { 763047dd67eSAttilio Rao 764047dd67eSAttilio Rao /* 765047dd67eSAttilio Rao * If the lock is expected to not panic just 766047dd67eSAttilio Rao * give up and return. 767047dd67eSAttilio Rao */ 768047dd67eSAttilio Rao if (LK_TRYOP(flags)) { 769047dd67eSAttilio Rao LOCK_LOG2(lk, 770047dd67eSAttilio Rao "%s: %p fails the try operation", 771047dd67eSAttilio Rao __func__, lk); 772047dd67eSAttilio Rao error = EBUSY; 773047dd67eSAttilio Rao break; 774047dd67eSAttilio Rao } 775047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 776047dd67eSAttilio Rao class->lc_unlock(ilk); 777047dd67eSAttilio Rao panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n", 778047dd67eSAttilio Rao __func__, iwmesg, file, line); 779047dd67eSAttilio Rao } 780047dd67eSAttilio Rao lk->lk_recurse++; 781047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p recursing", __func__, lk); 782047dd67eSAttilio Rao LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, 783047dd67eSAttilio Rao lk->lk_recurse, file, line); 784e5f94314SAttilio Rao WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 785e5f94314SAttilio Rao LK_TRYWIT(flags), file, line); 786047dd67eSAttilio Rao TD_LOCKS_INC(curthread); 787047dd67eSAttilio Rao break; 788047dd67eSAttilio Rao } 789047dd67eSAttilio Rao 790047dd67eSAttilio Rao while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, 791047dd67eSAttilio Rao tid)) { 792f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS 793f5f9340bSFabien Thomas PMC_SOFT_CALL( , , lock, failed); 794f5f9340bSFabien Thomas #endif 795047dd67eSAttilio Rao lock_profile_obtain_lock_failed(&lk->lock_object, 796047dd67eSAttilio Rao &contested, &waittime); 797047dd67eSAttilio Rao 798047dd67eSAttilio Rao /* 799047dd67eSAttilio Rao * If the lock is expected to not sleep just give up 800047dd67eSAttilio Rao * and return. 801047dd67eSAttilio Rao */ 802047dd67eSAttilio Rao if (LK_TRYOP(flags)) { 803047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p fails the try operation", 804047dd67eSAttilio Rao __func__, lk); 805047dd67eSAttilio Rao error = EBUSY; 806047dd67eSAttilio Rao break; 807047dd67eSAttilio Rao } 808047dd67eSAttilio Rao 809651175c9SAttilio Rao #ifdef ADAPTIVE_LOCKMGRS 810651175c9SAttilio Rao /* 811651175c9SAttilio Rao * If the owner is running on another CPU, spin until 812651175c9SAttilio Rao * the owner stops running or the state of the lock 813651175c9SAttilio Rao * changes. 814651175c9SAttilio Rao */ 815651175c9SAttilio Rao x = lk->lk_lock; 816651175c9SAttilio Rao if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 && 817651175c9SAttilio Rao LK_HOLDER(x) != LK_KERNPROC) { 818651175c9SAttilio Rao owner = (struct thread *)LK_HOLDER(x); 819651175c9SAttilio Rao if (LOCK_LOG_TEST(&lk->lock_object, 0)) 820651175c9SAttilio Rao CTR3(KTR_LOCK, 821651175c9SAttilio Rao "%s: spinning on %p held by %p", 822651175c9SAttilio Rao __func__, lk, owner); 8232cba8dd3SJohn Baldwin KTR_STATE1(KTR_SCHED, "thread", 8242cba8dd3SJohn Baldwin sched_tdname(td), "spinning", 8252cba8dd3SJohn Baldwin "lockname:\"%s\"", lk->lock_object.lo_name); 826651175c9SAttilio Rao 827651175c9SAttilio Rao /* 828651175c9SAttilio Rao * If we are holding also an interlock drop it 829651175c9SAttilio Rao * in order to avoid a deadlock if the lockmgr 830651175c9SAttilio Rao * owner is adaptively spinning on the 831651175c9SAttilio Rao * interlock itself. 832651175c9SAttilio Rao */ 833651175c9SAttilio Rao if (flags & LK_INTERLOCK) { 834651175c9SAttilio Rao class->lc_unlock(ilk); 835651175c9SAttilio Rao flags &= ~LK_INTERLOCK; 836651175c9SAttilio Rao } 837651175c9SAttilio Rao GIANT_SAVE(); 838651175c9SAttilio Rao while (LK_HOLDER(lk->lk_lock) == 839651175c9SAttilio Rao (uintptr_t)owner && TD_IS_RUNNING(owner)) 840651175c9SAttilio Rao cpu_spinwait(); 8412cba8dd3SJohn Baldwin KTR_STATE0(KTR_SCHED, "thread", 8422cba8dd3SJohn Baldwin sched_tdname(td), "running"); 8438d3635c4SAttilio Rao GIANT_RESTORE(); 8448d3635c4SAttilio Rao continue; 845651175c9SAttilio Rao } else if (LK_CAN_ADAPT(lk, flags) && 846651175c9SAttilio Rao (x & LK_SHARE) != 0 && LK_SHARERS(x) && 847651175c9SAttilio Rao spintries < alk_retries) { 848651175c9SAttilio Rao if ((x & LK_EXCLUSIVE_SPINNERS) == 0 && 849651175c9SAttilio Rao !atomic_cmpset_ptr(&lk->lk_lock, x, 850651175c9SAttilio Rao x | LK_EXCLUSIVE_SPINNERS)) 851651175c9SAttilio Rao continue; 8522cba8dd3SJohn Baldwin KTR_STATE1(KTR_SCHED, "thread", 8532cba8dd3SJohn Baldwin sched_tdname(td), "spinning", 8542cba8dd3SJohn Baldwin "lockname:\"%s\"", lk->lock_object.lo_name); 855651175c9SAttilio Rao if (flags & LK_INTERLOCK) { 856651175c9SAttilio Rao class->lc_unlock(ilk); 857651175c9SAttilio Rao flags &= ~LK_INTERLOCK; 858651175c9SAttilio Rao } 859651175c9SAttilio Rao GIANT_SAVE(); 860651175c9SAttilio Rao spintries++; 861651175c9SAttilio Rao for (i = 0; i < alk_loops; i++) { 862651175c9SAttilio Rao if (LOCK_LOG_TEST(&lk->lock_object, 0)) 863651175c9SAttilio Rao CTR4(KTR_LOCK, 864651175c9SAttilio Rao "%s: shared spinning on %p with %u and %u", 865651175c9SAttilio Rao __func__, lk, spintries, i); 866651175c9SAttilio Rao if ((lk->lk_lock & 867651175c9SAttilio Rao LK_EXCLUSIVE_SPINNERS) == 0) 868651175c9SAttilio Rao break; 869651175c9SAttilio Rao cpu_spinwait(); 870651175c9SAttilio Rao } 8712cba8dd3SJohn Baldwin KTR_STATE0(KTR_SCHED, "thread", 8722cba8dd3SJohn Baldwin sched_tdname(td), "running"); 8738d3635c4SAttilio Rao GIANT_RESTORE(); 874651175c9SAttilio Rao if (i != alk_loops) 875651175c9SAttilio Rao continue; 876651175c9SAttilio Rao } 877651175c9SAttilio Rao #endif 878651175c9SAttilio Rao 879047dd67eSAttilio Rao /* 880047dd67eSAttilio Rao * Acquire the sleepqueue chain lock because we 881047dd67eSAttilio Rao * probabilly will need to manipulate waiters flags. 882047dd67eSAttilio Rao */ 883047dd67eSAttilio Rao sleepq_lock(&lk->lock_object); 884047dd67eSAttilio Rao x = lk->lk_lock; 885047dd67eSAttilio Rao 886047dd67eSAttilio Rao /* 887047dd67eSAttilio Rao * if the lock has been released while we spun on 888047dd67eSAttilio Rao * the sleepqueue chain lock just try again. 889047dd67eSAttilio Rao */ 890047dd67eSAttilio Rao if (x == LK_UNLOCKED) { 891047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 892047dd67eSAttilio Rao continue; 893047dd67eSAttilio Rao } 894047dd67eSAttilio Rao 895651175c9SAttilio Rao #ifdef ADAPTIVE_LOCKMGRS 896651175c9SAttilio Rao /* 897651175c9SAttilio Rao * The current lock owner might have started executing 898651175c9SAttilio Rao * on another CPU (or the lock could have changed 899651175c9SAttilio Rao * owner) while we were waiting on the turnstile 900651175c9SAttilio Rao * chain lock. If so, drop the turnstile lock and try 901651175c9SAttilio Rao * again. 902651175c9SAttilio Rao */ 903651175c9SAttilio Rao if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 && 904651175c9SAttilio Rao LK_HOLDER(x) != LK_KERNPROC) { 905651175c9SAttilio Rao owner = (struct thread *)LK_HOLDER(x); 906651175c9SAttilio Rao if (TD_IS_RUNNING(owner)) { 907651175c9SAttilio Rao sleepq_release(&lk->lock_object); 908651175c9SAttilio Rao continue; 909651175c9SAttilio Rao } 910651175c9SAttilio Rao } 911651175c9SAttilio Rao #endif 912651175c9SAttilio Rao 913047dd67eSAttilio Rao /* 914047dd67eSAttilio Rao * The lock can be in the state where there is a 915047dd67eSAttilio Rao * pending queue of waiters, but still no owner. 916047dd67eSAttilio Rao * This happens when the lock is contested and an 917047dd67eSAttilio Rao * owner is going to claim the lock. 918047dd67eSAttilio Rao * If curthread is the one successfully acquiring it 919047dd67eSAttilio Rao * claim lock ownership and return, preserving waiters 920047dd67eSAttilio Rao * flags. 921047dd67eSAttilio Rao */ 922651175c9SAttilio Rao v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS); 923651175c9SAttilio Rao if ((x & ~v) == LK_UNLOCKED) { 924651175c9SAttilio Rao v &= ~LK_EXCLUSIVE_SPINNERS; 925047dd67eSAttilio Rao if (atomic_cmpset_acq_ptr(&lk->lk_lock, x, 926047dd67eSAttilio Rao tid | v)) { 927047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 928047dd67eSAttilio Rao LOCK_LOG2(lk, 929047dd67eSAttilio Rao "%s: %p claimed by a new writer", 930047dd67eSAttilio Rao __func__, lk); 931047dd67eSAttilio Rao break; 932047dd67eSAttilio Rao } 933047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 934047dd67eSAttilio Rao continue; 935047dd67eSAttilio Rao } 936047dd67eSAttilio Rao 937047dd67eSAttilio Rao /* 938047dd67eSAttilio Rao * Try to set the LK_EXCLUSIVE_WAITERS flag. If we 939047dd67eSAttilio Rao * fail, loop back and retry. 940047dd67eSAttilio Rao */ 941047dd67eSAttilio Rao if ((x & LK_EXCLUSIVE_WAITERS) == 0) { 942047dd67eSAttilio Rao if (!atomic_cmpset_ptr(&lk->lk_lock, x, 943047dd67eSAttilio Rao x | LK_EXCLUSIVE_WAITERS)) { 944047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 945047dd67eSAttilio Rao continue; 946047dd67eSAttilio Rao } 947047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p set excl waiters flag", 948047dd67eSAttilio Rao __func__, lk); 949047dd67eSAttilio Rao } 950047dd67eSAttilio Rao 951047dd67eSAttilio Rao /* 952047dd67eSAttilio Rao * As far as we have been unable to acquire the 953047dd67eSAttilio Rao * exclusive lock and the exclusive waiters flag 954047dd67eSAttilio Rao * is set, we will sleep. 955047dd67eSAttilio Rao */ 956047dd67eSAttilio Rao error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, 957047dd67eSAttilio Rao SQ_EXCLUSIVE_QUEUE); 958047dd67eSAttilio Rao flags &= ~LK_INTERLOCK; 959047dd67eSAttilio Rao if (error) { 960047dd67eSAttilio Rao LOCK_LOG3(lk, 961047dd67eSAttilio Rao "%s: interrupted sleep for %p with %d", 962047dd67eSAttilio Rao __func__, lk, error); 963047dd67eSAttilio Rao break; 964047dd67eSAttilio Rao } 965047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 966047dd67eSAttilio Rao __func__, lk); 967047dd67eSAttilio Rao } 968047dd67eSAttilio Rao if (error == 0) { 969047dd67eSAttilio Rao lock_profile_obtain_lock_success(&lk->lock_object, 970047dd67eSAttilio Rao contested, waittime, file, line); 971047dd67eSAttilio Rao LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, 972047dd67eSAttilio Rao lk->lk_recurse, file, line); 973e5f94314SAttilio Rao WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 974e5f94314SAttilio Rao LK_TRYWIT(flags), file, line); 975047dd67eSAttilio Rao TD_LOCKS_INC(curthread); 976047dd67eSAttilio Rao STACK_SAVE(lk); 977047dd67eSAttilio Rao } 978047dd67eSAttilio Rao break; 979047dd67eSAttilio Rao case LK_DOWNGRADE: 9801c7d98d0SAttilio Rao _lockmgr_assert(lk, KA_XLOCKED, file, line); 981e5f94314SAttilio Rao LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line); 982e5f94314SAttilio Rao WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line); 9831c7d98d0SAttilio Rao 9841c7d98d0SAttilio Rao /* 9851c7d98d0SAttilio Rao * Panic if the lock is recursed. 9861c7d98d0SAttilio Rao */ 9871c7d98d0SAttilio Rao if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) { 9881c7d98d0SAttilio Rao if (flags & LK_INTERLOCK) 9891c7d98d0SAttilio Rao class->lc_unlock(ilk); 9901c7d98d0SAttilio Rao panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n", 9911c7d98d0SAttilio Rao __func__, iwmesg, file, line); 9921c7d98d0SAttilio Rao } 993e5f94314SAttilio Rao TD_SLOCKS_INC(curthread); 994047dd67eSAttilio Rao 995047dd67eSAttilio Rao /* 996047dd67eSAttilio Rao * In order to preserve waiters flags, just spin. 997047dd67eSAttilio Rao */ 998047dd67eSAttilio Rao for (;;) { 999651175c9SAttilio Rao x = lk->lk_lock; 1000651175c9SAttilio Rao MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0); 1001651175c9SAttilio Rao x &= LK_ALL_WAITERS; 1002047dd67eSAttilio Rao if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x, 1003e5f94314SAttilio Rao LK_SHARERS_LOCK(1) | x)) 1004047dd67eSAttilio Rao break; 1005047dd67eSAttilio Rao cpu_spinwait(); 1006047dd67eSAttilio Rao } 1007047dd67eSAttilio Rao break; 1008047dd67eSAttilio Rao case LK_RELEASE: 1009047dd67eSAttilio Rao _lockmgr_assert(lk, KA_LOCKED, file, line); 1010047dd67eSAttilio Rao x = lk->lk_lock; 1011047dd67eSAttilio Rao 1012047dd67eSAttilio Rao if ((x & LK_SHARE) == 0) { 1013047dd67eSAttilio Rao 1014047dd67eSAttilio Rao /* 1015047dd67eSAttilio Rao * As first option, treact the lock as if it has not 1016047dd67eSAttilio Rao * any waiter. 1017047dd67eSAttilio Rao * Fix-up the tid var if the lock has been disowned. 1018047dd67eSAttilio Rao */ 1019047dd67eSAttilio Rao if (LK_HOLDER(x) == LK_KERNPROC) 1020047dd67eSAttilio Rao tid = LK_KERNPROC; 1021e5f94314SAttilio Rao else { 1022e5f94314SAttilio Rao WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, 1023e5f94314SAttilio Rao file, line); 1024047dd67eSAttilio Rao TD_LOCKS_DEC(curthread); 1025e5f94314SAttilio Rao } 1026047dd67eSAttilio Rao LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, 1027047dd67eSAttilio Rao lk->lk_recurse, file, line); 1028047dd67eSAttilio Rao 1029047dd67eSAttilio Rao /* 1030047dd67eSAttilio Rao * The lock is held in exclusive mode. 1031047dd67eSAttilio Rao * If the lock is recursed also, then unrecurse it. 1032047dd67eSAttilio Rao */ 1033047dd67eSAttilio Rao if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) { 1034047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p unrecursing", __func__, 1035047dd67eSAttilio Rao lk); 1036047dd67eSAttilio Rao lk->lk_recurse--; 1037047dd67eSAttilio Rao break; 1038047dd67eSAttilio Rao } 103904a28689SJeff Roberson if (tid != LK_KERNPROC) 1040047dd67eSAttilio Rao lock_profile_release_lock(&lk->lock_object); 1041047dd67eSAttilio Rao 1042047dd67eSAttilio Rao if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid, 1043047dd67eSAttilio Rao LK_UNLOCKED)) 1044047dd67eSAttilio Rao break; 1045047dd67eSAttilio Rao 1046047dd67eSAttilio Rao sleepq_lock(&lk->lock_object); 1047651175c9SAttilio Rao x = lk->lk_lock; 1048047dd67eSAttilio Rao v = LK_UNLOCKED; 1049047dd67eSAttilio Rao 1050047dd67eSAttilio Rao /* 1051047dd67eSAttilio Rao * If the lock has exclusive waiters, give them 1052047dd67eSAttilio Rao * preference in order to avoid deadlock with 1053047dd67eSAttilio Rao * shared runners up. 10542028867dSAttilio Rao * If interruptible sleeps left the exclusive queue 10552028867dSAttilio Rao * empty avoid a starvation for the threads sleeping 10562028867dSAttilio Rao * on the shared queue by giving them precedence 10572028867dSAttilio Rao * and cleaning up the exclusive waiters bit anyway. 1058c636ba83SAttilio Rao * Please note that lk_exslpfail count may be lying 1059c636ba83SAttilio Rao * about the real number of waiters with the 1060c636ba83SAttilio Rao * LK_SLEEPFAIL flag on because they may be used in 1061c636ba83SAttilio Rao * conjuction with interruptible sleeps so 1062aab9c8c2SAttilio Rao * lk_exslpfail might be considered an 'upper limit' 1063aab9c8c2SAttilio Rao * bound, including the edge cases. 1064047dd67eSAttilio Rao */ 1065651175c9SAttilio Rao MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0); 10662028867dSAttilio Rao realexslp = sleepq_sleepcnt(&lk->lock_object, 10672028867dSAttilio Rao SQ_EXCLUSIVE_QUEUE); 10682028867dSAttilio Rao if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) { 10692028867dSAttilio Rao if (lk->lk_exslpfail < realexslp) { 10702028867dSAttilio Rao lk->lk_exslpfail = 0; 1071047dd67eSAttilio Rao queue = SQ_EXCLUSIVE_QUEUE; 1072047dd67eSAttilio Rao v |= (x & LK_SHARED_WAITERS); 1073047dd67eSAttilio Rao } else { 10742028867dSAttilio Rao lk->lk_exslpfail = 0; 10752028867dSAttilio Rao LOCK_LOG2(lk, 10762028867dSAttilio Rao "%s: %p has only LK_SLEEPFAIL sleepers", 10772028867dSAttilio Rao __func__, lk); 10782028867dSAttilio Rao LOCK_LOG2(lk, 10792028867dSAttilio Rao "%s: %p waking up threads on the exclusive queue", 10802028867dSAttilio Rao __func__, lk); 10812028867dSAttilio Rao wakeup_swapper = 10822028867dSAttilio Rao sleepq_broadcast(&lk->lock_object, 10832028867dSAttilio Rao SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE); 10842028867dSAttilio Rao queue = SQ_SHARED_QUEUE; 10852028867dSAttilio Rao } 10862028867dSAttilio Rao } else { 10879dbf7a62SAttilio Rao 10889dbf7a62SAttilio Rao /* 10899dbf7a62SAttilio Rao * Exclusive waiters sleeping with LK_SLEEPFAIL 10909dbf7a62SAttilio Rao * on and using interruptible sleeps/timeout 10919dbf7a62SAttilio Rao * may have left spourious lk_exslpfail counts 10929dbf7a62SAttilio Rao * on, so clean it up anyway. 10939dbf7a62SAttilio Rao */ 10949dbf7a62SAttilio Rao lk->lk_exslpfail = 0; 1095047dd67eSAttilio Rao queue = SQ_SHARED_QUEUE; 1096047dd67eSAttilio Rao } 1097047dd67eSAttilio Rao 1098047dd67eSAttilio Rao LOCK_LOG3(lk, 1099047dd67eSAttilio Rao "%s: %p waking up threads on the %s queue", 1100047dd67eSAttilio Rao __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" : 1101047dd67eSAttilio Rao "exclusive"); 1102047dd67eSAttilio Rao atomic_store_rel_ptr(&lk->lk_lock, v); 11032028867dSAttilio Rao wakeup_swapper |= sleepq_broadcast(&lk->lock_object, 1104da7bbd2cSJohn Baldwin SLEEPQ_LK, 0, queue); 1105047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 1106047dd67eSAttilio Rao break; 1107047dd67eSAttilio Rao } else 1108da7bbd2cSJohn Baldwin wakeup_swapper = wakeupshlk(lk, file, line); 1109047dd67eSAttilio Rao break; 1110047dd67eSAttilio Rao case LK_DRAIN: 1111e5f94314SAttilio Rao if (LK_CAN_WITNESS(flags)) 1112e5f94314SAttilio Rao WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER | 111324150d37SJohn Baldwin LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ? 111424150d37SJohn Baldwin ilk : NULL); 1115047dd67eSAttilio Rao 1116047dd67eSAttilio Rao /* 111796f1567fSKonstantin Belousov * Trying to drain a lock we already own will result in a 1118047dd67eSAttilio Rao * deadlock. 1119047dd67eSAttilio Rao */ 1120047dd67eSAttilio Rao if (lockmgr_xlocked(lk)) { 1121047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 1122047dd67eSAttilio Rao class->lc_unlock(ilk); 1123047dd67eSAttilio Rao panic("%s: draining %s with the lock held @ %s:%d\n", 1124047dd67eSAttilio Rao __func__, iwmesg, file, line); 1125047dd67eSAttilio Rao } 1126047dd67eSAttilio Rao 1127047dd67eSAttilio Rao while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) { 1128f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS 1129f5f9340bSFabien Thomas PMC_SOFT_CALL( , , lock, failed); 1130f5f9340bSFabien Thomas #endif 1131047dd67eSAttilio Rao lock_profile_obtain_lock_failed(&lk->lock_object, 1132047dd67eSAttilio Rao &contested, &waittime); 1133047dd67eSAttilio Rao 1134047dd67eSAttilio Rao /* 1135047dd67eSAttilio Rao * If the lock is expected to not sleep just give up 1136047dd67eSAttilio Rao * and return. 1137047dd67eSAttilio Rao */ 1138047dd67eSAttilio Rao if (LK_TRYOP(flags)) { 1139047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p fails the try operation", 1140047dd67eSAttilio Rao __func__, lk); 1141047dd67eSAttilio Rao error = EBUSY; 1142047dd67eSAttilio Rao break; 1143047dd67eSAttilio Rao } 1144047dd67eSAttilio Rao 1145047dd67eSAttilio Rao /* 1146047dd67eSAttilio Rao * Acquire the sleepqueue chain lock because we 1147047dd67eSAttilio Rao * probabilly will need to manipulate waiters flags. 1148047dd67eSAttilio Rao */ 1149047dd67eSAttilio Rao sleepq_lock(&lk->lock_object); 1150047dd67eSAttilio Rao x = lk->lk_lock; 1151047dd67eSAttilio Rao 1152047dd67eSAttilio Rao /* 1153047dd67eSAttilio Rao * if the lock has been released while we spun on 1154047dd67eSAttilio Rao * the sleepqueue chain lock just try again. 1155047dd67eSAttilio Rao */ 1156047dd67eSAttilio Rao if (x == LK_UNLOCKED) { 1157047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 1158047dd67eSAttilio Rao continue; 1159047dd67eSAttilio Rao } 1160047dd67eSAttilio Rao 1161651175c9SAttilio Rao v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS); 1162651175c9SAttilio Rao if ((x & ~v) == LK_UNLOCKED) { 1163651175c9SAttilio Rao v = (x & ~LK_EXCLUSIVE_SPINNERS); 11642028867dSAttilio Rao 11652028867dSAttilio Rao /* 11662028867dSAttilio Rao * If interruptible sleeps left the exclusive 11672028867dSAttilio Rao * queue empty avoid a starvation for the 11682028867dSAttilio Rao * threads sleeping on the shared queue by 11692028867dSAttilio Rao * giving them precedence and cleaning up the 11702028867dSAttilio Rao * exclusive waiters bit anyway. 1171c636ba83SAttilio Rao * Please note that lk_exslpfail count may be 1172c636ba83SAttilio Rao * lying about the real number of waiters with 1173c636ba83SAttilio Rao * the LK_SLEEPFAIL flag on because they may 1174c636ba83SAttilio Rao * be used in conjuction with interruptible 1175aab9c8c2SAttilio Rao * sleeps so lk_exslpfail might be considered 1176aab9c8c2SAttilio Rao * an 'upper limit' bound, including the edge 1177c636ba83SAttilio Rao * cases. 11782028867dSAttilio Rao */ 1179047dd67eSAttilio Rao if (v & LK_EXCLUSIVE_WAITERS) { 1180047dd67eSAttilio Rao queue = SQ_EXCLUSIVE_QUEUE; 1181047dd67eSAttilio Rao v &= ~LK_EXCLUSIVE_WAITERS; 1182047dd67eSAttilio Rao } else { 11839dbf7a62SAttilio Rao 11849dbf7a62SAttilio Rao /* 11859dbf7a62SAttilio Rao * Exclusive waiters sleeping with 11869dbf7a62SAttilio Rao * LK_SLEEPFAIL on and using 11879dbf7a62SAttilio Rao * interruptible sleeps/timeout may 11889dbf7a62SAttilio Rao * have left spourious lk_exslpfail 11899dbf7a62SAttilio Rao * counts on, so clean it up anyway. 11909dbf7a62SAttilio Rao */ 1191047dd67eSAttilio Rao MPASS(v & LK_SHARED_WAITERS); 11929dbf7a62SAttilio Rao lk->lk_exslpfail = 0; 1193047dd67eSAttilio Rao queue = SQ_SHARED_QUEUE; 1194047dd67eSAttilio Rao v &= ~LK_SHARED_WAITERS; 1195047dd67eSAttilio Rao } 11962028867dSAttilio Rao if (queue == SQ_EXCLUSIVE_QUEUE) { 11972028867dSAttilio Rao realexslp = 11982028867dSAttilio Rao sleepq_sleepcnt(&lk->lock_object, 11992028867dSAttilio Rao SQ_EXCLUSIVE_QUEUE); 12002028867dSAttilio Rao if (lk->lk_exslpfail >= realexslp) { 12012028867dSAttilio Rao lk->lk_exslpfail = 0; 12022028867dSAttilio Rao queue = SQ_SHARED_QUEUE; 12032028867dSAttilio Rao v &= ~LK_SHARED_WAITERS; 12042028867dSAttilio Rao if (realexslp != 0) { 12052028867dSAttilio Rao LOCK_LOG2(lk, 12062028867dSAttilio Rao "%s: %p has only LK_SLEEPFAIL sleepers", 12072028867dSAttilio Rao __func__, lk); 12082028867dSAttilio Rao LOCK_LOG2(lk, 12092028867dSAttilio Rao "%s: %p waking up threads on the exclusive queue", 12102028867dSAttilio Rao __func__, lk); 12112028867dSAttilio Rao wakeup_swapper = 12122028867dSAttilio Rao sleepq_broadcast( 12132028867dSAttilio Rao &lk->lock_object, 12142028867dSAttilio Rao SLEEPQ_LK, 0, 12152028867dSAttilio Rao SQ_EXCLUSIVE_QUEUE); 12162028867dSAttilio Rao } 12172028867dSAttilio Rao } else 12182028867dSAttilio Rao lk->lk_exslpfail = 0; 12192028867dSAttilio Rao } 1220047dd67eSAttilio Rao if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) { 1221047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 1222047dd67eSAttilio Rao continue; 1223047dd67eSAttilio Rao } 1224047dd67eSAttilio Rao LOCK_LOG3(lk, 1225047dd67eSAttilio Rao "%s: %p waking up all threads on the %s queue", 1226047dd67eSAttilio Rao __func__, lk, queue == SQ_SHARED_QUEUE ? 1227047dd67eSAttilio Rao "shared" : "exclusive"); 1228814f26daSJohn Baldwin wakeup_swapper |= sleepq_broadcast( 1229da7bbd2cSJohn Baldwin &lk->lock_object, SLEEPQ_LK, 0, queue); 1230047dd67eSAttilio Rao 1231047dd67eSAttilio Rao /* 1232047dd67eSAttilio Rao * If shared waiters have been woken up we need 1233047dd67eSAttilio Rao * to wait for one of them to acquire the lock 1234047dd67eSAttilio Rao * before to set the exclusive waiters in 1235047dd67eSAttilio Rao * order to avoid a deadlock. 1236047dd67eSAttilio Rao */ 1237047dd67eSAttilio Rao if (queue == SQ_SHARED_QUEUE) { 1238047dd67eSAttilio Rao for (v = lk->lk_lock; 1239047dd67eSAttilio Rao (v & LK_SHARE) && !LK_SHARERS(v); 1240047dd67eSAttilio Rao v = lk->lk_lock) 1241047dd67eSAttilio Rao cpu_spinwait(); 1242047dd67eSAttilio Rao } 1243047dd67eSAttilio Rao } 1244047dd67eSAttilio Rao 1245047dd67eSAttilio Rao /* 1246047dd67eSAttilio Rao * Try to set the LK_EXCLUSIVE_WAITERS flag. If we 1247047dd67eSAttilio Rao * fail, loop back and retry. 1248047dd67eSAttilio Rao */ 1249047dd67eSAttilio Rao if ((x & LK_EXCLUSIVE_WAITERS) == 0) { 1250047dd67eSAttilio Rao if (!atomic_cmpset_ptr(&lk->lk_lock, x, 1251047dd67eSAttilio Rao x | LK_EXCLUSIVE_WAITERS)) { 1252047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 1253047dd67eSAttilio Rao continue; 1254047dd67eSAttilio Rao } 1255047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p set drain waiters flag", 1256047dd67eSAttilio Rao __func__, lk); 1257047dd67eSAttilio Rao } 1258047dd67eSAttilio Rao 1259047dd67eSAttilio Rao /* 1260047dd67eSAttilio Rao * As far as we have been unable to acquire the 1261047dd67eSAttilio Rao * exclusive lock and the exclusive waiters flag 1262047dd67eSAttilio Rao * is set, we will sleep. 1263047dd67eSAttilio Rao */ 1264047dd67eSAttilio Rao if (flags & LK_INTERLOCK) { 1265047dd67eSAttilio Rao class->lc_unlock(ilk); 1266047dd67eSAttilio Rao flags &= ~LK_INTERLOCK; 1267047dd67eSAttilio Rao } 1268e5f94314SAttilio Rao GIANT_SAVE(); 1269047dd67eSAttilio Rao sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK, 1270047dd67eSAttilio Rao SQ_EXCLUSIVE_QUEUE); 1271047dd67eSAttilio Rao sleepq_wait(&lk->lock_object, ipri & PRIMASK); 1272e5f94314SAttilio Rao GIANT_RESTORE(); 1273047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 1274047dd67eSAttilio Rao __func__, lk); 1275047dd67eSAttilio Rao } 1276047dd67eSAttilio Rao 1277047dd67eSAttilio Rao if (error == 0) { 1278047dd67eSAttilio Rao lock_profile_obtain_lock_success(&lk->lock_object, 1279047dd67eSAttilio Rao contested, waittime, file, line); 1280047dd67eSAttilio Rao LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0, 1281047dd67eSAttilio Rao lk->lk_recurse, file, line); 1282e5f94314SAttilio Rao WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 1283e5f94314SAttilio Rao LK_TRYWIT(flags), file, line); 1284047dd67eSAttilio Rao TD_LOCKS_INC(curthread); 1285047dd67eSAttilio Rao STACK_SAVE(lk); 1286047dd67eSAttilio Rao } 1287047dd67eSAttilio Rao break; 1288047dd67eSAttilio Rao default: 1289047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 1290047dd67eSAttilio Rao class->lc_unlock(ilk); 1291047dd67eSAttilio Rao panic("%s: unknown lockmgr request 0x%x\n", __func__, op); 1292047dd67eSAttilio Rao } 1293047dd67eSAttilio Rao 1294047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 1295047dd67eSAttilio Rao class->lc_unlock(ilk); 1296da7bbd2cSJohn Baldwin if (wakeup_swapper) 1297da7bbd2cSJohn Baldwin kick_proc0(); 1298047dd67eSAttilio Rao 1299047dd67eSAttilio Rao return (error); 1300047dd67eSAttilio Rao } 1301047dd67eSAttilio Rao 1302d7a7e179SAttilio Rao void 1303047dd67eSAttilio Rao _lockmgr_disown(struct lock *lk, const char *file, int line) 1304047dd67eSAttilio Rao { 1305047dd67eSAttilio Rao uintptr_t tid, x; 1306047dd67eSAttilio Rao 130735370593SAndriy Gapon if (SCHEDULER_STOPPED()) 130835370593SAndriy Gapon return; 130935370593SAndriy Gapon 1310047dd67eSAttilio Rao tid = (uintptr_t)curthread; 13111c7d98d0SAttilio Rao _lockmgr_assert(lk, KA_XLOCKED, file, line); 13121c7d98d0SAttilio Rao 13131c7d98d0SAttilio Rao /* 13141c7d98d0SAttilio Rao * Panic if the lock is recursed. 13151c7d98d0SAttilio Rao */ 13161c7d98d0SAttilio Rao if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) 13171c7d98d0SAttilio Rao panic("%s: disown a recursed lockmgr @ %s:%d\n", 13181c7d98d0SAttilio Rao __func__, file, line); 1319047dd67eSAttilio Rao 1320047dd67eSAttilio Rao /* 132196f1567fSKonstantin Belousov * If the owner is already LK_KERNPROC just skip the whole operation. 1322047dd67eSAttilio Rao */ 1323047dd67eSAttilio Rao if (LK_HOLDER(lk->lk_lock) != tid) 1324047dd67eSAttilio Rao return; 132504a28689SJeff Roberson lock_profile_release_lock(&lk->lock_object); 1326e5f94314SAttilio Rao LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line); 1327e5f94314SAttilio Rao WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line); 1328e5f94314SAttilio Rao TD_LOCKS_DEC(curthread); 1329337c5ff4SAttilio Rao STACK_SAVE(lk); 1330047dd67eSAttilio Rao 1331047dd67eSAttilio Rao /* 1332047dd67eSAttilio Rao * In order to preserve waiters flags, just spin. 1333047dd67eSAttilio Rao */ 1334047dd67eSAttilio Rao for (;;) { 1335651175c9SAttilio Rao x = lk->lk_lock; 1336651175c9SAttilio Rao MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0); 1337651175c9SAttilio Rao x &= LK_ALL_WAITERS; 133822dd228dSAttilio Rao if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x, 1339e5f94314SAttilio Rao LK_KERNPROC | x)) 1340047dd67eSAttilio Rao return; 1341047dd67eSAttilio Rao cpu_spinwait(); 1342047dd67eSAttilio Rao } 1343047dd67eSAttilio Rao } 1344047dd67eSAttilio Rao 1345047dd67eSAttilio Rao void 1346d576deedSPawel Jakub Dawidek lockmgr_printinfo(const struct lock *lk) 1347d7a7e179SAttilio Rao { 1348d7a7e179SAttilio Rao struct thread *td; 1349047dd67eSAttilio Rao uintptr_t x; 1350d7a7e179SAttilio Rao 1351047dd67eSAttilio Rao if (lk->lk_lock == LK_UNLOCKED) 1352047dd67eSAttilio Rao printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name); 1353047dd67eSAttilio Rao else if (lk->lk_lock & LK_SHARE) 1354047dd67eSAttilio Rao printf("lock type %s: SHARED (count %ju)\n", 1355047dd67eSAttilio Rao lk->lock_object.lo_name, 1356047dd67eSAttilio Rao (uintmax_t)LK_SHARERS(lk->lk_lock)); 1357047dd67eSAttilio Rao else { 1358047dd67eSAttilio Rao td = lockmgr_xholder(lk); 1359e64b4fa8SKonstantin Belousov if (td == (struct thread *)LK_KERNPROC) 1360e64b4fa8SKonstantin Belousov printf("lock type %s: EXCL by KERNPROC\n", 1361e64b4fa8SKonstantin Belousov lk->lock_object.lo_name); 1362e64b4fa8SKonstantin Belousov else 13632573ea5fSIvan Voras printf("lock type %s: EXCL by thread %p " 1364e64b4fa8SKonstantin Belousov "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, 1365e64b4fa8SKonstantin Belousov td, td->td_proc->p_pid, td->td_proc->p_comm, 1366e64b4fa8SKonstantin Belousov td->td_tid); 1367d7a7e179SAttilio Rao } 1368d7a7e179SAttilio Rao 1369047dd67eSAttilio Rao x = lk->lk_lock; 1370047dd67eSAttilio Rao if (x & LK_EXCLUSIVE_WAITERS) 1371047dd67eSAttilio Rao printf(" with exclusive waiters pending\n"); 1372047dd67eSAttilio Rao if (x & LK_SHARED_WAITERS) 1373047dd67eSAttilio Rao printf(" with shared waiters pending\n"); 1374651175c9SAttilio Rao if (x & LK_EXCLUSIVE_SPINNERS) 1375651175c9SAttilio Rao printf(" with exclusive spinners pending\n"); 1376047dd67eSAttilio Rao 1377047dd67eSAttilio Rao STACK_PRINT(lk); 1378047dd67eSAttilio Rao } 1379047dd67eSAttilio Rao 138099448ed1SJohn Dyson int 1381d576deedSPawel Jakub Dawidek lockstatus(const struct lock *lk) 138299448ed1SJohn Dyson { 1383047dd67eSAttilio Rao uintptr_t v, x; 1384047dd67eSAttilio Rao int ret; 138599448ed1SJohn Dyson 1386047dd67eSAttilio Rao ret = LK_SHARED; 1387047dd67eSAttilio Rao x = lk->lk_lock; 1388047dd67eSAttilio Rao v = LK_HOLDER(x); 13890e9eb108SAttilio Rao 1390047dd67eSAttilio Rao if ((x & LK_SHARE) == 0) { 1391047dd67eSAttilio Rao if (v == (uintptr_t)curthread || v == LK_KERNPROC) 1392047dd67eSAttilio Rao ret = LK_EXCLUSIVE; 13936bdfe06aSEivind Eklund else 1394047dd67eSAttilio Rao ret = LK_EXCLOTHER; 1395047dd67eSAttilio Rao } else if (x == LK_UNLOCKED) 1396047dd67eSAttilio Rao ret = 0; 139799448ed1SJohn Dyson 1398047dd67eSAttilio Rao return (ret); 139953bf4bb2SPeter Wemm } 1400be6847d7SJohn Baldwin 140184887fa3SAttilio Rao #ifdef INVARIANT_SUPPORT 1402de5b1952SAlexander Leidinger 1403de5b1952SAlexander Leidinger FEATURE(invariant_support, 1404de5b1952SAlexander Leidinger "Support for modules compiled with INVARIANTS option"); 1405de5b1952SAlexander Leidinger 140684887fa3SAttilio Rao #ifndef INVARIANTS 140784887fa3SAttilio Rao #undef _lockmgr_assert 140884887fa3SAttilio Rao #endif 140984887fa3SAttilio Rao 141084887fa3SAttilio Rao void 1411d576deedSPawel Jakub Dawidek _lockmgr_assert(const struct lock *lk, int what, const char *file, int line) 141284887fa3SAttilio Rao { 141384887fa3SAttilio Rao int slocked = 0; 141484887fa3SAttilio Rao 141584887fa3SAttilio Rao if (panicstr != NULL) 141684887fa3SAttilio Rao return; 141784887fa3SAttilio Rao switch (what) { 141884887fa3SAttilio Rao case KA_SLOCKED: 141984887fa3SAttilio Rao case KA_SLOCKED | KA_NOTRECURSED: 142084887fa3SAttilio Rao case KA_SLOCKED | KA_RECURSED: 142184887fa3SAttilio Rao slocked = 1; 142284887fa3SAttilio Rao case KA_LOCKED: 142384887fa3SAttilio Rao case KA_LOCKED | KA_NOTRECURSED: 142484887fa3SAttilio Rao case KA_LOCKED | KA_RECURSED: 1425e5f94314SAttilio Rao #ifdef WITNESS 1426e5f94314SAttilio Rao 1427e5f94314SAttilio Rao /* 1428e5f94314SAttilio Rao * We cannot trust WITNESS if the lock is held in exclusive 1429e5f94314SAttilio Rao * mode and a call to lockmgr_disown() happened. 1430e5f94314SAttilio Rao * Workaround this skipping the check if the lock is held in 1431e5f94314SAttilio Rao * exclusive mode even for the KA_LOCKED case. 1432e5f94314SAttilio Rao */ 1433e5f94314SAttilio Rao if (slocked || (lk->lk_lock & LK_SHARE)) { 1434e5f94314SAttilio Rao witness_assert(&lk->lock_object, what, file, line); 1435e5f94314SAttilio Rao break; 1436e5f94314SAttilio Rao } 1437e5f94314SAttilio Rao #endif 1438047dd67eSAttilio Rao if (lk->lk_lock == LK_UNLOCKED || 1439047dd67eSAttilio Rao ((lk->lk_lock & LK_SHARE) == 0 && (slocked || 1440047dd67eSAttilio Rao (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))))) 144184887fa3SAttilio Rao panic("Lock %s not %slocked @ %s:%d\n", 1442047dd67eSAttilio Rao lk->lock_object.lo_name, slocked ? "share" : "", 144384887fa3SAttilio Rao file, line); 1444047dd67eSAttilio Rao 1445047dd67eSAttilio Rao if ((lk->lk_lock & LK_SHARE) == 0) { 1446047dd67eSAttilio Rao if (lockmgr_recursed(lk)) { 144784887fa3SAttilio Rao if (what & KA_NOTRECURSED) 144884887fa3SAttilio Rao panic("Lock %s recursed @ %s:%d\n", 1449047dd67eSAttilio Rao lk->lock_object.lo_name, file, 1450047dd67eSAttilio Rao line); 145184887fa3SAttilio Rao } else if (what & KA_RECURSED) 145284887fa3SAttilio Rao panic("Lock %s not recursed @ %s:%d\n", 1453047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 145484887fa3SAttilio Rao } 145584887fa3SAttilio Rao break; 145684887fa3SAttilio Rao case KA_XLOCKED: 145784887fa3SAttilio Rao case KA_XLOCKED | KA_NOTRECURSED: 145884887fa3SAttilio Rao case KA_XLOCKED | KA_RECURSED: 1459047dd67eSAttilio Rao if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)) 146084887fa3SAttilio Rao panic("Lock %s not exclusively locked @ %s:%d\n", 1461047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 1462047dd67eSAttilio Rao if (lockmgr_recursed(lk)) { 146384887fa3SAttilio Rao if (what & KA_NOTRECURSED) 146484887fa3SAttilio Rao panic("Lock %s recursed @ %s:%d\n", 1465047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 146684887fa3SAttilio Rao } else if (what & KA_RECURSED) 146784887fa3SAttilio Rao panic("Lock %s not recursed @ %s:%d\n", 1468047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 146984887fa3SAttilio Rao break; 147084887fa3SAttilio Rao case KA_UNLOCKED: 1471047dd67eSAttilio Rao if (lockmgr_xlocked(lk) || lockmgr_disowned(lk)) 147284887fa3SAttilio Rao panic("Lock %s exclusively locked @ %s:%d\n", 1473047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 147484887fa3SAttilio Rao break; 147584887fa3SAttilio Rao default: 1476047dd67eSAttilio Rao panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file, 1477047dd67eSAttilio Rao line); 147884887fa3SAttilio Rao } 147984887fa3SAttilio Rao } 1480047dd67eSAttilio Rao #endif 148184887fa3SAttilio Rao 1482be6847d7SJohn Baldwin #ifdef DDB 1483462a7addSJohn Baldwin int 1484462a7addSJohn Baldwin lockmgr_chain(struct thread *td, struct thread **ownerp) 1485462a7addSJohn Baldwin { 1486047dd67eSAttilio Rao struct lock *lk; 1487462a7addSJohn Baldwin 1488047dd67eSAttilio Rao lk = td->td_wchan; 1489462a7addSJohn Baldwin 1490047dd67eSAttilio Rao if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr) 1491462a7addSJohn Baldwin return (0); 1492047dd67eSAttilio Rao db_printf("blocked on lockmgr %s", lk->lock_object.lo_name); 1493047dd67eSAttilio Rao if (lk->lk_lock & LK_SHARE) 1494047dd67eSAttilio Rao db_printf("SHARED (count %ju)\n", 1495047dd67eSAttilio Rao (uintmax_t)LK_SHARERS(lk->lk_lock)); 1496047dd67eSAttilio Rao else 1497047dd67eSAttilio Rao db_printf("EXCL\n"); 1498047dd67eSAttilio Rao *ownerp = lockmgr_xholder(lk); 1499462a7addSJohn Baldwin 1500462a7addSJohn Baldwin return (1); 1501462a7addSJohn Baldwin } 1502462a7addSJohn Baldwin 1503047dd67eSAttilio Rao static void 1504d576deedSPawel Jakub Dawidek db_show_lockmgr(const struct lock_object *lock) 1505be6847d7SJohn Baldwin { 1506be6847d7SJohn Baldwin struct thread *td; 1507d576deedSPawel Jakub Dawidek const struct lock *lk; 1508be6847d7SJohn Baldwin 1509d576deedSPawel Jakub Dawidek lk = (const struct lock *)lock; 1510be6847d7SJohn Baldwin 1511be6847d7SJohn Baldwin db_printf(" state: "); 1512047dd67eSAttilio Rao if (lk->lk_lock == LK_UNLOCKED) 1513be6847d7SJohn Baldwin db_printf("UNLOCKED\n"); 1514047dd67eSAttilio Rao else if (lk->lk_lock & LK_SHARE) 1515047dd67eSAttilio Rao db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock)); 1516047dd67eSAttilio Rao else { 1517047dd67eSAttilio Rao td = lockmgr_xholder(lk); 1518047dd67eSAttilio Rao if (td == (struct thread *)LK_KERNPROC) 1519047dd67eSAttilio Rao db_printf("XLOCK: LK_KERNPROC\n"); 1520047dd67eSAttilio Rao else 1521047dd67eSAttilio Rao db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1522047dd67eSAttilio Rao td->td_tid, td->td_proc->p_pid, 1523047dd67eSAttilio Rao td->td_proc->p_comm); 1524047dd67eSAttilio Rao if (lockmgr_recursed(lk)) 1525047dd67eSAttilio Rao db_printf(" recursed: %d\n", lk->lk_recurse); 1526047dd67eSAttilio Rao } 1527047dd67eSAttilio Rao db_printf(" waiters: "); 1528047dd67eSAttilio Rao switch (lk->lk_lock & LK_ALL_WAITERS) { 1529047dd67eSAttilio Rao case LK_SHARED_WAITERS: 1530047dd67eSAttilio Rao db_printf("shared\n"); 1531e5023dd9SEdward Tomasz Napierala break; 1532047dd67eSAttilio Rao case LK_EXCLUSIVE_WAITERS: 1533047dd67eSAttilio Rao db_printf("exclusive\n"); 1534047dd67eSAttilio Rao break; 1535047dd67eSAttilio Rao case LK_ALL_WAITERS: 1536047dd67eSAttilio Rao db_printf("shared and exclusive\n"); 1537047dd67eSAttilio Rao break; 1538047dd67eSAttilio Rao default: 1539047dd67eSAttilio Rao db_printf("none\n"); 1540047dd67eSAttilio Rao } 1541651175c9SAttilio Rao db_printf(" spinners: "); 1542651175c9SAttilio Rao if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS) 1543651175c9SAttilio Rao db_printf("exclusive\n"); 1544651175c9SAttilio Rao else 1545651175c9SAttilio Rao db_printf("none\n"); 1546be6847d7SJohn Baldwin } 1547be6847d7SJohn Baldwin #endif 1548