/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

CTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_CANRECURSE | LK_NOSHARE));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
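
/*
 * Editor's note (illustrative sketch, not part of the original file): all
 * of the acquisition and release paths below share one lock-free pattern
 * built on atomic_cmpset_ptr(): snapshot lk_lock, compute the desired
 * successor value, and retry if another CPU changed the word in the
 * meantime.  A minimal sketch of the pattern, bumping the sharers count
 * the way the LK_SHARED fast path does:
 *
 *	uintptr_t x;
 *
 *	for (;;) {
 *		x = lk->lk_lock;
 *		if (!LK_CAN_SHARE(x))
 *			break;		(fall back to the slow path)
 *		if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
 *		    x + LK_ONE_SHARER))
 *			break;		(acquired: sharers count bumped)
 *	}
 *
 * The _acq/_rel variants supply the memory barriers: acquire semantics
 * when taking the lock, release semantics when dropping it.
 */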

static void	assert_lockmgr(struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_lockmgr(struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr
};

static __inline struct thread *
lockmgr_xholder(struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This function assumes the sleepqueue chain lock is held on entry and
 * returns with it released.  It also assumes the generic interlock is
 * sane and previously checked.  If LK_INTERLOCK is specified, the
 * interlock is not reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Pick the sleep primitive matching the timeout and catch settings.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS(x == LK_SHARERS_LOCK(1));
			if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1),
			    LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & LK_ALL_WAITERS;
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 */
		if (x & LK_EXCLUSIVE_WAITERS) {
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			MPASS(x == LK_SHARED_WAITERS);
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper = sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}
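
/*
 * Editor's note: sleepq_broadcast() reports whether the swapper (proc0)
 * must be woken to swap the released threads back in.  wakeupshlk()
 * propagates that value instead of acting on it, so that the caller can
 * defer the wakeup until every lock, including a caller-supplied
 * interlock, has been dropped; __lockmgr_args() does exactly that:
 *
 *	wakeup_swapper = wakeupshlk(lk, file, line);
 *	...
 *	if (wakeup_swapper)
 *		kick_proc0();
 */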

static void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);

	iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_CANRECURSE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	lock_destroy(&lk->lock_object);
}
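
/*
 * Example usage (editor's sketch, not part of the original file): a
 * consumer embeds a struct lock somewhere, initializes it once, and
 * brackets its critical sections with lockmgr(9) requests.  The wmesg,
 * priority and timeout given to lockinit() become the defaults for all
 * later requests on this lock.  The names below are hypothetical:
 *
 *	struct lock ex_lock;
 *
 *	lockinit(&ex_lock, PVFS, "exlock", 0, 0);
 *	...
 *	lockmgr(&ex_lock, LK_EXCLUSIVE, NULL);
 *	... exclusive section ...
 *	lockmgr(&ex_lock, LK_RELEASE, NULL);
 *	...
 *	lockdestroy(&ex_lock);
 */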

int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
		op = LK_EXCLUSIVE;

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, ilk);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the lock is expected to not sleep just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		x = lk->lk_lock & LK_ALL_WAITERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to upgrade, so just give up the
		 * shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) {

				/*
				 * For a try operation, just give up and
				 * return instead of panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
				panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is expected to not sleep just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = x & LK_ALL_WAITERS;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			if (x == (LK_UNLOCKED | v)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock & LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it has no
			 * waiters.
			 * Fix up the tid var if the lock has been disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock & LK_ALL_WAITERS;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 */
			if (x & LK_EXCLUSIVE_WAITERS) {
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				MPASS(x == LK_SHARED_WAITERS);
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
"shared" : 717047dd67eSAttilio Rao "exclusive"); 718047dd67eSAttilio Rao atomic_store_rel_ptr(&lk->lk_lock, v); 719da7bbd2cSJohn Baldwin wakeup_swapper = sleepq_broadcast(&lk->lock_object, 720da7bbd2cSJohn Baldwin SLEEPQ_LK, 0, queue); 721047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 722047dd67eSAttilio Rao break; 723047dd67eSAttilio Rao } else 724da7bbd2cSJohn Baldwin wakeup_swapper = wakeupshlk(lk, file, line); 725047dd67eSAttilio Rao break; 726047dd67eSAttilio Rao case LK_DRAIN: 727e5f94314SAttilio Rao if (LK_CAN_WITNESS(flags)) 728e5f94314SAttilio Rao WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER | 72941313430SJohn Baldwin LOP_EXCLUSIVE, file, line, ilk); 730047dd67eSAttilio Rao 731047dd67eSAttilio Rao /* 73296f1567fSKonstantin Belousov * Trying to drain a lock we already own will result in a 733047dd67eSAttilio Rao * deadlock. 734047dd67eSAttilio Rao */ 735047dd67eSAttilio Rao if (lockmgr_xlocked(lk)) { 736047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 737047dd67eSAttilio Rao class->lc_unlock(ilk); 738047dd67eSAttilio Rao panic("%s: draining %s with the lock held @ %s:%d\n", 739047dd67eSAttilio Rao __func__, iwmesg, file, line); 740047dd67eSAttilio Rao } 741047dd67eSAttilio Rao 742047dd67eSAttilio Rao while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) { 743047dd67eSAttilio Rao lock_profile_obtain_lock_failed(&lk->lock_object, 744047dd67eSAttilio Rao &contested, &waittime); 745047dd67eSAttilio Rao 746047dd67eSAttilio Rao /* 747047dd67eSAttilio Rao * If the lock is expected to not sleep just give up 748047dd67eSAttilio Rao * and return. 749047dd67eSAttilio Rao */ 750047dd67eSAttilio Rao if (LK_TRYOP(flags)) { 751047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p fails the try operation", 752047dd67eSAttilio Rao __func__, lk); 753047dd67eSAttilio Rao error = EBUSY; 754047dd67eSAttilio Rao break; 755047dd67eSAttilio Rao } 756047dd67eSAttilio Rao 757047dd67eSAttilio Rao /* 758047dd67eSAttilio Rao * Acquire the sleepqueue chain lock because we 759047dd67eSAttilio Rao * probabilly will need to manipulate waiters flags. 760047dd67eSAttilio Rao */ 761047dd67eSAttilio Rao sleepq_lock(&lk->lock_object); 762047dd67eSAttilio Rao x = lk->lk_lock; 763047dd67eSAttilio Rao v = x & LK_ALL_WAITERS; 764047dd67eSAttilio Rao 765047dd67eSAttilio Rao /* 766047dd67eSAttilio Rao * if the lock has been released while we spun on 767047dd67eSAttilio Rao * the sleepqueue chain lock just try again. 768047dd67eSAttilio Rao */ 769047dd67eSAttilio Rao if (x == LK_UNLOCKED) { 770047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 771047dd67eSAttilio Rao continue; 772047dd67eSAttilio Rao } 773047dd67eSAttilio Rao 774047dd67eSAttilio Rao if (x == (LK_UNLOCKED | v)) { 775047dd67eSAttilio Rao v = x; 776047dd67eSAttilio Rao if (v & LK_EXCLUSIVE_WAITERS) { 777047dd67eSAttilio Rao queue = SQ_EXCLUSIVE_QUEUE; 778047dd67eSAttilio Rao v &= ~LK_EXCLUSIVE_WAITERS; 779047dd67eSAttilio Rao } else { 780047dd67eSAttilio Rao MPASS(v & LK_SHARED_WAITERS); 781047dd67eSAttilio Rao queue = SQ_SHARED_QUEUE; 782047dd67eSAttilio Rao v &= ~LK_SHARED_WAITERS; 783047dd67eSAttilio Rao } 784047dd67eSAttilio Rao if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) { 785047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 786047dd67eSAttilio Rao continue; 787047dd67eSAttilio Rao } 788047dd67eSAttilio Rao LOCK_LOG3(lk, 789047dd67eSAttilio Rao "%s: %p waking up all threads on the %s queue", 790047dd67eSAttilio Rao __func__, lk, queue == SQ_SHARED_QUEUE ? 
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up we need
				 * to wait for one of them to acquire the lock
				 * before setting the exclusive waiters flag,
				 * in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}
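
/*
 * Editor's note (illustrative, not part of the original file): an
 * LK_UPGRADE request is not atomic when the fast path fails: the code
 * above drops the shared lock via wakeupshlk() and falls through to
 * LK_EXCLUSIVE.  Callers must therefore revalidate anything they read
 * under the shared lock once the upgrade returns (ex_lock is the
 * hypothetical lock from the lockinit() example above):
 *
 *	lockmgr(&ex_lock, LK_SHARED, NULL);
 *	... inspect some state ...
 *	lockmgr(&ex_lock, LK_UPGRADE, NULL);
 *	... the lock is now held exclusively, but it may have been
 *	    dropped in between: re-check the inspected state ...
 *	lockmgr(&ex_lock, LK_RELEASE, NULL);
 */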

void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock & LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}
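
/*
 * Example usage (editor's sketch): disowning re-labels an exclusively
 * held lock as owned by LK_KERNPROC so that a thread other than the
 * locker may release it later, e.g. when an exclusively locked object
 * is handed off to another thread:
 *
 *	lockmgr(&ex_lock, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&ex_lock);
 *	... hand the object over; the receiving thread eventually does:
 *	lockmgr(&ex_lock, LK_RELEASE, NULL);
 */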

void
lockmgr_printinfo(struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p (pid %d)\n",
		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");

	STACK_PRINT(lk);
}

int
lockstatus(struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}
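
/*
 * Example usage (editor's sketch): lockstatus() reports how the lock is
 * held relative to curthread (0, LK_SHARED, LK_EXCLUSIVE or
 * LK_EXCLOTHER).  A caller that already holds the lock shared might use
 * it to upgrade only when needed:
 *
 *	if (lockstatus(&ex_lock) != LK_EXCLUSIVE)
 *		lockmgr(&ex_lock, LK_UPGRADE, NULL);
 */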

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
		/* FALLTHROUGH */
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is held
		 * in exclusive mode even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif

#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}

static void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lk;

	lk = (struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
}
#endif
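
/*
 * Editor's note: db_show_lockmgr() is not called directly; it is the
 * lc_ddb_show method of lock_class_lockmgr, invoked when the in-kernel
 * debugger is asked to inspect a lockmgr lock (assuming the standard
 * DDB "show lock" command):
 *
 *	db> show lock <address of struct lock>
 */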