19454b2d8SWarner Losh /*- 2047dd67eSAttilio Rao * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org> 3047dd67eSAttilio Rao * All rights reserved. 453bf4bb2SPeter Wemm * 553bf4bb2SPeter Wemm * Redistribution and use in source and binary forms, with or without 653bf4bb2SPeter Wemm * modification, are permitted provided that the following conditions 753bf4bb2SPeter Wemm * are met: 853bf4bb2SPeter Wemm * 1. Redistributions of source code must retain the above copyright 9047dd67eSAttilio Rao * notice(s), this list of conditions and the following disclaimer as 10047dd67eSAttilio Rao * the first lines of this file unmodified other than the possible 11047dd67eSAttilio Rao * addition of one or more copyright notices. 1253bf4bb2SPeter Wemm * 2. Redistributions in binary form must reproduce the above copyright 13047dd67eSAttilio Rao * notice(s), this list of conditions and the following disclaimer in the 1453bf4bb2SPeter Wemm * documentation and/or other materials provided with the distribution. 1553bf4bb2SPeter Wemm * 16047dd67eSAttilio Rao * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY 17047dd67eSAttilio Rao * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18047dd67eSAttilio Rao * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19047dd67eSAttilio Rao * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY 20047dd67eSAttilio Rao * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21047dd67eSAttilio Rao * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22047dd67eSAttilio Rao * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23047dd67eSAttilio Rao * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2453bf4bb2SPeter Wemm * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25047dd67eSAttilio Rao * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 26047dd67eSAttilio Rao * DAMAGE. 2753bf4bb2SPeter Wemm */ 2853bf4bb2SPeter Wemm 29047dd67eSAttilio Rao #include "opt_ddb.h" 30047dd67eSAttilio Rao 31677b542eSDavid E. O'Brien #include <sys/cdefs.h> 32677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 33677b542eSDavid E. O'Brien 3453bf4bb2SPeter Wemm #include <sys/param.h> 3561d80e90SJohn Baldwin #include <sys/ktr.h> 3653bf4bb2SPeter Wemm #include <sys/lock.h> 37047dd67eSAttilio Rao #include <sys/lock_profile.h> 388302d183SBruce Evans #include <sys/lockmgr.h> 39d8881ca3SJohn Baldwin #include <sys/mutex.h> 408302d183SBruce Evans #include <sys/proc.h> 41047dd67eSAttilio Rao #include <sys/sleepqueue.h> 42e8ddb61dSJeff Roberson #ifdef DEBUG_LOCKS 43e8ddb61dSJeff Roberson #include <sys/stack.h> 44e8ddb61dSJeff Roberson #endif 45047dd67eSAttilio Rao #include <sys/systm.h> 4653bf4bb2SPeter Wemm 47047dd67eSAttilio Rao #include <machine/cpu.h> 486efc8a16SAttilio Rao 49be6847d7SJohn Baldwin #ifdef DDB 50be6847d7SJohn Baldwin #include <ddb/ddb.h> 51047dd67eSAttilio Rao #endif 52047dd67eSAttilio Rao 53047dd67eSAttilio Rao CTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) == 54047dd67eSAttilio Rao (LK_CANRECURSE | LK_NOSHARE)); 55047dd67eSAttilio Rao 56047dd67eSAttilio Rao #define SQ_EXCLUSIVE_QUEUE 0 57047dd67eSAttilio Rao #define SQ_SHARED_QUEUE 1 58047dd67eSAttilio Rao 59047dd67eSAttilio Rao #ifndef INVARIANTS 60047dd67eSAttilio Rao #define _lockmgr_assert(lk, what, file, line) 61047dd67eSAttilio Rao #define TD_LOCKS_INC(td) 62047dd67eSAttilio Rao #define TD_LOCKS_DEC(td) 63047dd67eSAttilio Rao #else 64047dd67eSAttilio Rao #define TD_LOCKS_INC(td) ((td)->td_locks++) 65047dd67eSAttilio Rao #define TD_LOCKS_DEC(td) ((td)->td_locks--) 66047dd67eSAttilio Rao #endif 67047dd67eSAttilio Rao #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++) 68047dd67eSAttilio Rao #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--) 69047dd67eSAttilio Rao 70047dd67eSAttilio Rao #ifndef DEBUG_LOCKS 71047dd67eSAttilio Rao #define STACK_PRINT(lk) 72047dd67eSAttilio Rao #define STACK_SAVE(lk) 73047dd67eSAttilio Rao #define STACK_ZERO(lk) 74047dd67eSAttilio Rao #else 75047dd67eSAttilio Rao #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack) 76047dd67eSAttilio Rao #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack) 77047dd67eSAttilio Rao #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack) 78047dd67eSAttilio Rao #endif 79047dd67eSAttilio Rao 80047dd67eSAttilio Rao #define LOCK_LOG2(lk, string, arg1, arg2) \ 81047dd67eSAttilio Rao if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \ 82047dd67eSAttilio Rao CTR2(KTR_LOCK, (string), (arg1), (arg2)) 83047dd67eSAttilio Rao #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \ 84047dd67eSAttilio Rao if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \ 85047dd67eSAttilio Rao CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3)) 86047dd67eSAttilio Rao 87e5f94314SAttilio Rao #define GIANT_DECLARE \ 88e5f94314SAttilio Rao int _i = 0; \ 89e5f94314SAttilio Rao WITNESS_SAVE_DECL(Giant) 90e5f94314SAttilio Rao #define GIANT_RESTORE() do { \ 91e5f94314SAttilio Rao if (_i > 0) { \ 92e5f94314SAttilio Rao while (_i--) \ 93e5f94314SAttilio Rao mtx_lock(&Giant); \ 94e5f94314SAttilio Rao WITNESS_RESTORE(&Giant.lock_object, Giant); \ 95e5f94314SAttilio Rao } \ 96e5f94314SAttilio Rao } while (0) 97e5f94314SAttilio Rao #define GIANT_SAVE() do { \ 98e5f94314SAttilio Rao if (mtx_owned(&Giant)) { \ 99e5f94314SAttilio Rao WITNESS_SAVE(&Giant.lock_object, Giant); \ 100e5f94314SAttilio Rao while (mtx_owned(&Giant)) { \ 101e5f94314SAttilio Rao _i++; \ 102e5f94314SAttilio Rao mtx_unlock(&Giant); \ 103e5f94314SAttilio Rao } \ 104e5f94314SAttilio Rao } \ 105e5f94314SAttilio Rao } while (0) 106e5f94314SAttilio Rao 107047dd67eSAttilio Rao #define LK_CAN_SHARE(x) \ 108047dd67eSAttilio Rao (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 || \ 109e0f62984SAttilio Rao curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT))) 110e5f94314SAttilio Rao #define LK_TRYOP(x) \ 111e5f94314SAttilio Rao ((x) & LK_NOWAIT) 112e5f94314SAttilio Rao 113e5f94314SAttilio Rao #define LK_CAN_WITNESS(x) \ 114e5f94314SAttilio Rao (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x)) 115e5f94314SAttilio Rao #define LK_TRYWIT(x) \ 116e5f94314SAttilio Rao (LK_TRYOP(x) ? LOP_TRYLOCK : 0) 117047dd67eSAttilio Rao 118047dd67eSAttilio Rao #define lockmgr_disowned(lk) \ 119047dd67eSAttilio Rao (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC) 120047dd67eSAttilio Rao 121047dd67eSAttilio Rao #define lockmgr_xlocked(lk) \ 122047dd67eSAttilio Rao (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread) 123047dd67eSAttilio Rao 124047dd67eSAttilio Rao static void assert_lockmgr(struct lock_object *lock, int how); 125047dd67eSAttilio Rao #ifdef DDB 12661bd5e21SKip Macy static void db_show_lockmgr(struct lock_object *lock); 127be6847d7SJohn Baldwin #endif 1286e21afd4SJohn Baldwin static void lock_lockmgr(struct lock_object *lock, int how); 1296e21afd4SJohn Baldwin static int unlock_lockmgr(struct lock_object *lock); 13061bd5e21SKip Macy 13161bd5e21SKip Macy struct lock_class lock_class_lockmgr = { 1323ff6d229SJohn Baldwin .lc_name = "lockmgr", 133047dd67eSAttilio Rao .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE, 134f9721b43SAttilio Rao .lc_assert = assert_lockmgr, 13561bd5e21SKip Macy #ifdef DDB 1366e21afd4SJohn Baldwin .lc_ddb_show = db_show_lockmgr, 13761bd5e21SKip Macy #endif 1386e21afd4SJohn Baldwin .lc_lock = lock_lockmgr, 139047dd67eSAttilio Rao .lc_unlock = unlock_lockmgr 14061bd5e21SKip Macy }; 14161bd5e21SKip Macy 142047dd67eSAttilio Rao static __inline struct thread * 143047dd67eSAttilio Rao lockmgr_xholder(struct lock *lk) 144047dd67eSAttilio Rao { 145047dd67eSAttilio Rao uintptr_t x; 146047dd67eSAttilio Rao 147047dd67eSAttilio Rao x = lk->lk_lock; 148047dd67eSAttilio Rao return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x)); 149047dd67eSAttilio Rao } 15084887fa3SAttilio Rao 15153bf4bb2SPeter Wemm /* 152047dd67eSAttilio Rao * It assumes sleepq_lock held and returns with this one unheld. 153047dd67eSAttilio Rao * It also assumes the generic interlock is sane and previously checked. 154047dd67eSAttilio Rao * If LK_INTERLOCK is specified the interlock is not reacquired after the 155047dd67eSAttilio Rao * sleep. 15653bf4bb2SPeter Wemm */ 157047dd67eSAttilio Rao static __inline int 158047dd67eSAttilio Rao sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk, 159047dd67eSAttilio Rao const char *wmesg, int pri, int timo, int queue) 160047dd67eSAttilio Rao { 161e5f94314SAttilio Rao GIANT_DECLARE; 162047dd67eSAttilio Rao struct lock_class *class; 163047dd67eSAttilio Rao int catch, error; 16453bf4bb2SPeter Wemm 165047dd67eSAttilio Rao class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL; 1665047a8fdSAttilio Rao catch = pri & PCATCH; 167047dd67eSAttilio Rao pri &= PRIMASK; 168047dd67eSAttilio Rao error = 0; 169047dd67eSAttilio Rao 170047dd67eSAttilio Rao LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk, 171047dd67eSAttilio Rao (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared"); 172047dd67eSAttilio Rao 173047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 174047dd67eSAttilio Rao class->lc_unlock(ilk); 175e5f94314SAttilio Rao GIANT_SAVE(); 176047dd67eSAttilio Rao sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ? 177047dd67eSAttilio Rao SLEEPQ_INTERRUPTIBLE : 0), queue); 178047dd67eSAttilio Rao if ((flags & LK_TIMELOCK) && timo) 179047dd67eSAttilio Rao sleepq_set_timeout(&lk->lock_object, timo); 180047dd67eSAttilio Rao 181047dd67eSAttilio Rao /* 182047dd67eSAttilio Rao * Decisional switch for real sleeping. 183047dd67eSAttilio Rao */ 184047dd67eSAttilio Rao if ((flags & LK_TIMELOCK) && timo && catch) 185047dd67eSAttilio Rao error = sleepq_timedwait_sig(&lk->lock_object, pri); 186047dd67eSAttilio Rao else if ((flags & LK_TIMELOCK) && timo) 187047dd67eSAttilio Rao error = sleepq_timedwait(&lk->lock_object, pri); 188047dd67eSAttilio Rao else if (catch) 189047dd67eSAttilio Rao error = sleepq_wait_sig(&lk->lock_object, pri); 190047dd67eSAttilio Rao else 191047dd67eSAttilio Rao sleepq_wait(&lk->lock_object, pri); 192e5f94314SAttilio Rao GIANT_RESTORE(); 193047dd67eSAttilio Rao if ((flags & LK_SLEEPFAIL) && error == 0) 194047dd67eSAttilio Rao error = ENOLCK; 195047dd67eSAttilio Rao 196047dd67eSAttilio Rao return (error); 197047dd67eSAttilio Rao } 198047dd67eSAttilio Rao 199047dd67eSAttilio Rao static __inline void 200047dd67eSAttilio Rao wakeupshlk(struct lock *lk, const char *file, int line) 201047dd67eSAttilio Rao { 202047dd67eSAttilio Rao uintptr_t v, x; 203047dd67eSAttilio Rao int queue; 204047dd67eSAttilio Rao 205047dd67eSAttilio Rao TD_LOCKS_DEC(curthread); 206047dd67eSAttilio Rao TD_SLOCKS_DEC(curthread); 207e5f94314SAttilio Rao WITNESS_UNLOCK(&lk->lock_object, 0, file, line); 208047dd67eSAttilio Rao LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line); 209047dd67eSAttilio Rao 210047dd67eSAttilio Rao for (;;) { 211047dd67eSAttilio Rao x = lk->lk_lock; 212047dd67eSAttilio Rao 213047dd67eSAttilio Rao /* 214047dd67eSAttilio Rao * If there is more than one shared lock held, just drop one 215047dd67eSAttilio Rao * and return. 216047dd67eSAttilio Rao */ 217047dd67eSAttilio Rao if (LK_SHARERS(x) > 1) { 218047dd67eSAttilio Rao if (atomic_cmpset_ptr(&lk->lk_lock, x, 219047dd67eSAttilio Rao x - LK_ONE_SHARER)) 220047dd67eSAttilio Rao break; 221047dd67eSAttilio Rao continue; 222047dd67eSAttilio Rao } 223047dd67eSAttilio Rao 224047dd67eSAttilio Rao /* 225047dd67eSAttilio Rao * If there are not waiters on the exclusive queue, drop the 226047dd67eSAttilio Rao * lock quickly. 227047dd67eSAttilio Rao */ 228047dd67eSAttilio Rao if ((x & LK_ALL_WAITERS) == 0) { 229047dd67eSAttilio Rao MPASS(x == LK_SHARERS_LOCK(1)); 230047dd67eSAttilio Rao if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1), 231047dd67eSAttilio Rao LK_UNLOCKED)) 232047dd67eSAttilio Rao break; 233047dd67eSAttilio Rao continue; 234047dd67eSAttilio Rao } 235047dd67eSAttilio Rao 236047dd67eSAttilio Rao /* 237047dd67eSAttilio Rao * We should have a sharer with waiters, so enter the hard 238047dd67eSAttilio Rao * path in order to handle wakeups correctly. 239047dd67eSAttilio Rao */ 240047dd67eSAttilio Rao sleepq_lock(&lk->lock_object); 241047dd67eSAttilio Rao x = lk->lk_lock & LK_ALL_WAITERS; 242047dd67eSAttilio Rao v = LK_UNLOCKED; 243047dd67eSAttilio Rao 244047dd67eSAttilio Rao /* 245047dd67eSAttilio Rao * If the lock has exclusive waiters, give them preference in 246047dd67eSAttilio Rao * order to avoid deadlock with shared runners up. 247047dd67eSAttilio Rao */ 248047dd67eSAttilio Rao if (x & LK_EXCLUSIVE_WAITERS) { 249047dd67eSAttilio Rao queue = SQ_EXCLUSIVE_QUEUE; 250047dd67eSAttilio Rao v |= (x & LK_SHARED_WAITERS); 251047dd67eSAttilio Rao } else { 252047dd67eSAttilio Rao MPASS(x == LK_SHARED_WAITERS); 253047dd67eSAttilio Rao queue = SQ_SHARED_QUEUE; 254047dd67eSAttilio Rao } 255047dd67eSAttilio Rao 256047dd67eSAttilio Rao if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x, 257047dd67eSAttilio Rao v)) { 258047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 259047dd67eSAttilio Rao continue; 260047dd67eSAttilio Rao } 261047dd67eSAttilio Rao LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue", 262047dd67eSAttilio Rao __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" : 263047dd67eSAttilio Rao "exclusive"); 264047dd67eSAttilio Rao sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue); 265047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 266047dd67eSAttilio Rao break; 267047dd67eSAttilio Rao } 268047dd67eSAttilio Rao 269047dd67eSAttilio Rao lock_profile_release_lock(&lk->lock_object); 270047dd67eSAttilio Rao } 271047dd67eSAttilio Rao 272047dd67eSAttilio Rao static void 273f9721b43SAttilio Rao assert_lockmgr(struct lock_object *lock, int what) 274f9721b43SAttilio Rao { 275f9721b43SAttilio Rao 276f9721b43SAttilio Rao panic("lockmgr locks do not support assertions"); 277f9721b43SAttilio Rao } 278f9721b43SAttilio Rao 279047dd67eSAttilio Rao static void 2806e21afd4SJohn Baldwin lock_lockmgr(struct lock_object *lock, int how) 2816e21afd4SJohn Baldwin { 2826e21afd4SJohn Baldwin 2836e21afd4SJohn Baldwin panic("lockmgr locks do not support sleep interlocking"); 2846e21afd4SJohn Baldwin } 2856e21afd4SJohn Baldwin 286047dd67eSAttilio Rao static int 2876e21afd4SJohn Baldwin unlock_lockmgr(struct lock_object *lock) 2886e21afd4SJohn Baldwin { 2896e21afd4SJohn Baldwin 2906e21afd4SJohn Baldwin panic("lockmgr locks do not support sleep interlocking"); 2916e21afd4SJohn Baldwin } 2926e21afd4SJohn Baldwin 29399448ed1SJohn Dyson void 294047dd67eSAttilio Rao lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags) 29599448ed1SJohn Dyson { 2966efc8a16SAttilio Rao int iflags; 2976efc8a16SAttilio Rao 298047dd67eSAttilio Rao MPASS((flags & ~LK_INIT_MASK) == 0); 29999448ed1SJohn Dyson 3006efc8a16SAttilio Rao iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE; 301047dd67eSAttilio Rao if ((flags & LK_NODUP) == 0) 3026efc8a16SAttilio Rao iflags |= LO_DUPOK; 3037fbfba7bSAttilio Rao if (flags & LK_NOPROFILE) 3047fbfba7bSAttilio Rao iflags |= LO_NOPROFILE; 305047dd67eSAttilio Rao if ((flags & LK_NOWITNESS) == 0) 3066efc8a16SAttilio Rao iflags |= LO_WITNESS; 3077fbfba7bSAttilio Rao if (flags & LK_QUIET) 3087fbfba7bSAttilio Rao iflags |= LO_QUIET; 309047dd67eSAttilio Rao iflags |= flags & (LK_CANRECURSE | LK_NOSHARE); 310047dd67eSAttilio Rao 311047dd67eSAttilio Rao lk->lk_lock = LK_UNLOCKED; 312047dd67eSAttilio Rao lk->lk_recurse = 0; 313047dd67eSAttilio Rao lk->lk_timo = timo; 314047dd67eSAttilio Rao lk->lk_pri = pri; 315047dd67eSAttilio Rao lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags); 316047dd67eSAttilio Rao STACK_ZERO(lk); 31799448ed1SJohn Dyson } 31899448ed1SJohn Dyson 319a18b1f1dSJason Evans void 320047dd67eSAttilio Rao lockdestroy(struct lock *lk) 321a18b1f1dSJason Evans { 322c91fcee7SJohn Baldwin 323047dd67eSAttilio Rao KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held")); 324047dd67eSAttilio Rao KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed")); 325047dd67eSAttilio Rao lock_destroy(&lk->lock_object); 326047dd67eSAttilio Rao } 327047dd67eSAttilio Rao 328047dd67eSAttilio Rao int 329047dd67eSAttilio Rao __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk, 330047dd67eSAttilio Rao const char *wmesg, int pri, int timo, const char *file, int line) 331047dd67eSAttilio Rao { 332e5f94314SAttilio Rao GIANT_DECLARE; 333047dd67eSAttilio Rao uint64_t waittime; 334047dd67eSAttilio Rao struct lock_class *class; 335047dd67eSAttilio Rao const char *iwmesg; 336047dd67eSAttilio Rao uintptr_t tid, v, x; 337047dd67eSAttilio Rao u_int op; 338047dd67eSAttilio Rao int contested, error, ipri, itimo, queue; 339047dd67eSAttilio Rao 340047dd67eSAttilio Rao contested = 0; 341047dd67eSAttilio Rao error = 0; 342047dd67eSAttilio Rao waittime = 0; 343047dd67eSAttilio Rao tid = (uintptr_t)curthread; 344047dd67eSAttilio Rao op = (flags & LK_TYPE_MASK); 345047dd67eSAttilio Rao iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg; 346047dd67eSAttilio Rao ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri; 347047dd67eSAttilio Rao itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo; 348047dd67eSAttilio Rao 349047dd67eSAttilio Rao MPASS((flags & ~LK_TOTAL_MASK) == 0); 350872b7289SAttilio Rao KASSERT((op & (op - 1)) == 0, 351872b7289SAttilio Rao ("%s: Invalid requested operation @ %s:%d", __func__, file, line)); 352047dd67eSAttilio Rao KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 || 353047dd67eSAttilio Rao (op != LK_DOWNGRADE && op != LK_RELEASE), 354047dd67eSAttilio Rao ("%s: Invalid flags in regard of the operation desired @ %s:%d", 355047dd67eSAttilio Rao __func__, file, line)); 356047dd67eSAttilio Rao KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL, 357047dd67eSAttilio Rao ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d", 358047dd67eSAttilio Rao __func__, file, line)); 359047dd67eSAttilio Rao 360047dd67eSAttilio Rao class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL; 361047dd67eSAttilio Rao if (panicstr != NULL) { 362047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 363047dd67eSAttilio Rao class->lc_unlock(ilk); 364047dd67eSAttilio Rao return (0); 365047dd67eSAttilio Rao } 366047dd67eSAttilio Rao 367047dd67eSAttilio Rao if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE)) 368047dd67eSAttilio Rao op = LK_EXCLUSIVE; 369047dd67eSAttilio Rao 370047dd67eSAttilio Rao switch (op) { 371047dd67eSAttilio Rao case LK_SHARED: 372e5f94314SAttilio Rao if (LK_CAN_WITNESS(flags)) 373e5f94314SAttilio Rao WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER, 374e5f94314SAttilio Rao file, line); 375047dd67eSAttilio Rao for (;;) { 376047dd67eSAttilio Rao x = lk->lk_lock; 377047dd67eSAttilio Rao 378047dd67eSAttilio Rao /* 379047dd67eSAttilio Rao * If no other thread has an exclusive lock, or 380047dd67eSAttilio Rao * no exclusive waiter is present, bump the count of 381047dd67eSAttilio Rao * sharers. Since we have to preserve the state of 382047dd67eSAttilio Rao * waiters, if we fail to acquire the shared lock 383047dd67eSAttilio Rao * loop back and retry. 384047dd67eSAttilio Rao */ 385047dd67eSAttilio Rao if (LK_CAN_SHARE(x)) { 386047dd67eSAttilio Rao if (atomic_cmpset_acq_ptr(&lk->lk_lock, x, 387047dd67eSAttilio Rao x + LK_ONE_SHARER)) 388047dd67eSAttilio Rao break; 389047dd67eSAttilio Rao continue; 390047dd67eSAttilio Rao } 391047dd67eSAttilio Rao lock_profile_obtain_lock_failed(&lk->lock_object, 392047dd67eSAttilio Rao &contested, &waittime); 393047dd67eSAttilio Rao 394047dd67eSAttilio Rao /* 39596f1567fSKonstantin Belousov * If the lock is already held by curthread in 396047dd67eSAttilio Rao * exclusive way avoid a deadlock. 397047dd67eSAttilio Rao */ 398047dd67eSAttilio Rao if (LK_HOLDER(x) == tid) { 399047dd67eSAttilio Rao LOCK_LOG2(lk, 40096f1567fSKonstantin Belousov "%s: %p already held in exclusive mode", 401047dd67eSAttilio Rao __func__, lk); 402047dd67eSAttilio Rao error = EDEADLK; 403047dd67eSAttilio Rao break; 404a18b1f1dSJason Evans } 405a18b1f1dSJason Evans 406a18b1f1dSJason Evans /* 407047dd67eSAttilio Rao * If the lock is expected to not sleep just give up 408047dd67eSAttilio Rao * and return. 409d7a7e179SAttilio Rao */ 410047dd67eSAttilio Rao if (LK_TRYOP(flags)) { 411047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p fails the try operation", 412047dd67eSAttilio Rao __func__, lk); 413047dd67eSAttilio Rao error = EBUSY; 414047dd67eSAttilio Rao break; 415047dd67eSAttilio Rao } 416047dd67eSAttilio Rao 417047dd67eSAttilio Rao /* 418047dd67eSAttilio Rao * Acquire the sleepqueue chain lock because we 419047dd67eSAttilio Rao * probabilly will need to manipulate waiters flags. 420047dd67eSAttilio Rao */ 421047dd67eSAttilio Rao sleepq_lock(&lk->lock_object); 422047dd67eSAttilio Rao x = lk->lk_lock; 423047dd67eSAttilio Rao 424047dd67eSAttilio Rao /* 425047dd67eSAttilio Rao * if the lock can be acquired in shared mode, try 426047dd67eSAttilio Rao * again. 427047dd67eSAttilio Rao */ 428047dd67eSAttilio Rao if (LK_CAN_SHARE(x)) { 429047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 430047dd67eSAttilio Rao continue; 431047dd67eSAttilio Rao } 432047dd67eSAttilio Rao 433047dd67eSAttilio Rao /* 434047dd67eSAttilio Rao * Try to set the LK_SHARED_WAITERS flag. If we fail, 435047dd67eSAttilio Rao * loop back and retry. 436047dd67eSAttilio Rao */ 437047dd67eSAttilio Rao if ((x & LK_SHARED_WAITERS) == 0) { 438047dd67eSAttilio Rao if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x, 439047dd67eSAttilio Rao x | LK_SHARED_WAITERS)) { 440047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 441047dd67eSAttilio Rao continue; 442047dd67eSAttilio Rao } 443047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p set shared waiters flag", 444047dd67eSAttilio Rao __func__, lk); 445047dd67eSAttilio Rao } 446047dd67eSAttilio Rao 447047dd67eSAttilio Rao /* 448047dd67eSAttilio Rao * As far as we have been unable to acquire the 449047dd67eSAttilio Rao * shared lock and the shared waiters flag is set, 450047dd67eSAttilio Rao * we will sleep. 451047dd67eSAttilio Rao */ 452047dd67eSAttilio Rao error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, 453047dd67eSAttilio Rao SQ_SHARED_QUEUE); 454047dd67eSAttilio Rao flags &= ~LK_INTERLOCK; 455047dd67eSAttilio Rao if (error) { 456047dd67eSAttilio Rao LOCK_LOG3(lk, 457047dd67eSAttilio Rao "%s: interrupted sleep for %p with %d", 458047dd67eSAttilio Rao __func__, lk, error); 459047dd67eSAttilio Rao break; 460047dd67eSAttilio Rao } 461047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 462047dd67eSAttilio Rao __func__, lk); 463047dd67eSAttilio Rao } 464047dd67eSAttilio Rao if (error == 0) { 465047dd67eSAttilio Rao lock_profile_obtain_lock_success(&lk->lock_object, 466047dd67eSAttilio Rao contested, waittime, file, line); 467047dd67eSAttilio Rao LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, 468047dd67eSAttilio Rao line); 469e5f94314SAttilio Rao WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, 470e5f94314SAttilio Rao line); 471047dd67eSAttilio Rao TD_LOCKS_INC(curthread); 472047dd67eSAttilio Rao TD_SLOCKS_INC(curthread); 473047dd67eSAttilio Rao STACK_SAVE(lk); 474047dd67eSAttilio Rao } 475047dd67eSAttilio Rao break; 476047dd67eSAttilio Rao case LK_UPGRADE: 477047dd67eSAttilio Rao _lockmgr_assert(lk, KA_SLOCKED, file, line); 478047dd67eSAttilio Rao x = lk->lk_lock & LK_ALL_WAITERS; 479047dd67eSAttilio Rao 480047dd67eSAttilio Rao /* 481047dd67eSAttilio Rao * Try to switch from one shared lock to an exclusive one. 482047dd67eSAttilio Rao * We need to preserve waiters flags during the operation. 483047dd67eSAttilio Rao */ 484047dd67eSAttilio Rao if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x, 485047dd67eSAttilio Rao tid | x)) { 486047dd67eSAttilio Rao LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file, 487047dd67eSAttilio Rao line); 488e5f94314SAttilio Rao WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE | 489e5f94314SAttilio Rao LK_TRYWIT(flags), file, line); 490047dd67eSAttilio Rao TD_SLOCKS_DEC(curthread); 491047dd67eSAttilio Rao break; 492047dd67eSAttilio Rao } 493047dd67eSAttilio Rao 494047dd67eSAttilio Rao /* 495047dd67eSAttilio Rao * We have been unable to succeed in upgrading, so just 496047dd67eSAttilio Rao * give up the shared lock. 497047dd67eSAttilio Rao */ 498047dd67eSAttilio Rao wakeupshlk(lk, file, line); 499047dd67eSAttilio Rao 500047dd67eSAttilio Rao /* FALLTHROUGH */ 501047dd67eSAttilio Rao case LK_EXCLUSIVE: 502e5f94314SAttilio Rao if (LK_CAN_WITNESS(flags)) 503e5f94314SAttilio Rao WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER | 504e5f94314SAttilio Rao LOP_EXCLUSIVE, file, line); 505047dd67eSAttilio Rao 506047dd67eSAttilio Rao /* 50796f1567fSKonstantin Belousov * If curthread already holds the lock and this one is 508047dd67eSAttilio Rao * allowed to recurse, simply recurse on it. 509047dd67eSAttilio Rao */ 510047dd67eSAttilio Rao if (lockmgr_xlocked(lk)) { 511047dd67eSAttilio Rao if ((flags & LK_CANRECURSE) == 0 && 512047dd67eSAttilio Rao (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) { 513047dd67eSAttilio Rao 514047dd67eSAttilio Rao /* 515047dd67eSAttilio Rao * If the lock is expected to not panic just 516047dd67eSAttilio Rao * give up and return. 517047dd67eSAttilio Rao */ 518047dd67eSAttilio Rao if (LK_TRYOP(flags)) { 519047dd67eSAttilio Rao LOCK_LOG2(lk, 520047dd67eSAttilio Rao "%s: %p fails the try operation", 521047dd67eSAttilio Rao __func__, lk); 522047dd67eSAttilio Rao error = EBUSY; 523047dd67eSAttilio Rao break; 524047dd67eSAttilio Rao } 525047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 526047dd67eSAttilio Rao class->lc_unlock(ilk); 527047dd67eSAttilio Rao panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n", 528047dd67eSAttilio Rao __func__, iwmesg, file, line); 529047dd67eSAttilio Rao } 530047dd67eSAttilio Rao lk->lk_recurse++; 531047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p recursing", __func__, lk); 532047dd67eSAttilio Rao LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, 533047dd67eSAttilio Rao lk->lk_recurse, file, line); 534e5f94314SAttilio Rao WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 535e5f94314SAttilio Rao LK_TRYWIT(flags), file, line); 536047dd67eSAttilio Rao TD_LOCKS_INC(curthread); 537047dd67eSAttilio Rao break; 538047dd67eSAttilio Rao } 539047dd67eSAttilio Rao 540047dd67eSAttilio Rao while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, 541047dd67eSAttilio Rao tid)) { 542047dd67eSAttilio Rao lock_profile_obtain_lock_failed(&lk->lock_object, 543047dd67eSAttilio Rao &contested, &waittime); 544047dd67eSAttilio Rao 545047dd67eSAttilio Rao /* 546047dd67eSAttilio Rao * If the lock is expected to not sleep just give up 547047dd67eSAttilio Rao * and return. 548047dd67eSAttilio Rao */ 549047dd67eSAttilio Rao if (LK_TRYOP(flags)) { 550047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p fails the try operation", 551047dd67eSAttilio Rao __func__, lk); 552047dd67eSAttilio Rao error = EBUSY; 553047dd67eSAttilio Rao break; 554047dd67eSAttilio Rao } 555047dd67eSAttilio Rao 556047dd67eSAttilio Rao /* 557047dd67eSAttilio Rao * Acquire the sleepqueue chain lock because we 558047dd67eSAttilio Rao * probabilly will need to manipulate waiters flags. 559047dd67eSAttilio Rao */ 560047dd67eSAttilio Rao sleepq_lock(&lk->lock_object); 561047dd67eSAttilio Rao x = lk->lk_lock; 562047dd67eSAttilio Rao v = x & LK_ALL_WAITERS; 563047dd67eSAttilio Rao 564047dd67eSAttilio Rao /* 565047dd67eSAttilio Rao * if the lock has been released while we spun on 566047dd67eSAttilio Rao * the sleepqueue chain lock just try again. 567047dd67eSAttilio Rao */ 568047dd67eSAttilio Rao if (x == LK_UNLOCKED) { 569047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 570047dd67eSAttilio Rao continue; 571047dd67eSAttilio Rao } 572047dd67eSAttilio Rao 573047dd67eSAttilio Rao /* 574047dd67eSAttilio Rao * The lock can be in the state where there is a 575047dd67eSAttilio Rao * pending queue of waiters, but still no owner. 576047dd67eSAttilio Rao * This happens when the lock is contested and an 577047dd67eSAttilio Rao * owner is going to claim the lock. 578047dd67eSAttilio Rao * If curthread is the one successfully acquiring it 579047dd67eSAttilio Rao * claim lock ownership and return, preserving waiters 580047dd67eSAttilio Rao * flags. 581047dd67eSAttilio Rao */ 582047dd67eSAttilio Rao if (x == (LK_UNLOCKED | v)) { 583047dd67eSAttilio Rao if (atomic_cmpset_acq_ptr(&lk->lk_lock, x, 584047dd67eSAttilio Rao tid | v)) { 585047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 586047dd67eSAttilio Rao LOCK_LOG2(lk, 587047dd67eSAttilio Rao "%s: %p claimed by a new writer", 588047dd67eSAttilio Rao __func__, lk); 589047dd67eSAttilio Rao break; 590047dd67eSAttilio Rao } 591047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 592047dd67eSAttilio Rao continue; 593047dd67eSAttilio Rao } 594047dd67eSAttilio Rao 595047dd67eSAttilio Rao /* 596047dd67eSAttilio Rao * Try to set the LK_EXCLUSIVE_WAITERS flag. If we 597047dd67eSAttilio Rao * fail, loop back and retry. 598047dd67eSAttilio Rao */ 599047dd67eSAttilio Rao if ((x & LK_EXCLUSIVE_WAITERS) == 0) { 600047dd67eSAttilio Rao if (!atomic_cmpset_ptr(&lk->lk_lock, x, 601047dd67eSAttilio Rao x | LK_EXCLUSIVE_WAITERS)) { 602047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 603047dd67eSAttilio Rao continue; 604047dd67eSAttilio Rao } 605047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p set excl waiters flag", 606047dd67eSAttilio Rao __func__, lk); 607047dd67eSAttilio Rao } 608047dd67eSAttilio Rao 609047dd67eSAttilio Rao /* 610047dd67eSAttilio Rao * As far as we have been unable to acquire the 611047dd67eSAttilio Rao * exclusive lock and the exclusive waiters flag 612047dd67eSAttilio Rao * is set, we will sleep. 613047dd67eSAttilio Rao */ 614047dd67eSAttilio Rao error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, 615047dd67eSAttilio Rao SQ_EXCLUSIVE_QUEUE); 616047dd67eSAttilio Rao flags &= ~LK_INTERLOCK; 617047dd67eSAttilio Rao if (error) { 618047dd67eSAttilio Rao LOCK_LOG3(lk, 619047dd67eSAttilio Rao "%s: interrupted sleep for %p with %d", 620047dd67eSAttilio Rao __func__, lk, error); 621047dd67eSAttilio Rao break; 622047dd67eSAttilio Rao } 623047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 624047dd67eSAttilio Rao __func__, lk); 625047dd67eSAttilio Rao } 626047dd67eSAttilio Rao if (error == 0) { 627047dd67eSAttilio Rao lock_profile_obtain_lock_success(&lk->lock_object, 628047dd67eSAttilio Rao contested, waittime, file, line); 629047dd67eSAttilio Rao LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, 630047dd67eSAttilio Rao lk->lk_recurse, file, line); 631e5f94314SAttilio Rao WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 632e5f94314SAttilio Rao LK_TRYWIT(flags), file, line); 633047dd67eSAttilio Rao TD_LOCKS_INC(curthread); 634047dd67eSAttilio Rao STACK_SAVE(lk); 635047dd67eSAttilio Rao } 636047dd67eSAttilio Rao break; 637047dd67eSAttilio Rao case LK_DOWNGRADE: 638047dd67eSAttilio Rao _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line); 639e5f94314SAttilio Rao LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line); 640e5f94314SAttilio Rao WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line); 641e5f94314SAttilio Rao TD_SLOCKS_INC(curthread); 642047dd67eSAttilio Rao 643047dd67eSAttilio Rao /* 644047dd67eSAttilio Rao * In order to preserve waiters flags, just spin. 645047dd67eSAttilio Rao */ 646047dd67eSAttilio Rao for (;;) { 647047dd67eSAttilio Rao x = lk->lk_lock & LK_ALL_WAITERS; 648047dd67eSAttilio Rao if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x, 649e5f94314SAttilio Rao LK_SHARERS_LOCK(1) | x)) 650047dd67eSAttilio Rao break; 651047dd67eSAttilio Rao cpu_spinwait(); 652047dd67eSAttilio Rao } 653047dd67eSAttilio Rao break; 654047dd67eSAttilio Rao case LK_RELEASE: 655047dd67eSAttilio Rao _lockmgr_assert(lk, KA_LOCKED, file, line); 656047dd67eSAttilio Rao x = lk->lk_lock; 657047dd67eSAttilio Rao 658047dd67eSAttilio Rao if ((x & LK_SHARE) == 0) { 659047dd67eSAttilio Rao 660047dd67eSAttilio Rao /* 661047dd67eSAttilio Rao * As first option, treact the lock as if it has not 662047dd67eSAttilio Rao * any waiter. 663047dd67eSAttilio Rao * Fix-up the tid var if the lock has been disowned. 664047dd67eSAttilio Rao */ 665047dd67eSAttilio Rao if (LK_HOLDER(x) == LK_KERNPROC) 666047dd67eSAttilio Rao tid = LK_KERNPROC; 667e5f94314SAttilio Rao else { 668e5f94314SAttilio Rao WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, 669e5f94314SAttilio Rao file, line); 670047dd67eSAttilio Rao TD_LOCKS_DEC(curthread); 671e5f94314SAttilio Rao } 672047dd67eSAttilio Rao LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, 673047dd67eSAttilio Rao lk->lk_recurse, file, line); 674047dd67eSAttilio Rao 675047dd67eSAttilio Rao /* 676047dd67eSAttilio Rao * The lock is held in exclusive mode. 677047dd67eSAttilio Rao * If the lock is recursed also, then unrecurse it. 678047dd67eSAttilio Rao */ 679047dd67eSAttilio Rao if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) { 680047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p unrecursing", __func__, 681047dd67eSAttilio Rao lk); 682047dd67eSAttilio Rao lk->lk_recurse--; 683047dd67eSAttilio Rao break; 684047dd67eSAttilio Rao } 685047dd67eSAttilio Rao lock_profile_release_lock(&lk->lock_object); 686047dd67eSAttilio Rao 687047dd67eSAttilio Rao if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid, 688047dd67eSAttilio Rao LK_UNLOCKED)) 689047dd67eSAttilio Rao break; 690047dd67eSAttilio Rao 691047dd67eSAttilio Rao sleepq_lock(&lk->lock_object); 692047dd67eSAttilio Rao x = lk->lk_lock & LK_ALL_WAITERS; 693047dd67eSAttilio Rao v = LK_UNLOCKED; 694047dd67eSAttilio Rao 695047dd67eSAttilio Rao /* 696047dd67eSAttilio Rao * If the lock has exclusive waiters, give them 697047dd67eSAttilio Rao * preference in order to avoid deadlock with 698047dd67eSAttilio Rao * shared runners up. 699047dd67eSAttilio Rao */ 700047dd67eSAttilio Rao if (x & LK_EXCLUSIVE_WAITERS) { 701047dd67eSAttilio Rao queue = SQ_EXCLUSIVE_QUEUE; 702047dd67eSAttilio Rao v |= (x & LK_SHARED_WAITERS); 703047dd67eSAttilio Rao } else { 704047dd67eSAttilio Rao MPASS(x == LK_SHARED_WAITERS); 705047dd67eSAttilio Rao queue = SQ_SHARED_QUEUE; 706047dd67eSAttilio Rao } 707047dd67eSAttilio Rao 708047dd67eSAttilio Rao LOCK_LOG3(lk, 709047dd67eSAttilio Rao "%s: %p waking up threads on the %s queue", 710047dd67eSAttilio Rao __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" : 711047dd67eSAttilio Rao "exclusive"); 712047dd67eSAttilio Rao atomic_store_rel_ptr(&lk->lk_lock, v); 713047dd67eSAttilio Rao sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue); 714047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 715047dd67eSAttilio Rao break; 716047dd67eSAttilio Rao } else 717047dd67eSAttilio Rao wakeupshlk(lk, file, line); 718047dd67eSAttilio Rao break; 719047dd67eSAttilio Rao case LK_DRAIN: 720e5f94314SAttilio Rao if (LK_CAN_WITNESS(flags)) 721e5f94314SAttilio Rao WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER | 722e5f94314SAttilio Rao LOP_EXCLUSIVE, file, line); 723047dd67eSAttilio Rao 724047dd67eSAttilio Rao /* 72596f1567fSKonstantin Belousov * Trying to drain a lock we already own will result in a 726047dd67eSAttilio Rao * deadlock. 727047dd67eSAttilio Rao */ 728047dd67eSAttilio Rao if (lockmgr_xlocked(lk)) { 729047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 730047dd67eSAttilio Rao class->lc_unlock(ilk); 731047dd67eSAttilio Rao panic("%s: draining %s with the lock held @ %s:%d\n", 732047dd67eSAttilio Rao __func__, iwmesg, file, line); 733047dd67eSAttilio Rao } 734047dd67eSAttilio Rao 735047dd67eSAttilio Rao while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) { 736047dd67eSAttilio Rao lock_profile_obtain_lock_failed(&lk->lock_object, 737047dd67eSAttilio Rao &contested, &waittime); 738047dd67eSAttilio Rao 739047dd67eSAttilio Rao /* 740047dd67eSAttilio Rao * If the lock is expected to not sleep just give up 741047dd67eSAttilio Rao * and return. 742047dd67eSAttilio Rao */ 743047dd67eSAttilio Rao if (LK_TRYOP(flags)) { 744047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p fails the try operation", 745047dd67eSAttilio Rao __func__, lk); 746047dd67eSAttilio Rao error = EBUSY; 747047dd67eSAttilio Rao break; 748047dd67eSAttilio Rao } 749047dd67eSAttilio Rao 750047dd67eSAttilio Rao /* 751047dd67eSAttilio Rao * Acquire the sleepqueue chain lock because we 752047dd67eSAttilio Rao * probabilly will need to manipulate waiters flags. 753047dd67eSAttilio Rao */ 754047dd67eSAttilio Rao sleepq_lock(&lk->lock_object); 755047dd67eSAttilio Rao x = lk->lk_lock; 756047dd67eSAttilio Rao v = x & LK_ALL_WAITERS; 757047dd67eSAttilio Rao 758047dd67eSAttilio Rao /* 759047dd67eSAttilio Rao * if the lock has been released while we spun on 760047dd67eSAttilio Rao * the sleepqueue chain lock just try again. 761047dd67eSAttilio Rao */ 762047dd67eSAttilio Rao if (x == LK_UNLOCKED) { 763047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 764047dd67eSAttilio Rao continue; 765047dd67eSAttilio Rao } 766047dd67eSAttilio Rao 767047dd67eSAttilio Rao if (x == (LK_UNLOCKED | v)) { 768047dd67eSAttilio Rao v = x; 769047dd67eSAttilio Rao if (v & LK_EXCLUSIVE_WAITERS) { 770047dd67eSAttilio Rao queue = SQ_EXCLUSIVE_QUEUE; 771047dd67eSAttilio Rao v &= ~LK_EXCLUSIVE_WAITERS; 772047dd67eSAttilio Rao } else { 773047dd67eSAttilio Rao MPASS(v & LK_SHARED_WAITERS); 774047dd67eSAttilio Rao queue = SQ_SHARED_QUEUE; 775047dd67eSAttilio Rao v &= ~LK_SHARED_WAITERS; 776047dd67eSAttilio Rao } 777047dd67eSAttilio Rao if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) { 778047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 779047dd67eSAttilio Rao continue; 780047dd67eSAttilio Rao } 781047dd67eSAttilio Rao LOCK_LOG3(lk, 782047dd67eSAttilio Rao "%s: %p waking up all threads on the %s queue", 783047dd67eSAttilio Rao __func__, lk, queue == SQ_SHARED_QUEUE ? 784047dd67eSAttilio Rao "shared" : "exclusive"); 785047dd67eSAttilio Rao sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 786047dd67eSAttilio Rao 0, queue); 787047dd67eSAttilio Rao 788047dd67eSAttilio Rao /* 789047dd67eSAttilio Rao * If shared waiters have been woken up we need 790047dd67eSAttilio Rao * to wait for one of them to acquire the lock 791047dd67eSAttilio Rao * before to set the exclusive waiters in 792047dd67eSAttilio Rao * order to avoid a deadlock. 793047dd67eSAttilio Rao */ 794047dd67eSAttilio Rao if (queue == SQ_SHARED_QUEUE) { 795047dd67eSAttilio Rao for (v = lk->lk_lock; 796047dd67eSAttilio Rao (v & LK_SHARE) && !LK_SHARERS(v); 797047dd67eSAttilio Rao v = lk->lk_lock) 798047dd67eSAttilio Rao cpu_spinwait(); 799047dd67eSAttilio Rao } 800047dd67eSAttilio Rao } 801047dd67eSAttilio Rao 802047dd67eSAttilio Rao /* 803047dd67eSAttilio Rao * Try to set the LK_EXCLUSIVE_WAITERS flag. If we 804047dd67eSAttilio Rao * fail, loop back and retry. 805047dd67eSAttilio Rao */ 806047dd67eSAttilio Rao if ((x & LK_EXCLUSIVE_WAITERS) == 0) { 807047dd67eSAttilio Rao if (!atomic_cmpset_ptr(&lk->lk_lock, x, 808047dd67eSAttilio Rao x | LK_EXCLUSIVE_WAITERS)) { 809047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 810047dd67eSAttilio Rao continue; 811047dd67eSAttilio Rao } 812047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p set drain waiters flag", 813047dd67eSAttilio Rao __func__, lk); 814047dd67eSAttilio Rao } 815047dd67eSAttilio Rao 816047dd67eSAttilio Rao /* 817047dd67eSAttilio Rao * As far as we have been unable to acquire the 818047dd67eSAttilio Rao * exclusive lock and the exclusive waiters flag 819047dd67eSAttilio Rao * is set, we will sleep. 820047dd67eSAttilio Rao */ 821047dd67eSAttilio Rao if (flags & LK_INTERLOCK) { 822047dd67eSAttilio Rao class->lc_unlock(ilk); 823047dd67eSAttilio Rao flags &= ~LK_INTERLOCK; 824047dd67eSAttilio Rao } 825e5f94314SAttilio Rao GIANT_SAVE(); 826047dd67eSAttilio Rao sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK, 827047dd67eSAttilio Rao SQ_EXCLUSIVE_QUEUE); 828047dd67eSAttilio Rao sleepq_wait(&lk->lock_object, ipri & PRIMASK); 829e5f94314SAttilio Rao GIANT_RESTORE(); 830047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 831047dd67eSAttilio Rao __func__, lk); 832047dd67eSAttilio Rao } 833047dd67eSAttilio Rao 834047dd67eSAttilio Rao if (error == 0) { 835047dd67eSAttilio Rao lock_profile_obtain_lock_success(&lk->lock_object, 836047dd67eSAttilio Rao contested, waittime, file, line); 837047dd67eSAttilio Rao LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0, 838047dd67eSAttilio Rao lk->lk_recurse, file, line); 839e5f94314SAttilio Rao WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 840e5f94314SAttilio Rao LK_TRYWIT(flags), file, line); 841047dd67eSAttilio Rao TD_LOCKS_INC(curthread); 842047dd67eSAttilio Rao STACK_SAVE(lk); 843047dd67eSAttilio Rao } 844047dd67eSAttilio Rao break; 845047dd67eSAttilio Rao default: 846047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 847047dd67eSAttilio Rao class->lc_unlock(ilk); 848047dd67eSAttilio Rao panic("%s: unknown lockmgr request 0x%x\n", __func__, op); 849047dd67eSAttilio Rao } 850047dd67eSAttilio Rao 851047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 852047dd67eSAttilio Rao class->lc_unlock(ilk); 853047dd67eSAttilio Rao 854047dd67eSAttilio Rao return (error); 855047dd67eSAttilio Rao } 856047dd67eSAttilio Rao 857d7a7e179SAttilio Rao void 858047dd67eSAttilio Rao _lockmgr_disown(struct lock *lk, const char *file, int line) 859047dd67eSAttilio Rao { 860047dd67eSAttilio Rao uintptr_t tid, x; 861047dd67eSAttilio Rao 862047dd67eSAttilio Rao tid = (uintptr_t)curthread; 863047dd67eSAttilio Rao _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line); 864047dd67eSAttilio Rao 865047dd67eSAttilio Rao /* 86696f1567fSKonstantin Belousov * If the owner is already LK_KERNPROC just skip the whole operation. 867047dd67eSAttilio Rao */ 868047dd67eSAttilio Rao if (LK_HOLDER(lk->lk_lock) != tid) 869047dd67eSAttilio Rao return; 870e5f94314SAttilio Rao LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line); 871e5f94314SAttilio Rao WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line); 872e5f94314SAttilio Rao TD_LOCKS_DEC(curthread); 873047dd67eSAttilio Rao 874047dd67eSAttilio Rao /* 875047dd67eSAttilio Rao * In order to preserve waiters flags, just spin. 876047dd67eSAttilio Rao */ 877047dd67eSAttilio Rao for (;;) { 878047dd67eSAttilio Rao x = lk->lk_lock & LK_ALL_WAITERS; 87922dd228dSAttilio Rao if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x, 880e5f94314SAttilio Rao LK_KERNPROC | x)) 881047dd67eSAttilio Rao return; 882047dd67eSAttilio Rao cpu_spinwait(); 883047dd67eSAttilio Rao } 884047dd67eSAttilio Rao } 885047dd67eSAttilio Rao 886047dd67eSAttilio Rao void 887047dd67eSAttilio Rao lockmgr_printinfo(struct lock *lk) 888d7a7e179SAttilio Rao { 889d7a7e179SAttilio Rao struct thread *td; 890047dd67eSAttilio Rao uintptr_t x; 891d7a7e179SAttilio Rao 892047dd67eSAttilio Rao if (lk->lk_lock == LK_UNLOCKED) 893047dd67eSAttilio Rao printf(" lock type %s: UNLOCKED\n", lk->lock_object.lo_name); 894047dd67eSAttilio Rao else if (lk->lk_lock & LK_SHARE) 895047dd67eSAttilio Rao printf(" lock type %s: SHARED (count %ju)\n", 896047dd67eSAttilio Rao lk->lock_object.lo_name, 897047dd67eSAttilio Rao (uintmax_t)LK_SHARERS(lk->lk_lock)); 898047dd67eSAttilio Rao else { 899047dd67eSAttilio Rao td = lockmgr_xholder(lk); 900047dd67eSAttilio Rao printf(" lock type %s: EXCL by thread %p (pid %d)\n", 901047dd67eSAttilio Rao lk->lock_object.lo_name, td, td->td_proc->p_pid); 902d7a7e179SAttilio Rao } 903d7a7e179SAttilio Rao 904047dd67eSAttilio Rao x = lk->lk_lock; 905047dd67eSAttilio Rao if (x & LK_EXCLUSIVE_WAITERS) 906047dd67eSAttilio Rao printf(" with exclusive waiters pending\n"); 907047dd67eSAttilio Rao if (x & LK_SHARED_WAITERS) 908047dd67eSAttilio Rao printf(" with shared waiters pending\n"); 909047dd67eSAttilio Rao 910047dd67eSAttilio Rao STACK_PRINT(lk); 911047dd67eSAttilio Rao } 912047dd67eSAttilio Rao 91399448ed1SJohn Dyson int 914047dd67eSAttilio Rao lockstatus(struct lock *lk) 91599448ed1SJohn Dyson { 916047dd67eSAttilio Rao uintptr_t v, x; 917047dd67eSAttilio Rao int ret; 91899448ed1SJohn Dyson 919047dd67eSAttilio Rao ret = LK_SHARED; 920047dd67eSAttilio Rao x = lk->lk_lock; 921047dd67eSAttilio Rao v = LK_HOLDER(x); 9220e9eb108SAttilio Rao 923047dd67eSAttilio Rao if ((x & LK_SHARE) == 0) { 924047dd67eSAttilio Rao if (v == (uintptr_t)curthread || v == LK_KERNPROC) 925047dd67eSAttilio Rao ret = LK_EXCLUSIVE; 9266bdfe06aSEivind Eklund else 927047dd67eSAttilio Rao ret = LK_EXCLOTHER; 928047dd67eSAttilio Rao } else if (x == LK_UNLOCKED) 929047dd67eSAttilio Rao ret = 0; 93099448ed1SJohn Dyson 931047dd67eSAttilio Rao return (ret); 93253bf4bb2SPeter Wemm } 933be6847d7SJohn Baldwin 93484887fa3SAttilio Rao #ifdef INVARIANT_SUPPORT 93584887fa3SAttilio Rao #ifndef INVARIANTS 93684887fa3SAttilio Rao #undef _lockmgr_assert 93784887fa3SAttilio Rao #endif 93884887fa3SAttilio Rao 93984887fa3SAttilio Rao void 940047dd67eSAttilio Rao _lockmgr_assert(struct lock *lk, int what, const char *file, int line) 94184887fa3SAttilio Rao { 94284887fa3SAttilio Rao int slocked = 0; 94384887fa3SAttilio Rao 94484887fa3SAttilio Rao if (panicstr != NULL) 94584887fa3SAttilio Rao return; 94684887fa3SAttilio Rao switch (what) { 94784887fa3SAttilio Rao case KA_SLOCKED: 94884887fa3SAttilio Rao case KA_SLOCKED | KA_NOTRECURSED: 94984887fa3SAttilio Rao case KA_SLOCKED | KA_RECURSED: 95084887fa3SAttilio Rao slocked = 1; 95184887fa3SAttilio Rao case KA_LOCKED: 95284887fa3SAttilio Rao case KA_LOCKED | KA_NOTRECURSED: 95384887fa3SAttilio Rao case KA_LOCKED | KA_RECURSED: 954e5f94314SAttilio Rao #ifdef WITNESS 955e5f94314SAttilio Rao 956e5f94314SAttilio Rao /* 957e5f94314SAttilio Rao * We cannot trust WITNESS if the lock is held in exclusive 958e5f94314SAttilio Rao * mode and a call to lockmgr_disown() happened. 959e5f94314SAttilio Rao * Workaround this skipping the check if the lock is held in 960e5f94314SAttilio Rao * exclusive mode even for the KA_LOCKED case. 961e5f94314SAttilio Rao */ 962e5f94314SAttilio Rao if (slocked || (lk->lk_lock & LK_SHARE)) { 963e5f94314SAttilio Rao witness_assert(&lk->lock_object, what, file, line); 964e5f94314SAttilio Rao break; 965e5f94314SAttilio Rao } 966e5f94314SAttilio Rao #endif 967047dd67eSAttilio Rao if (lk->lk_lock == LK_UNLOCKED || 968047dd67eSAttilio Rao ((lk->lk_lock & LK_SHARE) == 0 && (slocked || 969047dd67eSAttilio Rao (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))))) 97084887fa3SAttilio Rao panic("Lock %s not %slocked @ %s:%d\n", 971047dd67eSAttilio Rao lk->lock_object.lo_name, slocked ? "share" : "", 97284887fa3SAttilio Rao file, line); 973047dd67eSAttilio Rao 974047dd67eSAttilio Rao if ((lk->lk_lock & LK_SHARE) == 0) { 975047dd67eSAttilio Rao if (lockmgr_recursed(lk)) { 97684887fa3SAttilio Rao if (what & KA_NOTRECURSED) 97784887fa3SAttilio Rao panic("Lock %s recursed @ %s:%d\n", 978047dd67eSAttilio Rao lk->lock_object.lo_name, file, 979047dd67eSAttilio Rao line); 98084887fa3SAttilio Rao } else if (what & KA_RECURSED) 98184887fa3SAttilio Rao panic("Lock %s not recursed @ %s:%d\n", 982047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 98384887fa3SAttilio Rao } 98484887fa3SAttilio Rao break; 98584887fa3SAttilio Rao case KA_XLOCKED: 98684887fa3SAttilio Rao case KA_XLOCKED | KA_NOTRECURSED: 98784887fa3SAttilio Rao case KA_XLOCKED | KA_RECURSED: 988047dd67eSAttilio Rao if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)) 98984887fa3SAttilio Rao panic("Lock %s not exclusively locked @ %s:%d\n", 990047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 991047dd67eSAttilio Rao if (lockmgr_recursed(lk)) { 99284887fa3SAttilio Rao if (what & KA_NOTRECURSED) 99384887fa3SAttilio Rao panic("Lock %s recursed @ %s:%d\n", 994047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 99584887fa3SAttilio Rao } else if (what & KA_RECURSED) 99684887fa3SAttilio Rao panic("Lock %s not recursed @ %s:%d\n", 997047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 99884887fa3SAttilio Rao break; 99984887fa3SAttilio Rao case KA_UNLOCKED: 1000047dd67eSAttilio Rao if (lockmgr_xlocked(lk) || lockmgr_disowned(lk)) 100184887fa3SAttilio Rao panic("Lock %s exclusively locked @ %s:%d\n", 1002047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 100384887fa3SAttilio Rao break; 100484887fa3SAttilio Rao default: 1005047dd67eSAttilio Rao panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file, 1006047dd67eSAttilio Rao line); 100784887fa3SAttilio Rao } 100884887fa3SAttilio Rao } 1009047dd67eSAttilio Rao #endif 101084887fa3SAttilio Rao 1011be6847d7SJohn Baldwin #ifdef DDB 1012462a7addSJohn Baldwin int 1013462a7addSJohn Baldwin lockmgr_chain(struct thread *td, struct thread **ownerp) 1014462a7addSJohn Baldwin { 1015047dd67eSAttilio Rao struct lock *lk; 1016462a7addSJohn Baldwin 1017047dd67eSAttilio Rao lk = td->td_wchan; 1018462a7addSJohn Baldwin 1019047dd67eSAttilio Rao if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr) 1020462a7addSJohn Baldwin return (0); 1021047dd67eSAttilio Rao db_printf("blocked on lockmgr %s", lk->lock_object.lo_name); 1022047dd67eSAttilio Rao if (lk->lk_lock & LK_SHARE) 1023047dd67eSAttilio Rao db_printf("SHARED (count %ju)\n", 1024047dd67eSAttilio Rao (uintmax_t)LK_SHARERS(lk->lk_lock)); 1025047dd67eSAttilio Rao else 1026047dd67eSAttilio Rao db_printf("EXCL\n"); 1027047dd67eSAttilio Rao *ownerp = lockmgr_xholder(lk); 1028462a7addSJohn Baldwin 1029462a7addSJohn Baldwin return (1); 1030462a7addSJohn Baldwin } 1031462a7addSJohn Baldwin 1032047dd67eSAttilio Rao static void 103361bd5e21SKip Macy db_show_lockmgr(struct lock_object *lock) 1034be6847d7SJohn Baldwin { 1035be6847d7SJohn Baldwin struct thread *td; 1036047dd67eSAttilio Rao struct lock *lk; 1037be6847d7SJohn Baldwin 1038047dd67eSAttilio Rao lk = (struct lock *)lock; 1039be6847d7SJohn Baldwin 1040be6847d7SJohn Baldwin db_printf(" state: "); 1041047dd67eSAttilio Rao if (lk->lk_lock == LK_UNLOCKED) 1042be6847d7SJohn Baldwin db_printf("UNLOCKED\n"); 1043047dd67eSAttilio Rao else if (lk->lk_lock & LK_SHARE) 1044047dd67eSAttilio Rao db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock)); 1045047dd67eSAttilio Rao else { 1046047dd67eSAttilio Rao td = lockmgr_xholder(lk); 1047047dd67eSAttilio Rao if (td == (struct thread *)LK_KERNPROC) 1048047dd67eSAttilio Rao db_printf("XLOCK: LK_KERNPROC\n"); 1049047dd67eSAttilio Rao else 1050047dd67eSAttilio Rao db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1051047dd67eSAttilio Rao td->td_tid, td->td_proc->p_pid, 1052047dd67eSAttilio Rao td->td_proc->p_comm); 1053047dd67eSAttilio Rao if (lockmgr_recursed(lk)) 1054047dd67eSAttilio Rao db_printf(" recursed: %d\n", lk->lk_recurse); 1055047dd67eSAttilio Rao } 1056047dd67eSAttilio Rao db_printf(" waiters: "); 1057047dd67eSAttilio Rao switch (lk->lk_lock & LK_ALL_WAITERS) { 1058047dd67eSAttilio Rao case LK_SHARED_WAITERS: 1059047dd67eSAttilio Rao db_printf("shared\n"); 1060047dd67eSAttilio Rao case LK_EXCLUSIVE_WAITERS: 1061047dd67eSAttilio Rao db_printf("exclusive\n"); 1062047dd67eSAttilio Rao break; 1063047dd67eSAttilio Rao case LK_ALL_WAITERS: 1064047dd67eSAttilio Rao db_printf("shared and exclusive\n"); 1065047dd67eSAttilio Rao break; 1066047dd67eSAttilio Rao default: 1067047dd67eSAttilio Rao db_printf("none\n"); 1068047dd67eSAttilio Rao } 1069be6847d7SJohn Baldwin } 1070be6847d7SJohn Baldwin #endif 1071