/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/lockstat.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#endif

#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (__predict_false(_i > 0)) {					\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (__predict_false(mtx_owned(&Giant))) {			\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)
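/*
 * The GIANT_* macros above let a path that may sleep fully drop Giant
 * (including any recursion) and reacquire it afterwards; sleeplk() below
 * uses exactly this pattern around its sleepqueue wait.  A minimal,
 * illustrative sketch (not compiled; the wait channel is hypothetical):
 */
#if 0
static void
giant_drop_and_sleep(struct lock_object *lo)
{
	GIANT_DECLARE;

	GIANT_SAVE();		/* Drop every recursion level of Giant. */
	sleepq_lock(lo);
	sleepq_add(lo, NULL, "example", SLEEPQ_LK, SQ_SHARED_QUEUE);
	sleepq_wait(lo, 0);	/* Safe to sleep: Giant is not held. */
	GIANT_RESTORE();	/* Reacquire Giant to the saved depth. */
}
#endif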
static bool __always_inline
LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
{

	if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
	    LK_SHARE)
		return (true);
	if (fp || (!(x & LK_SHARE)))
		return (false);
	if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
	    (curthread->td_pflags & TDP_DEADLKTREAT))
		return (true);
	return (false);
}

#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked_v(v)						\
	(((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

#define	lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))

static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

static __read_mostly bool lk_adaptive = true;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
    0, "");
#define	lockmgr_delay locks_delay

struct lockmgr_wait {
	const char *iwmesg;
	int ipri;
	int itimo;
};
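/*
 * The knob above gates the adaptive-spinning fast paths below
 * (lockmgr_slock_adaptive()/lockmgr_xlock_adaptive()).  Assuming the
 * standard sysctl naming derived from the declaration, it can be toggled
 * at runtime, e.g.:
 *
 *	# sysctl debug.lockmgr.adaptive_spinning=0
 *
 * which makes contended acquisitions go straight to the sleepqueue path.
 */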
static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
    int flags, bool fp);
static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);

static void
lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
{
	struct lock_class *class;

	if (flags & LK_INTERLOCK) {
		class = LOCK_CLASS(ilk);
		class->lc_unlock(ilk);
	}

	if (__predict_false(wakeup_swapper))
		kick_proc0();
}

static void
lockmgr_note_shared_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
	    waittime, file, line, LOCKSTAT_READER);
	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
	TD_LOCKS_INC(curthread);
	TD_SLOCKS_INC(curthread);
	STACK_SAVE(lk);
}

static void
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
{

	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
}

static void
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
	    line);
	TD_LOCKS_INC(curthread);
	STACK_SAVE(lk);
}

static void
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
{

	if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
		WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_DEC(curthread);
	}
	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
	    line);
}
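/*
 * The LOCKSTAT_* hooks used by the helpers above publish DTrace probes.
 * Assuming the standard SDT name mangling ("__" becomes "-"), contention
 * on lockmgr locks can be observed with something like:
 *
 *	# dtrace -n 'lockstat:::lockmgr-block { @[stack()] = quantize(arg1); }'
 *
 * where arg1 is the time spent blocked, as recorded by the lockmgr__block
 * probe sites below.
 */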
static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lockmgr_read_value(lk);
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * Assumes the sleepqueue chain lock is held on entry and returns with it
 * released.  It also assumes the generic interlock is sane and previously
 * checked.  If LK_INTERLOCK is specified the interlock is not reacquired
 * after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Pick the sleep primitive matching the timeout and catch settings.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
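/*
 * Note how LK_SLEEPFAIL converts any successful-looking sleep into ENOLCK:
 * the lock state observed before sleeping is stale, so the caller is
 * expected to revalidate and retry.  An illustrative caller (hypothetical
 * "foo_lock", not part of this file):
 */
#if 0
	for (;;) {
		error = lockmgr(&foo_lock, LK_EXCLUSIVE | LK_SLEEPFAIL, NULL);
		if (error != ENOLCK)
			break;
		/* We slept; whatever was checked before must be rechecked. */
	}
#endif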
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x, orig_x;
	u_int realexslp;
	int queue, wakeup_swapper;

	wakeup_swapper = 0;
	for (;;) {
		x = lockmgr_read_value(lk);
		if (lockmgr_sunlock_try(lk, &x))
			break;

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		orig_x = lockmgr_read_value(lk);
retry_sleepq:
		x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that the lk_exslpfail count may be lying about
		 * the real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps, so lk_exslpfail might be considered an 'upper limit'
		 * bound, including the edge cases.
		 */
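		/*
		 * Worked example of the 'upper limit' caveat: if two
		 * LK_SLEEPFAIL waiters go to sleep (lk_exslpfail == 2) and
		 * one of them is interrupted off the queue, realexslp drops
		 * to 1 while the counter stays at 2.  The test below then
		 * treats every remaining exclusive sleeper as a LK_SLEEPFAIL
		 * one, wakes them (LK_SLEEPFAIL sleepers then fail with
		 * ENOLCK) and hands the lock to the shared queue instead.
		 */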
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
				    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}
		} else {
			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (lockmgr_sunlock_try(lk, &orig_x)) {
			sleepq_release(&lk->lock_object);
			break;
		}

		x |= LK_SHARERS_LOCK(1);
		if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
			orig_x = x;
			goto retry_sleepq;
		}
"shared" : 390047dd67eSAttilio Rao "exclusive"); 3912028867dSAttilio Rao wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 392da7bbd2cSJohn Baldwin 0, queue); 393047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 394047dd67eSAttilio Rao break; 395047dd67eSAttilio Rao } 396047dd67eSAttilio Rao 397c00115f1SMateusz Guzik LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER); 398da7bbd2cSJohn Baldwin return (wakeup_swapper); 399047dd67eSAttilio Rao } 400047dd67eSAttilio Rao 401047dd67eSAttilio Rao static void 402d576deedSPawel Jakub Dawidek assert_lockmgr(const struct lock_object *lock, int what) 403f9721b43SAttilio Rao { 404f9721b43SAttilio Rao 405f9721b43SAttilio Rao panic("lockmgr locks do not support assertions"); 406f9721b43SAttilio Rao } 407f9721b43SAttilio Rao 408047dd67eSAttilio Rao static void 4097faf4d90SDavide Italiano lock_lockmgr(struct lock_object *lock, uintptr_t how) 4106e21afd4SJohn Baldwin { 4116e21afd4SJohn Baldwin 4126e21afd4SJohn Baldwin panic("lockmgr locks do not support sleep interlocking"); 4136e21afd4SJohn Baldwin } 4146e21afd4SJohn Baldwin 4157faf4d90SDavide Italiano static uintptr_t 4166e21afd4SJohn Baldwin unlock_lockmgr(struct lock_object *lock) 4176e21afd4SJohn Baldwin { 4186e21afd4SJohn Baldwin 4196e21afd4SJohn Baldwin panic("lockmgr locks do not support sleep interlocking"); 4206e21afd4SJohn Baldwin } 4216e21afd4SJohn Baldwin 422a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 423a5aedd68SStacey Son static int 424d576deedSPawel Jakub Dawidek owner_lockmgr(const struct lock_object *lock, struct thread **owner) 425a5aedd68SStacey Son { 426a5aedd68SStacey Son 427a5aedd68SStacey Son panic("lockmgr locks do not support owner inquiring"); 428a5aedd68SStacey Son } 429a5aedd68SStacey Son #endif 430a5aedd68SStacey Son 43199448ed1SJohn Dyson void 432047dd67eSAttilio Rao lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags) 43399448ed1SJohn Dyson { 4346efc8a16SAttilio Rao int iflags; 4356efc8a16SAttilio Rao 436047dd67eSAttilio Rao MPASS((flags & ~LK_INIT_MASK) == 0); 437353998acSAttilio Rao ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock, 438353998acSAttilio Rao ("%s: lockmgr not aligned for %s: %p", __func__, wmesg, 439353998acSAttilio Rao &lk->lk_lock)); 44099448ed1SJohn Dyson 441f0830182SAttilio Rao iflags = LO_SLEEPABLE | LO_UPGRADABLE; 442f0830182SAttilio Rao if (flags & LK_CANRECURSE) 443f0830182SAttilio Rao iflags |= LO_RECURSABLE; 444047dd67eSAttilio Rao if ((flags & LK_NODUP) == 0) 4456efc8a16SAttilio Rao iflags |= LO_DUPOK; 4467fbfba7bSAttilio Rao if (flags & LK_NOPROFILE) 4477fbfba7bSAttilio Rao iflags |= LO_NOPROFILE; 448047dd67eSAttilio Rao if ((flags & LK_NOWITNESS) == 0) 4496efc8a16SAttilio Rao iflags |= LO_WITNESS; 4507fbfba7bSAttilio Rao if (flags & LK_QUIET) 4517fbfba7bSAttilio Rao iflags |= LO_QUIET; 452e63091eaSMarcel Moolenaar if (flags & LK_IS_VNODE) 453e63091eaSMarcel Moolenaar iflags |= LO_IS_VNODE; 45446713135SGleb Smirnoff if (flags & LK_NEW) 45546713135SGleb Smirnoff iflags |= LO_NEW; 4565fe188b1SMateusz Guzik iflags |= flags & LK_NOSHARE; 457047dd67eSAttilio Rao 458b5fb43e5SJohn Baldwin lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags); 459047dd67eSAttilio Rao lk->lk_lock = LK_UNLOCKED; 460047dd67eSAttilio Rao lk->lk_recurse = 0; 4612028867dSAttilio Rao lk->lk_exslpfail = 0; 462047dd67eSAttilio Rao lk->lk_timo = timo; 463047dd67eSAttilio Rao lk->lk_pri = pri; 464047dd67eSAttilio Rao STACK_ZERO(lk); 46599448ed1SJohn Dyson } 46699448ed1SJohn Dyson 4673634d5b2SJohn Baldwin /* 4683634d5b2SJohn 
/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockdisableshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}
static bool __always_inline
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
{

	/*
	 * If no other thread has an exclusive lock, or
	 * no exclusive waiter is present, bump the count of
	 * sharers.  Since we have to preserve the state of
	 * waiters, if we fail to acquire the shared lock
	 * loop back and retry.
	 */
	while (LK_CAN_SHARE(*xp, flags, fp)) {
		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
		    *xp + LK_ONE_SHARER)) {
			return (true);
		}
	}
	return (false);
}

static bool __always_inline
lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
{

	for (;;) {
		if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
			    *xp - LK_ONE_SHARER))
				return (true);
			continue;
		}
		break;
	}
	return (false);
}

static bool
lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
    int flags)
{
	struct thread *owner;
	uintptr_t x;

	x = *xp;
	MPASS(x != LK_UNLOCKED);
	owner = (struct thread *)LK_HOLDER(x);
	for (;;) {
		MPASS(owner != curthread);
		if (owner == (struct thread *)LK_KERNPROC)
			return (false);
		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
			return (false);
		if (owner == NULL)
			return (false);
		if (!TD_IS_RUNNING(owner))
			return (false);
		if ((x & LK_ALL_WAITERS) != 0)
			return (false);
		lock_delay(lda);
		x = lockmgr_read_value(lk);
		if (LK_CAN_SHARE(x, flags, false)) {
			*xp = x;
			return (true);
		}
		owner = (struct thread *)LK_HOLDER(x);
	}
}
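/*
 * lockmgr_slock_adaptive() above and lockmgr_xlock_adaptive() below spin
 * only while the lock looks likely to be released soon: a single exclusive
 * owner that is currently running on a CPU, has not disowned the lock to
 * LK_KERNPROC, and has no queued waiters.  In every other case the caller
 * falls back to the sleepqueue path, since spinning on a sleeping or
 * disowned owner would only waste cycles.
 */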
static __noinline int
lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	uintptr_t tid, x;
	int error = 0;
	const char *iwmesg;
	int ipri, itimo;

#ifdef KDTRACE_HOOKS
	uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	struct lock_delay_arg lda;

	if (KERNEL_PANICKED())
		goto out;

	tid = (uintptr_t)curthread;

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
		    file, line, flags & LK_INTERLOCK ? ilk : NULL);
	lock_delay_arg_init(&lda, &lockmgr_delay);
	if (!lk_adaptive)
		flags &= ~LK_ADAPTIVE;
	x = lockmgr_read_value(lk);
	/*
	 * The lock may already be locked exclusive by curthread,
	 * avoid deadlock.
	 */
	if (LK_HOLDER(x) == tid) {
		LOCK_LOG2(lk,
		    "%s: %p already held in exclusive mode",
		    __func__, lk);
		error = EDEADLK;
		goto out;
	}

	for (;;) {
		if (lockmgr_slock_try(lk, &x, flags, false))
			break;

		if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
			if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
				continue;
		}

#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&lk->lock_object,
		    &contested, &waittime);

		/*
		 * If the lock is expected to not sleep just give up
		 * and return.
		 */
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		 */
		sleepq_lock(&lk->lock_object);
		x = lockmgr_read_value(lk);
retry_sleepq:

		/*
		 * If the lock can be acquired in shared mode, try
		 * again.
		 */
		if (LK_CAN_SHARE(x, flags, false)) {
			sleepq_release(&lk->lock_object);
			continue;
		}

		/*
		 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
		 * loop back and retry.
		 */
		if ((x & LK_SHARED_WAITERS) == 0) {
			if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
			    x | LK_SHARED_WAITERS)) {
				goto retry_sleepq;
			}
			LOCK_LOG2(lk, "%s: %p set shared waiters flag",
			    __func__, lk);
		}

		if (lwa == NULL) {
			iwmesg = lk->lock_object.lo_name;
			ipri = lk->lk_pri;
			itimo = lk->lk_timo;
		} else {
			iwmesg = lwa->iwmesg;
			ipri = lwa->ipri;
			itimo = lwa->itimo;
		}

		/*
		 * Since we have been unable to acquire the shared lock
		 * and the shared waiters flag is set, we will sleep.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		    SQ_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
		flags &= ~LK_INTERLOCK;
		if (error) {
			LOCK_LOG3(lk,
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
			break;
		}
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
		    __func__, lk);
		x = lockmgr_read_value(lk);
	}
	if (error == 0) {
#ifdef KDTRACE_HOOKS
		if (sleep_time != 0)
			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
			    LOCKSTAT_READER, (x & LK_SHARE) == 0,
			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
		lockmgr_note_shared_acquire(lk, contested, waittime,
		    file, line, flags);
#else
		lockmgr_note_shared_acquire(lk, 0, 0, file, line,
		    flags);
#endif
	}

out:
	lockmgr_exit(flags, ilk, 0);
	return (error);
}

static bool
lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
{
	struct thread *owner;
	uintptr_t x;

	x = *xp;
	MPASS(x != LK_UNLOCKED);
	owner = (struct thread *)LK_HOLDER(x);
	for (;;) {
		MPASS(owner != curthread);
		if (owner == NULL)
			return (false);
		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
			return (false);
		if (owner == (struct thread *)LK_KERNPROC)
			return (false);
		if (!TD_IS_RUNNING(owner))
			return (false);
		if ((x & LK_ALL_WAITERS) != 0)
			return (false);
		lock_delay(lda);
		x = lockmgr_read_value(lk);
		if (x == LK_UNLOCKED) {
			*xp = x;
			return (true);
		}
		owner = (struct thread *)LK_HOLDER(x);
	}
}
static __noinline int
lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	struct lock_class *class;
	uintptr_t tid, x, v;
	int error = 0;
	const char *iwmesg;
	int ipri, itimo;

#ifdef KDTRACE_HOOKS
	uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	struct lock_delay_arg lda;

	if (KERNEL_PANICKED())
		goto out;

	tid = (uintptr_t)curthread;

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
		    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
		    ilk : NULL);

	/*
	 * If curthread already holds the lock and this one is
	 * allowed to recurse, simply recurse on it.
	 */
	if (lockmgr_xlocked(lk)) {
		if ((flags & LK_CANRECURSE) == 0 &&
		    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
			/*
			 * For a try operation, give up and return
			 * instead of panicking.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk,
				    "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				goto out;
			}
			if (flags & LK_INTERLOCK) {
				class = LOCK_CLASS(ilk);
				class->lc_unlock(ilk);
			}
			STACK_PRINT(lk);
			panic("%s: recursing on non recursive lockmgr %p "
			    "@ %s:%d\n", __func__, lk, file, line);
		}
		atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
		lk->lk_recurse++;
		LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
		    lk->lk_recurse, file, line);
		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		TD_LOCKS_INC(curthread);
		goto out;
	}
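	/*
	 * Illustrative recursion behavior (hypothetical "foo_lock"): a
	 * second exclusive acquisition by the owner only bumps lk_recurse,
	 * and each LK_RELEASE peels one level off before the final one
	 * drops the lock.
	 */
#if 0
	lockinit(&foo_lock, PVFS, "foolk", 0, LK_CANRECURSE);
	(void)lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
	(void)lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);	/* recurses */
	(void)lockmgr(&foo_lock, LK_RELEASE, NULL);	/* still held */
	(void)lockmgr(&foo_lock, LK_RELEASE, NULL);	/* released */
#endif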
	x = LK_UNLOCKED;
	lock_delay_arg_init(&lda, &lockmgr_delay);
	if (!lk_adaptive)
		flags &= ~LK_ADAPTIVE;
	for (;;) {
		if (x == LK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
				break;
			continue;
		}
		if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
			if (lockmgr_xlock_adaptive(&lda, lk, &x))
				continue;
		}
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&lk->lock_object,
		    &contested, &waittime);

		/*
		 * If the lock is expected to not sleep just give up
		 * and return.
		 */
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		 */
		sleepq_lock(&lk->lock_object);
		x = lockmgr_read_value(lk);
retry_sleepq:

		/*
		 * If the lock has been released while we spun on
		 * the sleepqueue chain lock just try again.
		 */
		if (x == LK_UNLOCKED) {
			sleepq_release(&lk->lock_object);
			continue;
		}

		/*
		 * The lock can be in the state where there is a
		 * pending queue of waiters, but still no owner.
		 * This happens when the lock is contested and an
		 * owner is going to claim the lock.
		 * If curthread is the one successfully acquiring it
		 * claim lock ownership and return, preserving waiters
		 * flags.
		 */
		v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		if ((x & ~v) == LK_UNLOCKED) {
			v &= ~LK_EXCLUSIVE_SPINNERS;
			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
			    tid | v)) {
				sleepq_release(&lk->lock_object);
				LOCK_LOG2(lk,
				    "%s: %p claimed by a new writer",
				    __func__, lk);
				break;
			}
			goto retry_sleepq;
		}

		/*
		 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
		 * fail, loop back and retry.
		 */
		if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
			if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
			    x | LK_EXCLUSIVE_WAITERS)) {
				goto retry_sleepq;
			}
			LOCK_LOG2(lk, "%s: %p set excl waiters flag",
			    __func__, lk);
		}

		if (lwa == NULL) {
			iwmesg = lk->lock_object.lo_name;
			ipri = lk->lk_pri;
			itimo = lk->lk_timo;
		} else {
			iwmesg = lwa->iwmesg;
			ipri = lwa->ipri;
			itimo = lwa->itimo;
		}

		/*
		 * Since we have been unable to acquire the
		 * exclusive lock and the exclusive waiters flag
		 * is set, we will sleep.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		    SQ_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
		flags &= ~LK_INTERLOCK;
		if (error) {
			LOCK_LOG3(lk,
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
			break;
		}
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
		    __func__, lk);
		x = lockmgr_read_value(lk);
	}
	if (error == 0) {
#ifdef KDTRACE_HOOKS
		if (sleep_time != 0)
			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
			    LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
		lockmgr_note_exclusive_acquire(lk, contested, waittime,
		    file, line, flags);
#else
		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
		    flags);
#endif
	}

out:
	lockmgr_exit(flags, ilk, 0);
	return (error);
}

static __noinline int
lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	uintptr_t tid, v, setv;
	int error = 0;
	int op;

	if (KERNEL_PANICKED())
		goto out;

	tid = (uintptr_t)curthread;

	_lockmgr_assert(lk, KA_SLOCKED, file, line);

	op = flags & LK_TYPE_MASK;
	v = lockmgr_read_value(lk);
	for (;;) {
		if (LK_SHARERS_LOCK(v) > 1) {
			if (op == LK_TRYUPGRADE) {
				LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
				    __func__, lk);
				error = EBUSY;
				goto out;
			}
			if (lockmgr_sunlock_try(lk, &v)) {
				lockmgr_note_shared_release(lk, file, line);
				goto out_xlock;
			}
		}
		MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));

		setv = tid;
		setv |= (v & LK_ALL_WAITERS);

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
			TD_SLOCKS_DEC(curthread);
			goto out;
		}
	}

out_xlock:
	error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
	flags &= ~LK_INTERLOCK;
out:
	lockmgr_exit(flags, ilk, 0);
	return (error);
}
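/*
 * Illustrative upgrade usage (hypothetical "foo_lock"): LK_TRYUPGRADE
 * fails with EBUSY when other sharers are present, while LK_UPGRADE may
 * drop the shared lock and sleep, so previously validated state must be
 * rechecked on success.
 */
#if 0
	(void)lockmgr(&foo_lock, LK_SHARED, NULL);
	if (lockmgr(&foo_lock, LK_TRYUPGRADE, NULL) == EBUSY) {
		error = lockmgr(&foo_lock, LK_UPGRADE, NULL);
		/* The lock may have been dropped; revalidate here. */
	}
#endif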
int
lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
	struct lock_class *class;
	uintptr_t x, tid;
	u_int op;
	bool locked;

	if (KERNEL_PANICKED())
		return (0);

	op = flags & LK_TYPE_MASK;
	locked = false;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
		if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
			break;
		x = lockmgr_read_value(lk);
		if (lockmgr_slock_try(lk, &x, flags, true)) {
			lockmgr_note_shared_acquire(lk, 0, 0,
			    file, line, flags);
			locked = true;
		} else {
			return (lockmgr_slock_hard(lk, flags, ilk, file, line,
			    NULL));
		}
		break;
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);
		tid = (uintptr_t)curthread;
		if (lockmgr_read_value(lk) == LK_UNLOCKED &&
		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
			    flags);
			locked = true;
		} else {
			return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
			    NULL));
		}
		break;
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
	default:
		break;
	}
	if (__predict_true(locked)) {
		if (__predict_false(flags & LK_INTERLOCK)) {
			class = LOCK_CLASS(ilk);
			class->lc_unlock(ilk);
		}
		return (0);
	} else {
		return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
	}
}

static __noinline int
lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
	int wakeup_swapper = 0;

	if (KERNEL_PANICKED())
		goto out;

	wakeup_swapper = wakeupshlk(lk, file, line);

out:
	lockmgr_exit(flags, ilk, wakeup_swapper);
	return (0);
}
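/*
 * Illustrative LK_INTERLOCK hand-off (hypothetical "foo_mtx"/"foo_lock"):
 * the interlock is dropped by lockmgr itself once the lock request is
 * queued or satisfied, closing the race between examining state under
 * the mutex and sleeping for the lock.
 */
#if 0
	mtx_lock(&foo_mtx);
	/* Examine state protected by foo_mtx, decide to take foo_lock. */
	error = lockmgr(&foo_lock, LK_EXCLUSIVE | LK_INTERLOCK,
	    &foo_mtx);	/* foo_mtx is released by lockmgr(). */
#endif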
static __noinline int
lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
	uintptr_t tid, v;
	int wakeup_swapper = 0;
	u_int realexslp;
	int queue;

	if (KERNEL_PANICKED())
		goto out;

	tid = (uintptr_t)curthread;

	/*
	 * As first option, treat the lock as if it has no
	 * waiters.
	 * Fix up the tid var if the lock has been disowned.
	 */
	if (LK_HOLDER(x) == LK_KERNPROC)
		tid = LK_KERNPROC;

	/*
	 * The lock is held in exclusive mode.
	 * If the lock is recursed also, then unrecurse it.
	 */
	if (lockmgr_recursed_v(x)) {
		LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
		lk->lk_recurse--;
		if (lk->lk_recurse == 0)
			atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
		goto out;
	}
	if (tid != LK_KERNPROC)
		LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
		    LOCKSTAT_WRITER);

	if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
		goto out;

	sleepq_lock(&lk->lock_object);
	x = lockmgr_read_value(lk);
	v = LK_UNLOCKED;

	/*
	 * If the lock has exclusive waiters, give them
	 * preference in order to avoid deadlock with
	 * shared runners-up.
	 * If interruptible sleeps left the exclusive queue
	 * empty, avoid starvation of the threads sleeping
	 * on the shared queue by giving them precedence
	 * and cleaning up the exclusive waiters bit anyway.
	 * Please note that the lk_exslpfail count may be lying
	 * about the real number of waiters with the
	 * LK_SLEEPFAIL flag on because they may be used in
	 * conjunction with interruptible sleeps, so
	 * lk_exslpfail might be considered an 'upper limit'
	 * bound, including the edge cases.
	 */
	MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
	realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
	if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
		if (lk->lk_exslpfail < realexslp) {
			lk->lk_exslpfail = 0;
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			lk->lk_exslpfail = 0;
			LOCK_LOG2(lk,
			    "%s: %p has only LK_SLEEPFAIL sleepers",
			    __func__, lk);
			LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
			    __func__, lk);
			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
			queue = SQ_SHARED_QUEUE;
		}
	} else {
		/*
		 * Exclusive waiters sleeping with LK_SLEEPFAIL
		 * on and using interruptible sleeps/timeout
		 * may have left spurious lk_exslpfail counts
		 * on, so clean it up anyway.
11951c6987ebSMateusz Guzik 		 */
11961c6987ebSMateusz Guzik 		lk->lk_exslpfail = 0;
11971c6987ebSMateusz Guzik 		queue = SQ_SHARED_QUEUE;
11981c6987ebSMateusz Guzik 	}
11991c6987ebSMateusz Guzik 
12001c6987ebSMateusz Guzik 	LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
12011c6987ebSMateusz Guzik 	    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
12021c6987ebSMateusz Guzik 	    "exclusive");
12031c6987ebSMateusz Guzik 	atomic_store_rel_ptr(&lk->lk_lock, v);
12041c6987ebSMateusz Guzik 	wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
12051c6987ebSMateusz Guzik 	sleepq_release(&lk->lock_object);
12061c6987ebSMateusz Guzik 
12071c6987ebSMateusz Guzik out:
12081c6987ebSMateusz Guzik 	lockmgr_exit(flags, ilk, wakeup_swapper);
12091c6987ebSMateusz Guzik 	return (0);
12101c6987ebSMateusz Guzik }
12111c6987ebSMateusz Guzik 
1212c8b29d12SMateusz Guzik /*
1213c8b29d12SMateusz Guzik  * Lightweight entry points for common operations.
1214c8b29d12SMateusz Guzik  *
1215c8b29d12SMateusz Guzik  * Functionality is similar to sx locks, in that none of the additional lockmgr
1216c8b29d12SMateusz Guzik  * features are supported. To be clear, these are NOT supported:
1217c8b29d12SMateusz Guzik  * 1. shared locking disablement
1218c8b29d12SMateusz Guzik  * 2. returning with an error after sleep
1219c8b29d12SMateusz Guzik  * 3. unlocking the interlock
1220c8b29d12SMateusz Guzik  *
1221c1b57fa7SMateusz Guzik  * If in doubt, use lockmgr_lock_flags.
1222c8b29d12SMateusz Guzik  */
1223c8b29d12SMateusz Guzik int
1224c8b29d12SMateusz Guzik lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1225c8b29d12SMateusz Guzik {
1226c8b29d12SMateusz Guzik 	uintptr_t x;
1227c8b29d12SMateusz Guzik 
1228c8b29d12SMateusz Guzik 	MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1229c8b29d12SMateusz Guzik 	MPASS((flags & LK_INTERLOCK) == 0);
1230c8b29d12SMateusz Guzik 	MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1231c8b29d12SMateusz Guzik 
1232c8b29d12SMateusz Guzik 	if (LK_CAN_WITNESS(flags))
1233c8b29d12SMateusz Guzik 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1234c8b29d12SMateusz Guzik 		    file, line, NULL);
123531ad4050SMateusz Guzik 	x = lockmgr_read_value(lk);
1236c8b29d12SMateusz Guzik 	if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1237c8b29d12SMateusz Guzik 		lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1238c8b29d12SMateusz Guzik 		return (0);
1239c8b29d12SMateusz Guzik 	}
1240c8b29d12SMateusz Guzik 
124131ad4050SMateusz Guzik 	return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1242c8b29d12SMateusz Guzik }
1243c8b29d12SMateusz Guzik 
1244c8b29d12SMateusz Guzik int
1245c8b29d12SMateusz Guzik lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1246c8b29d12SMateusz Guzik {
1247c8b29d12SMateusz Guzik 	uintptr_t tid;
1248c8b29d12SMateusz Guzik 
1249c8b29d12SMateusz Guzik 	MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1250c8b29d12SMateusz Guzik 	MPASS((flags & LK_INTERLOCK) == 0);
1251c8b29d12SMateusz Guzik 
1252c8b29d12SMateusz Guzik 	if (LK_CAN_WITNESS(flags))
1253c8b29d12SMateusz Guzik 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1254c8b29d12SMateusz Guzik 		    LOP_EXCLUSIVE, file, line, NULL);
1255c8b29d12SMateusz Guzik 	tid = (uintptr_t)curthread;
1256c8b29d12SMateusz Guzik 	if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1257c8b29d12SMateusz Guzik 		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1258c8b29d12SMateusz Guzik 		    flags);
1259c8b29d12SMateusz Guzik 		return (0);
1260c8b29d12SMateusz Guzik 	}
1261c8b29d12SMateusz Guzik 
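	/*
	 * (Editorial sketch: the fast path above is a single
	 * compare-and-swap moving the lock word from LK_UNLOCKED to the
	 * curthread pointer; on failure we fall through to the hard path
	 * below with LK_ADAPTIVE set, requesting adaptive spinning before
	 * sleeping.  Typical, hypothetical usage of these lightweight
	 * entry points:
	 *
	 *	lockmgr_xlock(lk, LK_EXCLUSIVE, __FILE__, __LINE__);
	 *	... modify the protected state ...
	 *	lockmgr_unlock(lk);
	 *
	 * Error handling for LK_NOWAIT-style callers is omitted.)
	 */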
126231ad4050SMateusz Guzik 	return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1263c8b29d12SMateusz Guzik }
1264c8b29d12SMateusz Guzik 
1265c8b29d12SMateusz Guzik int
1266c8b29d12SMateusz Guzik lockmgr_unlock(struct lock *lk)
1267c8b29d12SMateusz Guzik {
1268c8b29d12SMateusz Guzik 	uintptr_t x, tid;
1269c8b29d12SMateusz Guzik 	const char *file;
1270c8b29d12SMateusz Guzik 	int line;
1271c8b29d12SMateusz Guzik 
1272c8b29d12SMateusz Guzik 	file = __FILE__;
1273c8b29d12SMateusz Guzik 	line = __LINE__;
1274c8b29d12SMateusz Guzik 
1275c8b29d12SMateusz Guzik 	_lockmgr_assert(lk, KA_LOCKED, file, line);
1276bdb6d824SMateusz Guzik 	x = lockmgr_read_value(lk);
1277c8b29d12SMateusz Guzik 	if (__predict_true((x & LK_SHARE) != 0)) {
1278c8b29d12SMateusz Guzik 		lockmgr_note_shared_release(lk, file, line);
1279c00115f1SMateusz Guzik 		if (lockmgr_sunlock_try(lk, &x)) {
1280c00115f1SMateusz Guzik 			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1281c8b29d12SMateusz Guzik 		} else {
1282c8b29d12SMateusz Guzik 			return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1283c8b29d12SMateusz Guzik 		}
1284c8b29d12SMateusz Guzik 	} else {
1285c8b29d12SMateusz Guzik 		tid = (uintptr_t)curthread;
1286c00115f1SMateusz Guzik 		lockmgr_note_exclusive_release(lk, file, line);
12874aff9f5dSMateusz Guzik 		if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1288c00115f1SMateusz Guzik 			LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
1289c8b29d12SMateusz Guzik 		} else {
1290c8b29d12SMateusz Guzik 			return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1291c8b29d12SMateusz Guzik 		}
1292c8b29d12SMateusz Guzik 	}
1293c8b29d12SMateusz Guzik 	return (0);
1294c8b29d12SMateusz Guzik }
1295c8b29d12SMateusz Guzik 
12961c6987ebSMateusz Guzik int
12971c6987ebSMateusz Guzik __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
12981c6987ebSMateusz Guzik     const char *wmesg, int pri, int timo, const char *file, int line)
12991c6987ebSMateusz Guzik {
13001c6987ebSMateusz Guzik 	GIANT_DECLARE;
13011c6987ebSMateusz Guzik 	struct lockmgr_wait lwa;
13021c6987ebSMateusz Guzik 	struct lock_class *class;
13031c6987ebSMateusz Guzik 	const char *iwmesg;
13041c6987ebSMateusz Guzik 	uintptr_t tid, v, x;
13051c6987ebSMateusz Guzik 	u_int op, realexslp;
13061c6987ebSMateusz Guzik 	int error, ipri, itimo, queue, wakeup_swapper;
13071c6987ebSMateusz Guzik #ifdef LOCK_PROFILING
13081c6987ebSMateusz Guzik 	uint64_t waittime = 0;
13091c6987ebSMateusz Guzik 	int contested = 0;
13101c6987ebSMateusz Guzik #endif
13111c6987ebSMateusz Guzik 
1312879e0604SMateusz Guzik 	if (KERNEL_PANICKED())
1313b543c98cSConrad Meyer 		return (0);
1314b543c98cSConrad Meyer 
13151c6987ebSMateusz Guzik 	error = 0;
13161c6987ebSMateusz Guzik 	tid = (uintptr_t)curthread;
13171c6987ebSMateusz Guzik 	op = (flags & LK_TYPE_MASK);
13181c6987ebSMateusz Guzik 	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
13191c6987ebSMateusz Guzik 	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
13201c6987ebSMateusz Guzik 	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
13211c6987ebSMateusz Guzik 
13221c6987ebSMateusz Guzik 	lwa.iwmesg = iwmesg;
13231c6987ebSMateusz Guzik 	lwa.ipri = ipri;
13241c6987ebSMateusz Guzik 	lwa.itimo = itimo;
13251c6987ebSMateusz Guzik 
13261c6987ebSMateusz Guzik 	MPASS((flags & ~LK_TOTAL_MASK) == 0);
13271c6987ebSMateusz Guzik 	KASSERT((op & (op - 1)) == 0,
13281c6987ebSMateusz Guzik 	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
13291c6987ebSMateusz Guzik 	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
13301c6987ebSMateusz Guzik 	    (op != LK_DOWNGRADE && op != LK_RELEASE),
13311c6987ebSMateusz Guzik 	    ("%s: Invalid flags for the operation desired @ %s:%d",
13321c6987ebSMateusz Guzik 	    __func__, file, line));
13331c6987ebSMateusz Guzik 	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
13341c6987ebSMateusz Guzik 	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
13351c6987ebSMateusz Guzik 	    __func__, file, line));
13361c6987ebSMateusz Guzik 	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
13371c6987ebSMateusz Guzik 	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
13381c6987ebSMateusz Guzik 	    lk->lock_object.lo_name, file, line));
13391c6987ebSMateusz Guzik 
13401c6987ebSMateusz Guzik 	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
13411c6987ebSMateusz Guzik 
13421c6987ebSMateusz Guzik 	if (lk->lock_object.lo_flags & LK_NOSHARE) {
13431c6987ebSMateusz Guzik 		switch (op) {
13441c6987ebSMateusz Guzik 		case LK_SHARED:
13451c6987ebSMateusz Guzik 			op = LK_EXCLUSIVE;
13461c6987ebSMateusz Guzik 			break;
13471c6987ebSMateusz Guzik 		case LK_UPGRADE:
13481c6987ebSMateusz Guzik 		case LK_TRYUPGRADE:
13491c6987ebSMateusz Guzik 		case LK_DOWNGRADE:
13501c6987ebSMateusz Guzik 			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
13511c6987ebSMateusz Guzik 			    file, line);
13521c6987ebSMateusz Guzik 			if (flags & LK_INTERLOCK)
13531c6987ebSMateusz Guzik 				class->lc_unlock(ilk);
13541c6987ebSMateusz Guzik 			return (0);
13551c6987ebSMateusz Guzik 		}
13561c6987ebSMateusz Guzik 	}
13571c6987ebSMateusz Guzik 
13581c6987ebSMateusz Guzik 	wakeup_swapper = 0;
13591c6987ebSMateusz Guzik 	switch (op) {
13601c6987ebSMateusz Guzik 	case LK_SHARED:
13611c6987ebSMateusz Guzik 		return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
13621c6987ebSMateusz Guzik 		break;
13631c6987ebSMateusz Guzik 	case LK_UPGRADE:
13641c6987ebSMateusz Guzik 	case LK_TRYUPGRADE:
13651c6987ebSMateusz Guzik 		return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
13661c6987ebSMateusz Guzik 		break;
13671c6987ebSMateusz Guzik 	case LK_EXCLUSIVE:
13681c6987ebSMateusz Guzik 		return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1369047dd67eSAttilio Rao 		break;
1370047dd67eSAttilio Rao 	case LK_DOWNGRADE:
13711c7d98d0SAttilio Rao 		_lockmgr_assert(lk, KA_XLOCKED, file, line);
1372e5f94314SAttilio Rao 		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
13731c7d98d0SAttilio Rao 
13741c7d98d0SAttilio Rao 		/*
13751c7d98d0SAttilio Rao 		 * Panic if the lock is recursed.
13761c7d98d0SAttilio Rao 		 */
13771c7d98d0SAttilio Rao 		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
13781c7d98d0SAttilio Rao 			if (flags & LK_INTERLOCK)
13791c7d98d0SAttilio Rao 				class->lc_unlock(ilk);
13801c7d98d0SAttilio Rao 			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
13811c7d98d0SAttilio Rao 			    __func__, iwmesg, file, line);
13821c7d98d0SAttilio Rao 		}
1383e5f94314SAttilio Rao 		TD_SLOCKS_INC(curthread);
1384047dd67eSAttilio Rao 
1385047dd67eSAttilio Rao 		/*
1386047dd67eSAttilio Rao 		 * In order to preserve the waiters flags, just spin.
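		 * (Editorial sketch of the transition performed by the
		 * loop below, with W standing for the preserved waiter
		 * bits:
		 *
		 *	tid | W  ==>  LK_SHARERS_LOCK(1) | W
		 *
		 * i.e. the owning thread pointer is replaced by a share
		 * count of one while the waiter bits carry over
		 * unchanged.)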
1387047dd67eSAttilio Rao 		 */
1388047dd67eSAttilio Rao 		for (;;) {
1389bdb6d824SMateusz Guzik 			x = lockmgr_read_value(lk);
1390651175c9SAttilio Rao 			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1391651175c9SAttilio Rao 			x &= LK_ALL_WAITERS;
1392047dd67eSAttilio Rao 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1393e5f94314SAttilio Rao 			    LK_SHARERS_LOCK(1) | x))
1394047dd67eSAttilio Rao 				break;
1395047dd67eSAttilio Rao 			cpu_spinwait();
1396047dd67eSAttilio Rao 		}
13975b699f16SMark Johnston 		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
13985b699f16SMark Johnston 		LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1399047dd67eSAttilio Rao 		break;
1400047dd67eSAttilio Rao 	case LK_RELEASE:
1401047dd67eSAttilio Rao 		_lockmgr_assert(lk, KA_LOCKED, file, line);
1402bdb6d824SMateusz Guzik 		x = lockmgr_read_value(lk);
1403047dd67eSAttilio Rao 
14041c6987ebSMateusz Guzik 		if (__predict_true((x & LK_SHARE) != 0)) {
1405c00115f1SMateusz Guzik 			lockmgr_note_shared_release(lk, file, line);
14061c6987ebSMateusz Guzik 			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1407047dd67eSAttilio Rao 		} else {
1408c00115f1SMateusz Guzik 			lockmgr_note_exclusive_release(lk, file, line);
14091c6987ebSMateusz Guzik 			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
14102028867dSAttilio Rao 		}
1411047dd67eSAttilio Rao 		break;
1412047dd67eSAttilio Rao 	case LK_DRAIN:
1413e5f94314SAttilio Rao 		if (LK_CAN_WITNESS(flags))
1414e5f94314SAttilio Rao 			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
141524150d37SJohn Baldwin 			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
141624150d37SJohn Baldwin 			    ilk : NULL);
1417047dd67eSAttilio Rao 
1418047dd67eSAttilio Rao 		/*
141996f1567fSKonstantin Belousov 		 * Trying to drain a lock we already own will result in a
1420047dd67eSAttilio Rao 		 * deadlock.
1421047dd67eSAttilio Rao 		 */
1422047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk)) {
1423047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK)
1424047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1425047dd67eSAttilio Rao 			panic("%s: draining %s with the lock held @ %s:%d\n",
1426047dd67eSAttilio Rao 			    __func__, iwmesg, file, line);
1427047dd67eSAttilio Rao 		}
1428047dd67eSAttilio Rao 
1429fc4f686dSMateusz Guzik 		for (;;) {
1430fc4f686dSMateusz Guzik 			if (lk->lk_lock == LK_UNLOCKED &&
1431fc4f686dSMateusz Guzik 			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1432fc4f686dSMateusz Guzik 				break;
1433fc4f686dSMateusz Guzik 
1434f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
1435f5f9340bSFabien Thomas 			PMC_SOFT_CALL( , , lock, failed);
1436f5f9340bSFabien Thomas #endif
1437047dd67eSAttilio Rao 			lock_profile_obtain_lock_failed(&lk->lock_object,
1438047dd67eSAttilio Rao 			    &contested, &waittime);
1439047dd67eSAttilio Rao 
1440047dd67eSAttilio Rao 			/*
1441047dd67eSAttilio Rao 			 * If the operation is not expected to sleep, just
1442047dd67eSAttilio Rao 			 * give up and return.
1443047dd67eSAttilio Rao 			 */
1444047dd67eSAttilio Rao 			if (LK_TRYOP(flags)) {
1445047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p fails the try operation",
1446047dd67eSAttilio Rao 				    __func__, lk);
1447047dd67eSAttilio Rao 				error = EBUSY;
1448047dd67eSAttilio Rao 				break;
1449047dd67eSAttilio Rao 			}
1450047dd67eSAttilio Rao 
1451047dd67eSAttilio Rao 			/*
1452047dd67eSAttilio Rao 			 * Acquire the sleepqueue chain lock because we
1453047dd67eSAttilio Rao 			 * probably will need to manipulate the waiters flags.
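			 * (Editorial note: holding the sleepqueue chain
			 * lock serializes the waiters-flag updates below
			 * with the queueing of this thread, so a wakeup
			 * cannot be lost between re-reading lk_lock and
			 * going to sleep.)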
1454047dd67eSAttilio Rao 			 */
1455047dd67eSAttilio Rao 			sleepq_lock(&lk->lock_object);
1456bdb6d824SMateusz Guzik 			x = lockmgr_read_value(lk);
1457047dd67eSAttilio Rao 
1458047dd67eSAttilio Rao 			/*
1459047dd67eSAttilio Rao 			 * If the lock has been released while we spun on
1460047dd67eSAttilio Rao 			 * the sleepqueue chain lock, just try again.
1461047dd67eSAttilio Rao 			 */
1462047dd67eSAttilio Rao 			if (x == LK_UNLOCKED) {
1463047dd67eSAttilio Rao 				sleepq_release(&lk->lock_object);
1464047dd67eSAttilio Rao 				continue;
1465047dd67eSAttilio Rao 			}
1466047dd67eSAttilio Rao 
1467651175c9SAttilio Rao 			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1468651175c9SAttilio Rao 			if ((x & ~v) == LK_UNLOCKED) {
1469651175c9SAttilio Rao 				v = (x & ~LK_EXCLUSIVE_SPINNERS);
14702028867dSAttilio Rao 
14712028867dSAttilio Rao 				/*
14722028867dSAttilio Rao 				 * If interruptible sleeps left the exclusive
14732028867dSAttilio Rao 				 * queue empty, avoid starvation of the
14742028867dSAttilio Rao 				 * threads sleeping on the shared queue by
14752028867dSAttilio Rao 				 * giving them precedence and cleaning up the
14762028867dSAttilio Rao 				 * exclusive waiters bit anyway.
1477c636ba83SAttilio Rao 				 * Note that the lk_exslpfail count may
1478c636ba83SAttilio Rao 				 * overstate the real number of waiters with
1479c636ba83SAttilio Rao 				 * the LK_SLEEPFAIL flag on, because that
1480e3043798SPedro F. Giffuni 				 * flag may be used in conjunction with
1481aab9c8c2SAttilio Rao 				 * interruptible sleeps, so lk_exslpfail
1482aab9c8c2SAttilio Rao 				 * should be treated as an upper bound,
1483c636ba83SAttilio Rao 				 * edge cases included.
14842028867dSAttilio Rao 				 */
1485047dd67eSAttilio Rao 				if (v & LK_EXCLUSIVE_WAITERS) {
1486047dd67eSAttilio Rao 					queue = SQ_EXCLUSIVE_QUEUE;
1487047dd67eSAttilio Rao 					v &= ~LK_EXCLUSIVE_WAITERS;
1488047dd67eSAttilio Rao 				} else {
14899dbf7a62SAttilio Rao 
14909dbf7a62SAttilio Rao 					/*
14919dbf7a62SAttilio Rao 					 * Exclusive waiters sleeping with
14929dbf7a62SAttilio Rao 					 * LK_SLEEPFAIL on and using
14939dbf7a62SAttilio Rao 					 * interruptible sleeps/timeout may
14949dbf7a62SAttilio Rao 					 * have left spurious lk_exslpfail
14959dbf7a62SAttilio Rao 					 * counts on, so clean them up anyway.
14969dbf7a62SAttilio Rao 					 */
1497047dd67eSAttilio Rao 					MPASS(v & LK_SHARED_WAITERS);
14989dbf7a62SAttilio Rao 					lk->lk_exslpfail = 0;
1499047dd67eSAttilio Rao 					queue = SQ_SHARED_QUEUE;
1500047dd67eSAttilio Rao 					v &= ~LK_SHARED_WAITERS;
1501047dd67eSAttilio Rao 				}
15022028867dSAttilio Rao 				if (queue == SQ_EXCLUSIVE_QUEUE) {
15032028867dSAttilio Rao 					realexslp =
15042028867dSAttilio Rao 					    sleepq_sleepcnt(&lk->lock_object,
15052028867dSAttilio Rao 					    SQ_EXCLUSIVE_QUEUE);
15062028867dSAttilio Rao 					if (lk->lk_exslpfail >= realexslp) {
15072028867dSAttilio Rao 						lk->lk_exslpfail = 0;
15082028867dSAttilio Rao 						queue = SQ_SHARED_QUEUE;
15092028867dSAttilio Rao 						v &= ~LK_SHARED_WAITERS;
15102028867dSAttilio Rao 						if (realexslp != 0) {
15112028867dSAttilio Rao 							LOCK_LOG2(lk,
15122028867dSAttilio Rao 							    "%s: %p has only LK_SLEEPFAIL sleepers",
15132028867dSAttilio Rao 							    __func__, lk);
15142028867dSAttilio Rao 							LOCK_LOG2(lk,
15152028867dSAttilio Rao 							    "%s: %p waking up threads on the exclusive queue",
15162028867dSAttilio Rao 							    __func__, lk);
15172028867dSAttilio Rao 							wakeup_swapper =
15182028867dSAttilio Rao 							    sleepq_broadcast(
15192028867dSAttilio Rao 							    &lk->lock_object,
15202028867dSAttilio Rao 							    SLEEPQ_LK, 0,
15212028867dSAttilio Rao 							    SQ_EXCLUSIVE_QUEUE);
15222028867dSAttilio Rao 						}
15232028867dSAttilio Rao 					} else
15242028867dSAttilio Rao 						lk->lk_exslpfail = 0;
15252028867dSAttilio Rao 				}
1526047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1527047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1528047dd67eSAttilio Rao 					continue;
1529047dd67eSAttilio Rao 				}
1530047dd67eSAttilio Rao 				LOCK_LOG3(lk,
1531047dd67eSAttilio Rao 				    "%s: %p waking up all threads on the %s queue",
1532047dd67eSAttilio Rao 				    __func__, lk, queue == SQ_SHARED_QUEUE ?
1533047dd67eSAttilio Rao 				    "shared" : "exclusive");
1534814f26daSJohn Baldwin 				wakeup_swapper |= sleepq_broadcast(
1535da7bbd2cSJohn Baldwin 				    &lk->lock_object, SLEEPQ_LK, 0, queue);
1536047dd67eSAttilio Rao 
1537047dd67eSAttilio Rao 				/*
1538047dd67eSAttilio Rao 				 * If shared waiters have been woken up, we
1539047dd67eSAttilio Rao 				 * need to wait for one of them to acquire
1540047dd67eSAttilio Rao 				 * the lock before setting the exclusive
1541047dd67eSAttilio Rao 				 * waiters flag, in order to avoid a deadlock.
1542047dd67eSAttilio Rao 				 */
1543047dd67eSAttilio Rao 				if (queue == SQ_SHARED_QUEUE) {
1544047dd67eSAttilio Rao 					for (v = lk->lk_lock;
1545047dd67eSAttilio Rao 					    (v & LK_SHARE) && !LK_SHARERS(v);
1546047dd67eSAttilio Rao 					    v = lk->lk_lock)
1547047dd67eSAttilio Rao 						cpu_spinwait();
1548047dd67eSAttilio Rao 				}
1549047dd67eSAttilio Rao 			}
1550047dd67eSAttilio Rao 
1551047dd67eSAttilio Rao 			/*
1552047dd67eSAttilio Rao 			 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
1553047dd67eSAttilio Rao 			 * fail, loop back and retry.
1554047dd67eSAttilio Rao 			 */
1555047dd67eSAttilio Rao 			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1556047dd67eSAttilio Rao 				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1557047dd67eSAttilio Rao 				    x | LK_EXCLUSIVE_WAITERS)) {
1558047dd67eSAttilio Rao 					sleepq_release(&lk->lock_object);
1559047dd67eSAttilio Rao 					continue;
1560047dd67eSAttilio Rao 				}
1561047dd67eSAttilio Rao 				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1562047dd67eSAttilio Rao 				    __func__, lk);
1563047dd67eSAttilio Rao 			}
1564047dd67eSAttilio Rao 
1565047dd67eSAttilio Rao 			/*
1566047dd67eSAttilio Rao 			 * Since we have been unable to acquire the
1567047dd67eSAttilio Rao 			 * exclusive lock and the exclusive waiters flag
1568047dd67eSAttilio Rao 			 * is set, we will sleep.
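			 * (Editorial note: the interlock, when passed, is
			 * dropped right below and is not reacquired on
			 * wakeup, which is why LK_INTERLOCK is cleared
			 * here rather than after the sleep.)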
1569047dd67eSAttilio Rao 			 */
1570047dd67eSAttilio Rao 			if (flags & LK_INTERLOCK) {
1571047dd67eSAttilio Rao 				class->lc_unlock(ilk);
1572047dd67eSAttilio Rao 				flags &= ~LK_INTERLOCK;
1573047dd67eSAttilio Rao 			}
1574e5f94314SAttilio Rao 			GIANT_SAVE();
1575047dd67eSAttilio Rao 			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1576047dd67eSAttilio Rao 			    SQ_EXCLUSIVE_QUEUE);
1577047dd67eSAttilio Rao 			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1578e5f94314SAttilio Rao 			GIANT_RESTORE();
1579047dd67eSAttilio Rao 			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1580047dd67eSAttilio Rao 			    __func__, lk);
1581047dd67eSAttilio Rao 		}
1582047dd67eSAttilio Rao 
1583047dd67eSAttilio Rao 		if (error == 0) {
1584047dd67eSAttilio Rao 			lock_profile_obtain_lock_success(&lk->lock_object,
1585047dd67eSAttilio Rao 			    contested, waittime, file, line);
1586047dd67eSAttilio Rao 			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1587047dd67eSAttilio Rao 			    lk->lk_recurse, file, line);
1588e5f94314SAttilio Rao 			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1589e5f94314SAttilio Rao 			    LK_TRYWIT(flags), file, line);
1590047dd67eSAttilio Rao 			TD_LOCKS_INC(curthread);
1591047dd67eSAttilio Rao 			STACK_SAVE(lk);
1592047dd67eSAttilio Rao 		}
1593047dd67eSAttilio Rao 		break;
1594047dd67eSAttilio Rao 	default:
1595047dd67eSAttilio Rao 		if (flags & LK_INTERLOCK)
1596047dd67eSAttilio Rao 			class->lc_unlock(ilk);
1597047dd67eSAttilio Rao 		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1598047dd67eSAttilio Rao 	}
1599047dd67eSAttilio Rao 
1600047dd67eSAttilio Rao 	if (flags & LK_INTERLOCK)
1601047dd67eSAttilio Rao 		class->lc_unlock(ilk);
1602da7bbd2cSJohn Baldwin 	if (wakeup_swapper)
1603da7bbd2cSJohn Baldwin 		kick_proc0();
1604047dd67eSAttilio Rao 
1605047dd67eSAttilio Rao 	return (error);
1606047dd67eSAttilio Rao }
1607047dd67eSAttilio Rao 
1608d7a7e179SAttilio Rao void
1609047dd67eSAttilio Rao _lockmgr_disown(struct lock *lk, const char *file, int line)
1610047dd67eSAttilio Rao {
1611047dd67eSAttilio Rao 	uintptr_t tid, x;
1612047dd67eSAttilio Rao 
161335370593SAndriy Gapon 	if (SCHEDULER_STOPPED())
161435370593SAndriy Gapon 		return;
161535370593SAndriy Gapon 
1616047dd67eSAttilio Rao 	tid = (uintptr_t)curthread;
16171c7d98d0SAttilio Rao 	_lockmgr_assert(lk, KA_XLOCKED, file, line);
16181c7d98d0SAttilio Rao 
16191c7d98d0SAttilio Rao 	/*
16201c7d98d0SAttilio Rao 	 * Panic if the lock is recursed.
16211c7d98d0SAttilio Rao 	 */
16221c7d98d0SAttilio Rao 	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
16231c7d98d0SAttilio Rao 		panic("%s: disown a recursed lockmgr @ %s:%d\n",
16241c7d98d0SAttilio Rao 		    __func__, file, line);
1625047dd67eSAttilio Rao 
1626047dd67eSAttilio Rao 	/*
162796f1567fSKonstantin Belousov 	 * If the owner is already LK_KERNPROC, just skip the whole operation.
1628047dd67eSAttilio Rao 	 */
1629047dd67eSAttilio Rao 	if (LK_HOLDER(lk->lk_lock) != tid)
1630047dd67eSAttilio Rao 		return;
163104a28689SJeff Roberson 	lock_profile_release_lock(&lk->lock_object);
16325b699f16SMark Johnston 	LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1633e5f94314SAttilio Rao 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1634e5f94314SAttilio Rao 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1635e5f94314SAttilio Rao 	TD_LOCKS_DEC(curthread);
1636337c5ff4SAttilio Rao 	STACK_SAVE(lk);
1637047dd67eSAttilio Rao 
1638047dd67eSAttilio Rao 	/*
1639047dd67eSAttilio Rao 	 * In order to preserve the waiters flags, just spin.
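	 * (Editorial sketch, mirroring the LK_DOWNGRADE case above: with
	 * W the preserved waiter bits, the loop below performs
	 *
	 *	tid | W  ==>  LK_KERNPROC | W
	 *
	 * handing the exclusive lock to the kernel without waking any
	 * waiters.)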
1640047dd67eSAttilio Rao 	 */
1641047dd67eSAttilio Rao 	for (;;) {
1642bdb6d824SMateusz Guzik 		x = lockmgr_read_value(lk);
1643651175c9SAttilio Rao 		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1644651175c9SAttilio Rao 		x &= LK_ALL_WAITERS;
164522dd228dSAttilio Rao 		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1646e5f94314SAttilio Rao 		    LK_KERNPROC | x))
1647047dd67eSAttilio Rao 			return;
1648047dd67eSAttilio Rao 		cpu_spinwait();
1649047dd67eSAttilio Rao 	}
1650047dd67eSAttilio Rao }
1651047dd67eSAttilio Rao 
1652047dd67eSAttilio Rao void
1653d576deedSPawel Jakub Dawidek lockmgr_printinfo(const struct lock *lk)
1654d7a7e179SAttilio Rao {
1655d7a7e179SAttilio Rao 	struct thread *td;
1656047dd67eSAttilio Rao 	uintptr_t x;
1657d7a7e179SAttilio Rao 
1658047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1659047dd67eSAttilio Rao 		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1660047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1661047dd67eSAttilio Rao 		printf("lock type %s: SHARED (count %ju)\n",
1662047dd67eSAttilio Rao 		    lk->lock_object.lo_name,
1663047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1664047dd67eSAttilio Rao 	else {
1665047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1666e64b4fa8SKonstantin Belousov 		if (td == (struct thread *)LK_KERNPROC)
1667e64b4fa8SKonstantin Belousov 			printf("lock type %s: EXCL by KERNPROC\n",
1668e64b4fa8SKonstantin Belousov 			    lk->lock_object.lo_name);
1669e64b4fa8SKonstantin Belousov 		else
16702573ea5fSIvan Voras 			printf("lock type %s: EXCL by thread %p "
1671e64b4fa8SKonstantin Belousov 			    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1672e64b4fa8SKonstantin Belousov 			    td, td->td_proc->p_pid, td->td_proc->p_comm,
1673e64b4fa8SKonstantin Belousov 			    td->td_tid);
1674d7a7e179SAttilio Rao 	}
1675d7a7e179SAttilio Rao 
1676047dd67eSAttilio Rao 	x = lk->lk_lock;
1677047dd67eSAttilio Rao 	if (x & LK_EXCLUSIVE_WAITERS)
1678047dd67eSAttilio Rao 		printf(" with exclusive waiters pending\n");
1679047dd67eSAttilio Rao 	if (x & LK_SHARED_WAITERS)
1680047dd67eSAttilio Rao 		printf(" with shared waiters pending\n");
1681651175c9SAttilio Rao 	if (x & LK_EXCLUSIVE_SPINNERS)
1682651175c9SAttilio Rao 		printf(" with exclusive spinners pending\n");
1683047dd67eSAttilio Rao 
1684047dd67eSAttilio Rao 	STACK_PRINT(lk);
1685047dd67eSAttilio Rao }
1686047dd67eSAttilio Rao 
168799448ed1SJohn Dyson int
1688d576deedSPawel Jakub Dawidek lockstatus(const struct lock *lk)
168999448ed1SJohn Dyson {
1690047dd67eSAttilio Rao 	uintptr_t v, x;
1691047dd67eSAttilio Rao 	int ret;
169299448ed1SJohn Dyson 
1693047dd67eSAttilio Rao 	ret = LK_SHARED;
1694bdb6d824SMateusz Guzik 	x = lockmgr_read_value(lk);
1695047dd67eSAttilio Rao 	v = LK_HOLDER(x);
16960e9eb108SAttilio Rao 
1697047dd67eSAttilio Rao 	if ((x & LK_SHARE) == 0) {
1698047dd67eSAttilio Rao 		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1699047dd67eSAttilio Rao 			ret = LK_EXCLUSIVE;
17006bdfe06aSEivind Eklund 		else
1701047dd67eSAttilio Rao 			ret = LK_EXCLOTHER;
1702047dd67eSAttilio Rao 	} else if (x == LK_UNLOCKED)
1703047dd67eSAttilio Rao 		ret = 0;
170499448ed1SJohn Dyson 
1705047dd67eSAttilio Rao 	return (ret);
1706047dd67eSAttilio Rao }
1707be6847d7SJohn Baldwin 
170884887fa3SAttilio Rao #ifdef INVARIANT_SUPPORT
1709de5b1952SAlexander Leidinger 
1710de5b1952SAlexander Leidinger FEATURE(invariant_support,
1711de5b1952SAlexander Leidinger     "Support for modules compiled with INVARIANTS option");
1712de5b1952SAlexander Leidinger 
171384887fa3SAttilio Rao #ifndef INVARIANTS
171484887fa3SAttilio Rao #undef	_lockmgr_assert
171584887fa3SAttilio Rao #endif
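/*
 * Editorial usage sketch (hypothetical caller, not part of the KPI
 * documentation): assertions are issued through the function below with
 * one of the KA_* what-values, e.g.
 *
 *	_lockmgr_assert(lk, KA_XLOCKED, __FILE__, __LINE__);
 *
 * which panics unless curthread holds the lock exclusively or the lock
 * has been disowned to LK_KERNPROC.
 */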
171684887fa3SAttilio Rao 
171784887fa3SAttilio Rao void
1718d576deedSPawel Jakub Dawidek _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
171984887fa3SAttilio Rao {
172084887fa3SAttilio Rao 	int slocked = 0;
172184887fa3SAttilio Rao 
1722879e0604SMateusz Guzik 	if (KERNEL_PANICKED())
172384887fa3SAttilio Rao 		return;
172484887fa3SAttilio Rao 	switch (what) {
172584887fa3SAttilio Rao 	case KA_SLOCKED:
172684887fa3SAttilio Rao 	case KA_SLOCKED | KA_NOTRECURSED:
172784887fa3SAttilio Rao 	case KA_SLOCKED | KA_RECURSED:
172884887fa3SAttilio Rao 		slocked = 1;	/* FALLTHROUGH */
172984887fa3SAttilio Rao 	case KA_LOCKED:
173084887fa3SAttilio Rao 	case KA_LOCKED | KA_NOTRECURSED:
173184887fa3SAttilio Rao 	case KA_LOCKED | KA_RECURSED:
1732e5f94314SAttilio Rao #ifdef WITNESS
1733e5f94314SAttilio Rao 
1734e5f94314SAttilio Rao 		/*
1735e5f94314SAttilio Rao 		 * We cannot trust WITNESS if the lock is held in exclusive
1736e5f94314SAttilio Rao 		 * mode and a call to lockmgr_disown() happened.
1737e5f94314SAttilio Rao 		 * Work around this by skipping the check if the lock is held
1738e5f94314SAttilio Rao 		 * in exclusive mode, even for the KA_LOCKED case.
1739e5f94314SAttilio Rao 		 */
1740e5f94314SAttilio Rao 		if (slocked || (lk->lk_lock & LK_SHARE)) {
1741e5f94314SAttilio Rao 			witness_assert(&lk->lock_object, what, file, line);
1742e5f94314SAttilio Rao 			break;
1743e5f94314SAttilio Rao 		}
1744e5f94314SAttilio Rao #endif
1745047dd67eSAttilio Rao 		if (lk->lk_lock == LK_UNLOCKED ||
1746047dd67eSAttilio Rao 		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1747047dd67eSAttilio Rao 		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
174884887fa3SAttilio Rao 			panic("Lock %s not %slocked @ %s:%d\n",
1749047dd67eSAttilio Rao 			    lk->lock_object.lo_name, slocked ? "share" : "",
175084887fa3SAttilio Rao 			    file, line);
1751047dd67eSAttilio Rao 
1752047dd67eSAttilio Rao 		if ((lk->lk_lock & LK_SHARE) == 0) {
1753047dd67eSAttilio Rao 			if (lockmgr_recursed(lk)) {
175484887fa3SAttilio Rao 				if (what & KA_NOTRECURSED)
175584887fa3SAttilio Rao 					panic("Lock %s recursed @ %s:%d\n",
1756047dd67eSAttilio Rao 					    lk->lock_object.lo_name, file,
1757047dd67eSAttilio Rao 					    line);
175884887fa3SAttilio Rao 			} else if (what & KA_RECURSED)
175984887fa3SAttilio Rao 				panic("Lock %s not recursed @ %s:%d\n",
1760047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
176184887fa3SAttilio Rao 		}
176284887fa3SAttilio Rao 		break;
176384887fa3SAttilio Rao 	case KA_XLOCKED:
176484887fa3SAttilio Rao 	case KA_XLOCKED | KA_NOTRECURSED:
176584887fa3SAttilio Rao 	case KA_XLOCKED | KA_RECURSED:
1766047dd67eSAttilio Rao 		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
176784887fa3SAttilio Rao 			panic("Lock %s not exclusively locked @ %s:%d\n",
1768047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
1769047dd67eSAttilio Rao 		if (lockmgr_recursed(lk)) {
177084887fa3SAttilio Rao 			if (what & KA_NOTRECURSED)
177184887fa3SAttilio Rao 				panic("Lock %s recursed @ %s:%d\n",
1772047dd67eSAttilio Rao 				    lk->lock_object.lo_name, file, line);
177384887fa3SAttilio Rao 		} else if (what & KA_RECURSED)
177484887fa3SAttilio Rao 			panic("Lock %s not recursed @ %s:%d\n",
1775047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
177684887fa3SAttilio Rao 		break;
177784887fa3SAttilio Rao 	case KA_UNLOCKED:
1778047dd67eSAttilio Rao 		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
177984887fa3SAttilio Rao 			panic("Lock %s exclusively locked @ %s:%d\n",
1780047dd67eSAttilio Rao 			    lk->lock_object.lo_name, file, line);
178184887fa3SAttilio Rao 		break;
178284887fa3SAttilio Rao 	default:
1783047dd67eSAttilio Rao 		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1784047dd67eSAttilio Rao 		    line);
178584887fa3SAttilio Rao 	}
178684887fa3SAttilio Rao }
1787047dd67eSAttilio Rao #endif
178884887fa3SAttilio Rao 
1789be6847d7SJohn Baldwin #ifdef DDB
1790462a7addSJohn Baldwin int
1791462a7addSJohn Baldwin lockmgr_chain(struct thread *td, struct thread **ownerp)
1792462a7addSJohn Baldwin {
1793fea73412SConrad Meyer 	const struct lock *lk;
1794462a7addSJohn Baldwin 
1795047dd67eSAttilio Rao 	lk = td->td_wchan;
1796462a7addSJohn Baldwin 
1797047dd67eSAttilio Rao 	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1798462a7addSJohn Baldwin 		return (0);
1799047dd67eSAttilio Rao 	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1800047dd67eSAttilio Rao 	if (lk->lk_lock & LK_SHARE)
1801047dd67eSAttilio Rao 		db_printf("SHARED (count %ju)\n",
1802047dd67eSAttilio Rao 		    (uintmax_t)LK_SHARERS(lk->lk_lock));
1803047dd67eSAttilio Rao 	else
1804047dd67eSAttilio Rao 		db_printf("EXCL\n");
1805047dd67eSAttilio Rao 	*ownerp = lockmgr_xholder(lk);
1806462a7addSJohn Baldwin 
1807462a7addSJohn Baldwin 	return (1);
1808462a7addSJohn Baldwin }
1809462a7addSJohn Baldwin 
1810047dd67eSAttilio Rao static void
1811d576deedSPawel Jakub Dawidek db_show_lockmgr(const struct lock_object *lock)
1812be6847d7SJohn Baldwin {
1813be6847d7SJohn Baldwin 	struct thread *td;
1814d576deedSPawel Jakub Dawidek 	const struct lock *lk;
1815be6847d7SJohn Baldwin 
1816d576deedSPawel Jakub Dawidek 	lk = (const struct lock *)lock;
1817be6847d7SJohn Baldwin 
1818be6847d7SJohn Baldwin 	db_printf(" state: ");
1819047dd67eSAttilio Rao 	if (lk->lk_lock == LK_UNLOCKED)
1820be6847d7SJohn Baldwin 		db_printf("UNLOCKED\n");
1821047dd67eSAttilio Rao 	else if (lk->lk_lock & LK_SHARE)
1822047dd67eSAttilio Rao 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1823047dd67eSAttilio Rao 	else {
1824047dd67eSAttilio Rao 		td = lockmgr_xholder(lk);
1825047dd67eSAttilio Rao 		if (td == (struct thread *)LK_KERNPROC)
1826047dd67eSAttilio Rao 			db_printf("XLOCK: LK_KERNPROC\n");
1827047dd67eSAttilio Rao 		else
1828047dd67eSAttilio Rao 			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1829047dd67eSAttilio Rao 			    td->td_tid, td->td_proc->p_pid,
1830047dd67eSAttilio Rao 			    td->td_proc->p_comm);
1831047dd67eSAttilio Rao 		if (lockmgr_recursed(lk))
1832047dd67eSAttilio Rao 			db_printf(" recursed: %d\n", lk->lk_recurse);
1833047dd67eSAttilio Rao 	}
1834047dd67eSAttilio Rao 	db_printf(" waiters: ");
1835047dd67eSAttilio Rao 	switch (lk->lk_lock & LK_ALL_WAITERS) {
1836047dd67eSAttilio Rao 	case LK_SHARED_WAITERS:
1837047dd67eSAttilio Rao 		db_printf("shared\n");
1838e5023dd9SEdward Tomasz Napierala 		break;
1839047dd67eSAttilio Rao 	case LK_EXCLUSIVE_WAITERS:
1840047dd67eSAttilio Rao 		db_printf("exclusive\n");
1841047dd67eSAttilio Rao 		break;
1842047dd67eSAttilio Rao 	case LK_ALL_WAITERS:
1843047dd67eSAttilio Rao 		db_printf("shared and exclusive\n");
1844047dd67eSAttilio Rao 		break;
1845047dd67eSAttilio Rao 	default:
1846047dd67eSAttilio Rao 		db_printf("none\n");
1847047dd67eSAttilio Rao 	}
1848651175c9SAttilio Rao 	db_printf(" spinners: ");
1849651175c9SAttilio Rao 	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1850651175c9SAttilio Rao 		db_printf("exclusive\n");
1851651175c9SAttilio Rao 	else
1852651175c9SAttilio Rao 		db_printf("none\n");
1853be6847d7SJohn Baldwin }
1854be6847d7SJohn Baldwin #endif
1855
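/*
 * Editorial note: based on db_show_lockmgr() above, DDB output for a
 * contended, exclusively held lockmgr lock might look like the
 * following (the values are hypothetical; the format strings are taken
 * verbatim from the function):
 *
 *	 state: XLOCK: 0xfffff80003a5c000 (tid 100042, pid 42, "syncer")
 *	 recursed: 1
 *	 waiters: shared and exclusive
 *	 spinners: none
 */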