19454b2d8SWarner Losh /*- 28a36da99SPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 38a36da99SPedro F. Giffuni * 4047dd67eSAttilio Rao * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org> 5047dd67eSAttilio Rao * All rights reserved. 653bf4bb2SPeter Wemm * 753bf4bb2SPeter Wemm * Redistribution and use in source and binary forms, with or without 853bf4bb2SPeter Wemm * modification, are permitted provided that the following conditions 953bf4bb2SPeter Wemm * are met: 1053bf4bb2SPeter Wemm * 1. Redistributions of source code must retain the above copyright 11047dd67eSAttilio Rao * notice(s), this list of conditions and the following disclaimer as 12047dd67eSAttilio Rao * the first lines of this file unmodified other than the possible 13047dd67eSAttilio Rao * addition of one or more copyright notices. 1453bf4bb2SPeter Wemm * 2. Redistributions in binary form must reproduce the above copyright 15047dd67eSAttilio Rao * notice(s), this list of conditions and the following disclaimer in the 1653bf4bb2SPeter Wemm * documentation and/or other materials provided with the distribution. 1753bf4bb2SPeter Wemm * 18047dd67eSAttilio Rao * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY 19047dd67eSAttilio Rao * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 20047dd67eSAttilio Rao * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21047dd67eSAttilio Rao * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY 22047dd67eSAttilio Rao * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 23047dd67eSAttilio Rao * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24047dd67eSAttilio Rao * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25047dd67eSAttilio Rao * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2653bf4bb2SPeter Wemm * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27047dd67eSAttilio Rao * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 28047dd67eSAttilio Rao * DAMAGE. 2953bf4bb2SPeter Wemm */ 3053bf4bb2SPeter Wemm 31047dd67eSAttilio Rao #include "opt_ddb.h" 32f5f9340bSFabien Thomas #include "opt_hwpmc_hooks.h" 33047dd67eSAttilio Rao 34677b542eSDavid E. O'Brien #include <sys/cdefs.h> 35677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 36677b542eSDavid E. 
O'Brien 3753bf4bb2SPeter Wemm #include <sys/param.h> 38cd2fe4e6SAttilio Rao #include <sys/kdb.h> 3961d80e90SJohn Baldwin #include <sys/ktr.h> 4053bf4bb2SPeter Wemm #include <sys/lock.h> 41047dd67eSAttilio Rao #include <sys/lock_profile.h> 428302d183SBruce Evans #include <sys/lockmgr.h> 43d8881ca3SJohn Baldwin #include <sys/mutex.h> 448302d183SBruce Evans #include <sys/proc.h> 45047dd67eSAttilio Rao #include <sys/sleepqueue.h> 46e8ddb61dSJeff Roberson #ifdef DEBUG_LOCKS 47e8ddb61dSJeff Roberson #include <sys/stack.h> 48e8ddb61dSJeff Roberson #endif 49651175c9SAttilio Rao #include <sys/sysctl.h> 50047dd67eSAttilio Rao #include <sys/systm.h> 5153bf4bb2SPeter Wemm 52047dd67eSAttilio Rao #include <machine/cpu.h> 536efc8a16SAttilio Rao 54be6847d7SJohn Baldwin #ifdef DDB 55be6847d7SJohn Baldwin #include <ddb/ddb.h> 56047dd67eSAttilio Rao #endif 57047dd67eSAttilio Rao 58f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS 59f5f9340bSFabien Thomas #include <sys/pmckern.h> 60f5f9340bSFabien Thomas PMC_SOFT_DECLARE( , , lock, failed); 61f5f9340bSFabien Thomas #endif 62f5f9340bSFabien Thomas 63651175c9SAttilio Rao CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) == 64651175c9SAttilio Rao (LK_ADAPTIVE | LK_NOSHARE)); 65651175c9SAttilio Rao CTASSERT(LK_UNLOCKED == (LK_UNLOCKED & 66651175c9SAttilio Rao ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS))); 67047dd67eSAttilio Rao 68047dd67eSAttilio Rao #define SQ_EXCLUSIVE_QUEUE 0 69047dd67eSAttilio Rao #define SQ_SHARED_QUEUE 1 70047dd67eSAttilio Rao 71047dd67eSAttilio Rao #ifndef INVARIANTS 72047dd67eSAttilio Rao #define _lockmgr_assert(lk, what, file, line) 73047dd67eSAttilio Rao #endif 74ce1c953eSMark Johnston 75047dd67eSAttilio Rao #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++) 76047dd67eSAttilio Rao #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--) 77047dd67eSAttilio Rao 78047dd67eSAttilio Rao #ifndef DEBUG_LOCKS 79047dd67eSAttilio Rao #define STACK_PRINT(lk) 80047dd67eSAttilio Rao #define STACK_SAVE(lk) 81047dd67eSAttilio Rao #define STACK_ZERO(lk) 82047dd67eSAttilio Rao #else 83047dd67eSAttilio Rao #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack) 84047dd67eSAttilio Rao #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack) 85047dd67eSAttilio Rao #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack) 86047dd67eSAttilio Rao #endif 87047dd67eSAttilio Rao 88047dd67eSAttilio Rao #define LOCK_LOG2(lk, string, arg1, arg2) \ 89047dd67eSAttilio Rao if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \ 90047dd67eSAttilio Rao CTR2(KTR_LOCK, (string), (arg1), (arg2)) 91047dd67eSAttilio Rao #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \ 92047dd67eSAttilio Rao if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \ 93047dd67eSAttilio Rao CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3)) 94047dd67eSAttilio Rao 95e5f94314SAttilio Rao #define GIANT_DECLARE \ 96e5f94314SAttilio Rao int _i = 0; \ 97e5f94314SAttilio Rao WITNESS_SAVE_DECL(Giant) 98e5f94314SAttilio Rao #define GIANT_RESTORE() do { \ 99*6e8c1ccbSMateusz Guzik if (__predict_false(_i > 0)) { \ 100e5f94314SAttilio Rao while (_i--) \ 101e5f94314SAttilio Rao mtx_lock(&Giant); \ 102e5f94314SAttilio Rao WITNESS_RESTORE(&Giant.lock_object, Giant); \ 103e5f94314SAttilio Rao } \ 104e5f94314SAttilio Rao } while (0) 105e5f94314SAttilio Rao #define GIANT_SAVE() do { \ 106*6e8c1ccbSMateusz Guzik if (__predict_false(mtx_owned(&Giant))) { \ 107e5f94314SAttilio Rao WITNESS_SAVE(&Giant.lock_object, Giant); \ 108e5f94314SAttilio Rao while (mtx_owned(&Giant)) { \ 109e5f94314SAttilio Rao _i++; \ 110e5f94314SAttilio Rao mtx_unlock(&Giant); \ 
111e5f94314SAttilio Rao } \ 112e5f94314SAttilio Rao } \ 113e5f94314SAttilio Rao } while (0) 114e5f94314SAttilio Rao 11595ab076dSMateusz Guzik static bool __always_inline 11695ab076dSMateusz Guzik LK_CAN_SHARE(uintptr_t x, int flags, bool fp) 11795ab076dSMateusz Guzik { 11895ab076dSMateusz Guzik 11995ab076dSMateusz Guzik if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) == 12095ab076dSMateusz Guzik LK_SHARE) 12195ab076dSMateusz Guzik return (true); 12295ab076dSMateusz Guzik if (fp || (!(x & LK_SHARE))) 12395ab076dSMateusz Guzik return (false); 12495ab076dSMateusz Guzik if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) || 12595ab076dSMateusz Guzik (curthread->td_pflags & TDP_DEADLKTREAT)) 12695ab076dSMateusz Guzik return (true); 12795ab076dSMateusz Guzik return (false); 12895ab076dSMateusz Guzik } 12995ab076dSMateusz Guzik 130e5f94314SAttilio Rao #define LK_TRYOP(x) \ 131e5f94314SAttilio Rao ((x) & LK_NOWAIT) 132e5f94314SAttilio Rao 133e5f94314SAttilio Rao #define LK_CAN_WITNESS(x) \ 134e5f94314SAttilio Rao (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x)) 135e5f94314SAttilio Rao #define LK_TRYWIT(x) \ 136e5f94314SAttilio Rao (LK_TRYOP(x) ? LOP_TRYLOCK : 0) 137047dd67eSAttilio Rao 138651175c9SAttilio Rao #define LK_CAN_ADAPT(lk, f) \ 139651175c9SAttilio Rao (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 && \ 140651175c9SAttilio Rao ((f) & LK_SLEEPFAIL) == 0) 141651175c9SAttilio Rao 142047dd67eSAttilio Rao #define lockmgr_disowned(lk) \ 143047dd67eSAttilio Rao (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC) 144047dd67eSAttilio Rao 14510391db5SMateusz Guzik #define lockmgr_xlocked_v(v) \ 14610391db5SMateusz Guzik (((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread) 14710391db5SMateusz Guzik 14810391db5SMateusz Guzik #define lockmgr_xlocked(lk) lockmgr_xlocked_v((lk)->lk_lock) 149047dd67eSAttilio Rao 150d576deedSPawel Jakub Dawidek static void assert_lockmgr(const struct lock_object *lock, int how); 151047dd67eSAttilio Rao #ifdef DDB 152d576deedSPawel Jakub Dawidek static void db_show_lockmgr(const struct lock_object *lock); 153be6847d7SJohn Baldwin #endif 1547faf4d90SDavide Italiano static void lock_lockmgr(struct lock_object *lock, uintptr_t how); 155a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 156d576deedSPawel Jakub Dawidek static int owner_lockmgr(const struct lock_object *lock, 157d576deedSPawel Jakub Dawidek struct thread **owner); 158a5aedd68SStacey Son #endif 1597faf4d90SDavide Italiano static uintptr_t unlock_lockmgr(struct lock_object *lock); 16061bd5e21SKip Macy 16161bd5e21SKip Macy struct lock_class lock_class_lockmgr = { 1623ff6d229SJohn Baldwin .lc_name = "lockmgr", 163047dd67eSAttilio Rao .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE, 164f9721b43SAttilio Rao .lc_assert = assert_lockmgr, 16561bd5e21SKip Macy #ifdef DDB 1666e21afd4SJohn Baldwin .lc_ddb_show = db_show_lockmgr, 16761bd5e21SKip Macy #endif 1686e21afd4SJohn Baldwin .lc_lock = lock_lockmgr, 169a5aedd68SStacey Son .lc_unlock = unlock_lockmgr, 170a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 171a5aedd68SStacey Son .lc_owner = owner_lockmgr, 172a5aedd68SStacey Son #endif 17361bd5e21SKip Macy }; 17461bd5e21SKip Macy 1751c6987ebSMateusz Guzik struct lockmgr_wait { 1761c6987ebSMateusz Guzik const char *iwmesg; 1771c6987ebSMateusz Guzik int ipri; 1781c6987ebSMateusz Guzik int itimo; 1791c6987ebSMateusz Guzik }; 1801c6987ebSMateusz Guzik 181c4a48867SMateusz Guzik static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp, 18295ab076dSMateusz 
Guzik int flags, bool fp); 1831c6987ebSMateusz Guzik static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp); 1841c6987ebSMateusz Guzik 1851c6987ebSMateusz Guzik static void 1861c6987ebSMateusz Guzik lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper) 1871c6987ebSMateusz Guzik { 1881c6987ebSMateusz Guzik struct lock_class *class; 1891c6987ebSMateusz Guzik 1901c6987ebSMateusz Guzik if (flags & LK_INTERLOCK) { 1911c6987ebSMateusz Guzik class = LOCK_CLASS(ilk); 1921c6987ebSMateusz Guzik class->lc_unlock(ilk); 1931c6987ebSMateusz Guzik } 1941c6987ebSMateusz Guzik 1951c6987ebSMateusz Guzik if (__predict_false(wakeup_swapper)) 1961c6987ebSMateusz Guzik kick_proc0(); 1971c6987ebSMateusz Guzik } 198c4a48867SMateusz Guzik 199c4a48867SMateusz Guzik static void 200c4a48867SMateusz Guzik lockmgr_note_shared_acquire(struct lock *lk, int contested, 201c4a48867SMateusz Guzik uint64_t waittime, const char *file, int line, int flags) 202c4a48867SMateusz Guzik { 203c4a48867SMateusz Guzik 204c4a48867SMateusz Guzik lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime, 205c4a48867SMateusz Guzik file, line); 206c4a48867SMateusz Guzik LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line); 207c4a48867SMateusz Guzik WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line); 208c4a48867SMateusz Guzik TD_LOCKS_INC(curthread); 209c4a48867SMateusz Guzik TD_SLOCKS_INC(curthread); 210c4a48867SMateusz Guzik STACK_SAVE(lk); 211c4a48867SMateusz Guzik } 212c4a48867SMateusz Guzik 213c4a48867SMateusz Guzik static void 214c4a48867SMateusz Guzik lockmgr_note_shared_release(struct lock *lk, const char *file, int line) 215c4a48867SMateusz Guzik { 216c4a48867SMateusz Guzik 217c4a48867SMateusz Guzik lock_profile_release_lock(&lk->lock_object); 218c4a48867SMateusz Guzik WITNESS_UNLOCK(&lk->lock_object, 0, file, line); 219c4a48867SMateusz Guzik LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line); 220c4a48867SMateusz Guzik TD_LOCKS_DEC(curthread); 221c4a48867SMateusz Guzik TD_SLOCKS_DEC(curthread); 222c4a48867SMateusz Guzik } 223c4a48867SMateusz Guzik 224c4a48867SMateusz Guzik static void 225c4a48867SMateusz Guzik lockmgr_note_exclusive_acquire(struct lock *lk, int contested, 226c4a48867SMateusz Guzik uint64_t waittime, const char *file, int line, int flags) 227c4a48867SMateusz Guzik { 228c4a48867SMateusz Guzik 229c4a48867SMateusz Guzik lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime, 230c4a48867SMateusz Guzik file, line); 231c4a48867SMateusz Guzik LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line); 232c4a48867SMateusz Guzik WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file, 233c4a48867SMateusz Guzik line); 234c4a48867SMateusz Guzik TD_LOCKS_INC(curthread); 235c4a48867SMateusz Guzik STACK_SAVE(lk); 236c4a48867SMateusz Guzik } 237c4a48867SMateusz Guzik 238c4a48867SMateusz Guzik static void 239c4a48867SMateusz Guzik lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line) 240c4a48867SMateusz Guzik { 241c4a48867SMateusz Guzik 242c4a48867SMateusz Guzik lock_profile_release_lock(&lk->lock_object); 243c4a48867SMateusz Guzik LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file, 244c4a48867SMateusz Guzik line); 245c4a48867SMateusz Guzik WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line); 246c4a48867SMateusz Guzik TD_LOCKS_DEC(curthread); 247c4a48867SMateusz Guzik } 248c4a48867SMateusz Guzik 249047dd67eSAttilio Rao static __inline struct thread * 
250d576deedSPawel Jakub Dawidek lockmgr_xholder(const struct lock *lk) 251047dd67eSAttilio Rao { 252047dd67eSAttilio Rao uintptr_t x; 253047dd67eSAttilio Rao 254047dd67eSAttilio Rao x = lk->lk_lock; 255047dd67eSAttilio Rao return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x)); 256047dd67eSAttilio Rao } 25784887fa3SAttilio Rao 25853bf4bb2SPeter Wemm /* 259047dd67eSAttilio Rao * It assumes sleepq_lock held and returns with this one unheld. 260047dd67eSAttilio Rao * It also assumes the generic interlock is sane and previously checked. 261047dd67eSAttilio Rao * If LK_INTERLOCK is specified the interlock is not reacquired after the 262047dd67eSAttilio Rao * sleep. 26353bf4bb2SPeter Wemm */ 264047dd67eSAttilio Rao static __inline int 265047dd67eSAttilio Rao sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk, 266047dd67eSAttilio Rao const char *wmesg, int pri, int timo, int queue) 267047dd67eSAttilio Rao { 268e5f94314SAttilio Rao GIANT_DECLARE; 269047dd67eSAttilio Rao struct lock_class *class; 270047dd67eSAttilio Rao int catch, error; 27153bf4bb2SPeter Wemm 272047dd67eSAttilio Rao class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL; 2735047a8fdSAttilio Rao catch = pri & PCATCH; 274047dd67eSAttilio Rao pri &= PRIMASK; 275047dd67eSAttilio Rao error = 0; 276047dd67eSAttilio Rao 277047dd67eSAttilio Rao LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk, 278047dd67eSAttilio Rao (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared"); 279047dd67eSAttilio Rao 280047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 281047dd67eSAttilio Rao class->lc_unlock(ilk); 2822028867dSAttilio Rao if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) 2832028867dSAttilio Rao lk->lk_exslpfail++; 284e5f94314SAttilio Rao GIANT_SAVE(); 285047dd67eSAttilio Rao sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ? 286047dd67eSAttilio Rao SLEEPQ_INTERRUPTIBLE : 0), queue); 287a115fb62SHans Petter Selasky if ((flags & LK_TIMELOCK) && timo) 288047dd67eSAttilio Rao sleepq_set_timeout(&lk->lock_object, timo); 289a115fb62SHans Petter Selasky 290047dd67eSAttilio Rao /* 291047dd67eSAttilio Rao * Decisional switch for real sleeping. 
292047dd67eSAttilio Rao */ 293047dd67eSAttilio Rao if ((flags & LK_TIMELOCK) && timo && catch) 294047dd67eSAttilio Rao error = sleepq_timedwait_sig(&lk->lock_object, pri); 295047dd67eSAttilio Rao else if ((flags & LK_TIMELOCK) && timo) 296047dd67eSAttilio Rao error = sleepq_timedwait(&lk->lock_object, pri); 297047dd67eSAttilio Rao else if (catch) 298047dd67eSAttilio Rao error = sleepq_wait_sig(&lk->lock_object, pri); 299047dd67eSAttilio Rao else 300047dd67eSAttilio Rao sleepq_wait(&lk->lock_object, pri); 301e5f94314SAttilio Rao GIANT_RESTORE(); 302047dd67eSAttilio Rao if ((flags & LK_SLEEPFAIL) && error == 0) 303047dd67eSAttilio Rao error = ENOLCK; 304047dd67eSAttilio Rao 305047dd67eSAttilio Rao return (error); 306047dd67eSAttilio Rao } 307047dd67eSAttilio Rao 308da7bbd2cSJohn Baldwin static __inline int 309047dd67eSAttilio Rao wakeupshlk(struct lock *lk, const char *file, int line) 310047dd67eSAttilio Rao { 3110ad122a9SMateusz Guzik uintptr_t v, x, orig_x; 3122028867dSAttilio Rao u_int realexslp; 313da7bbd2cSJohn Baldwin int queue, wakeup_swapper; 314047dd67eSAttilio Rao 315da7bbd2cSJohn Baldwin wakeup_swapper = 0; 316047dd67eSAttilio Rao for (;;) { 317047dd67eSAttilio Rao x = lk->lk_lock; 3181c6987ebSMateusz Guzik if (lockmgr_sunlock_try(lk, &x)) 319047dd67eSAttilio Rao break; 320047dd67eSAttilio Rao 321047dd67eSAttilio Rao /* 322047dd67eSAttilio Rao * We should have a sharer with waiters, so enter the hard 323047dd67eSAttilio Rao * path in order to handle wakeups correctly. 324047dd67eSAttilio Rao */ 325047dd67eSAttilio Rao sleepq_lock(&lk->lock_object); 3260ad122a9SMateusz Guzik orig_x = lk->lk_lock; 3270ad122a9SMateusz Guzik retry_sleepq: 3280ad122a9SMateusz Guzik x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS); 329047dd67eSAttilio Rao v = LK_UNLOCKED; 330047dd67eSAttilio Rao 331047dd67eSAttilio Rao /* 332047dd67eSAttilio Rao * If the lock has exclusive waiters, give them preference in 333047dd67eSAttilio Rao * order to avoid deadlock with shared runners up. 3342028867dSAttilio Rao * If interruptible sleeps left the exclusive queue empty 3352028867dSAttilio Rao * avoid a starvation for the threads sleeping on the shared 3362028867dSAttilio Rao * queue by giving them precedence and cleaning up the 3372028867dSAttilio Rao * exclusive waiters bit anyway. 338c636ba83SAttilio Rao * Please note that lk_exslpfail count may be lying about 339c636ba83SAttilio Rao * the real number of waiters with the LK_SLEEPFAIL flag on 340e3043798SPedro F. Giffuni * because they may be used in conjunction with interruptible 341aab9c8c2SAttilio Rao * sleeps so lk_exslpfail might be considered an 'upper limit' 342aab9c8c2SAttilio Rao * bound, including the edge cases. 
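 *
 * Editor's note (added, not in the original source): as a concrete
 * reading of the test below, suppose three threads sleep on the
 * exclusive queue (realexslp == 3).  If lk_exslpfail < 3, at least one
 * of them is a regular exclusive waiter, so the exclusive queue keeps
 * its priority.  If lk_exslpfail >= 3, every sleeper is assumed to be
 * an LK_SLEEPFAIL one: they are all woken (each then fails with ENOLCK
 * out of sleeplk()) and preference moves to the shared queue.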
343047dd67eSAttilio Rao */
3442028867dSAttilio Rao realexslp = sleepq_sleepcnt(&lk->lock_object,
3452028867dSAttilio Rao SQ_EXCLUSIVE_QUEUE);
3462028867dSAttilio Rao if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
3472028867dSAttilio Rao if (lk->lk_exslpfail < realexslp) {
3482028867dSAttilio Rao lk->lk_exslpfail = 0;
349047dd67eSAttilio Rao queue = SQ_EXCLUSIVE_QUEUE;
350047dd67eSAttilio Rao v |= (x & LK_SHARED_WAITERS);
351047dd67eSAttilio Rao } else {
3522028867dSAttilio Rao lk->lk_exslpfail = 0;
3532028867dSAttilio Rao LOCK_LOG2(lk,
3542028867dSAttilio Rao "%s: %p has only LK_SLEEPFAIL sleepers",
3552028867dSAttilio Rao __func__, lk);
3562028867dSAttilio Rao LOCK_LOG2(lk,
3572028867dSAttilio Rao "%s: %p waking up threads on the exclusive queue",
3582028867dSAttilio Rao __func__, lk);
3592028867dSAttilio Rao wakeup_swapper =
3602028867dSAttilio Rao sleepq_broadcast(&lk->lock_object,
3612028867dSAttilio Rao SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
3622028867dSAttilio Rao queue = SQ_SHARED_QUEUE;
3632028867dSAttilio Rao }
3642028867dSAttilio Rao
3652028867dSAttilio Rao } else {
3669dbf7a62SAttilio Rao
3679dbf7a62SAttilio Rao /*
3689dbf7a62SAttilio Rao * Exclusive waiters sleeping with LK_SLEEPFAIL on
3699dbf7a62SAttilio Rao * and using interruptible sleeps/timeout may have
3709dbf7a62SAttilio Rao * left spurious lk_exslpfail counts on, so clean
3719dbf7a62SAttilio Rao * it up anyway.
3729dbf7a62SAttilio Rao */
3739dbf7a62SAttilio Rao lk->lk_exslpfail = 0;
374047dd67eSAttilio Rao queue = SQ_SHARED_QUEUE;
375047dd67eSAttilio Rao }
376047dd67eSAttilio Rao
3770ad122a9SMateusz Guzik if (lockmgr_sunlock_try(lk, &orig_x)) {
378047dd67eSAttilio Rao sleepq_release(&lk->lock_object);
3790ad122a9SMateusz Guzik break;
3800ad122a9SMateusz Guzik }
3810ad122a9SMateusz Guzik
3820ad122a9SMateusz Guzik x |= LK_SHARERS_LOCK(1);
3830ad122a9SMateusz Guzik if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
3840ad122a9SMateusz Guzik orig_x = x;
3850ad122a9SMateusz Guzik goto retry_sleepq;
386047dd67eSAttilio Rao }
387047dd67eSAttilio Rao LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
388047dd67eSAttilio Rao __func__, lk, queue == SQ_SHARED_QUEUE ?
"shared" : 389047dd67eSAttilio Rao "exclusive"); 3902028867dSAttilio Rao wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 391da7bbd2cSJohn Baldwin 0, queue); 392047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 393047dd67eSAttilio Rao break; 394047dd67eSAttilio Rao } 395047dd67eSAttilio Rao 396c4a48867SMateusz Guzik lockmgr_note_shared_release(lk, file, line); 397da7bbd2cSJohn Baldwin return (wakeup_swapper); 398047dd67eSAttilio Rao } 399047dd67eSAttilio Rao 400047dd67eSAttilio Rao static void 401d576deedSPawel Jakub Dawidek assert_lockmgr(const struct lock_object *lock, int what) 402f9721b43SAttilio Rao { 403f9721b43SAttilio Rao 404f9721b43SAttilio Rao panic("lockmgr locks do not support assertions"); 405f9721b43SAttilio Rao } 406f9721b43SAttilio Rao 407047dd67eSAttilio Rao static void 4087faf4d90SDavide Italiano lock_lockmgr(struct lock_object *lock, uintptr_t how) 4096e21afd4SJohn Baldwin { 4106e21afd4SJohn Baldwin 4116e21afd4SJohn Baldwin panic("lockmgr locks do not support sleep interlocking"); 4126e21afd4SJohn Baldwin } 4136e21afd4SJohn Baldwin 4147faf4d90SDavide Italiano static uintptr_t 4156e21afd4SJohn Baldwin unlock_lockmgr(struct lock_object *lock) 4166e21afd4SJohn Baldwin { 4176e21afd4SJohn Baldwin 4186e21afd4SJohn Baldwin panic("lockmgr locks do not support sleep interlocking"); 4196e21afd4SJohn Baldwin } 4206e21afd4SJohn Baldwin 421a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 422a5aedd68SStacey Son static int 423d576deedSPawel Jakub Dawidek owner_lockmgr(const struct lock_object *lock, struct thread **owner) 424a5aedd68SStacey Son { 425a5aedd68SStacey Son 426a5aedd68SStacey Son panic("lockmgr locks do not support owner inquiring"); 427a5aedd68SStacey Son } 428a5aedd68SStacey Son #endif 429a5aedd68SStacey Son 43099448ed1SJohn Dyson void 431047dd67eSAttilio Rao lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags) 43299448ed1SJohn Dyson { 4336efc8a16SAttilio Rao int iflags; 4346efc8a16SAttilio Rao 435047dd67eSAttilio Rao MPASS((flags & ~LK_INIT_MASK) == 0); 436353998acSAttilio Rao ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock, 437353998acSAttilio Rao ("%s: lockmgr not aligned for %s: %p", __func__, wmesg, 438353998acSAttilio Rao &lk->lk_lock)); 43999448ed1SJohn Dyson 440f0830182SAttilio Rao iflags = LO_SLEEPABLE | LO_UPGRADABLE; 441f0830182SAttilio Rao if (flags & LK_CANRECURSE) 442f0830182SAttilio Rao iflags |= LO_RECURSABLE; 443047dd67eSAttilio Rao if ((flags & LK_NODUP) == 0) 4446efc8a16SAttilio Rao iflags |= LO_DUPOK; 4457fbfba7bSAttilio Rao if (flags & LK_NOPROFILE) 4467fbfba7bSAttilio Rao iflags |= LO_NOPROFILE; 447047dd67eSAttilio Rao if ((flags & LK_NOWITNESS) == 0) 4486efc8a16SAttilio Rao iflags |= LO_WITNESS; 4497fbfba7bSAttilio Rao if (flags & LK_QUIET) 4507fbfba7bSAttilio Rao iflags |= LO_QUIET; 451e63091eaSMarcel Moolenaar if (flags & LK_IS_VNODE) 452e63091eaSMarcel Moolenaar iflags |= LO_IS_VNODE; 453651175c9SAttilio Rao iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE); 454047dd67eSAttilio Rao 455b5fb43e5SJohn Baldwin lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags); 456047dd67eSAttilio Rao lk->lk_lock = LK_UNLOCKED; 457047dd67eSAttilio Rao lk->lk_recurse = 0; 4582028867dSAttilio Rao lk->lk_exslpfail = 0; 459047dd67eSAttilio Rao lk->lk_timo = timo; 460047dd67eSAttilio Rao lk->lk_pri = pri; 461047dd67eSAttilio Rao STACK_ZERO(lk); 46299448ed1SJohn Dyson } 46399448ed1SJohn Dyson 4643634d5b2SJohn Baldwin /* 4653634d5b2SJohn Baldwin * XXX: Gross hacks to manipulate external lock flags after 4663634d5b2SJohn Baldwin * 
initialization. Used for certain vnode and buf locks. 4673634d5b2SJohn Baldwin */ 4683634d5b2SJohn Baldwin void 4693634d5b2SJohn Baldwin lockallowshare(struct lock *lk) 4703634d5b2SJohn Baldwin { 4713634d5b2SJohn Baldwin 4723634d5b2SJohn Baldwin lockmgr_assert(lk, KA_XLOCKED); 4733634d5b2SJohn Baldwin lk->lock_object.lo_flags &= ~LK_NOSHARE; 4743634d5b2SJohn Baldwin } 4753634d5b2SJohn Baldwin 4763634d5b2SJohn Baldwin void 477575e02d9SKonstantin Belousov lockdisableshare(struct lock *lk) 478575e02d9SKonstantin Belousov { 479575e02d9SKonstantin Belousov 480575e02d9SKonstantin Belousov lockmgr_assert(lk, KA_XLOCKED); 481575e02d9SKonstantin Belousov lk->lock_object.lo_flags |= LK_NOSHARE; 482575e02d9SKonstantin Belousov } 483575e02d9SKonstantin Belousov 484575e02d9SKonstantin Belousov void 4853634d5b2SJohn Baldwin lockallowrecurse(struct lock *lk) 4863634d5b2SJohn Baldwin { 4873634d5b2SJohn Baldwin 4883634d5b2SJohn Baldwin lockmgr_assert(lk, KA_XLOCKED); 4893634d5b2SJohn Baldwin lk->lock_object.lo_flags |= LO_RECURSABLE; 4903634d5b2SJohn Baldwin } 4913634d5b2SJohn Baldwin 4923634d5b2SJohn Baldwin void 4933634d5b2SJohn Baldwin lockdisablerecurse(struct lock *lk) 4943634d5b2SJohn Baldwin { 4953634d5b2SJohn Baldwin 4963634d5b2SJohn Baldwin lockmgr_assert(lk, KA_XLOCKED); 4973634d5b2SJohn Baldwin lk->lock_object.lo_flags &= ~LO_RECURSABLE; 4983634d5b2SJohn Baldwin } 4993634d5b2SJohn Baldwin 500a18b1f1dSJason Evans void 501047dd67eSAttilio Rao lockdestroy(struct lock *lk) 502a18b1f1dSJason Evans { 503c91fcee7SJohn Baldwin 504047dd67eSAttilio Rao KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held")); 505047dd67eSAttilio Rao KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed")); 5062028867dSAttilio Rao KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters")); 507047dd67eSAttilio Rao lock_destroy(&lk->lock_object); 508047dd67eSAttilio Rao } 509047dd67eSAttilio Rao 510c4a48867SMateusz Guzik static bool __always_inline 51195ab076dSMateusz Guzik lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp) 512c4a48867SMateusz Guzik { 513c4a48867SMateusz Guzik 514c4a48867SMateusz Guzik /* 515c4a48867SMateusz Guzik * If no other thread has an exclusive lock, or 516c4a48867SMateusz Guzik * no exclusive waiter is present, bump the count of 517c4a48867SMateusz Guzik * sharers. Since we have to preserve the state of 518c4a48867SMateusz Guzik * waiters, if we fail to acquire the shared lock 519c4a48867SMateusz Guzik * loop back and retry. 
520c4a48867SMateusz Guzik */ 521c4a48867SMateusz Guzik *xp = lk->lk_lock; 52295ab076dSMateusz Guzik while (LK_CAN_SHARE(*xp, flags, fp)) { 523c4a48867SMateusz Guzik if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp, 524c4a48867SMateusz Guzik *xp + LK_ONE_SHARER)) { 525c4a48867SMateusz Guzik return (true); 526c4a48867SMateusz Guzik } 527c4a48867SMateusz Guzik } 528c4a48867SMateusz Guzik return (false); 529c4a48867SMateusz Guzik } 530c4a48867SMateusz Guzik 531c4a48867SMateusz Guzik static bool __always_inline 5321c6987ebSMateusz Guzik lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp) 533c4a48867SMateusz Guzik { 534c4a48867SMateusz Guzik 535c4a48867SMateusz Guzik for (;;) { 53695ab076dSMateusz Guzik if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) { 5371c6987ebSMateusz Guzik if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp, 5381c6987ebSMateusz Guzik *xp - LK_ONE_SHARER)) 539c4a48867SMateusz Guzik return (true); 540c4a48867SMateusz Guzik continue; 541c4a48867SMateusz Guzik } 542c4a48867SMateusz Guzik break; 543c4a48867SMateusz Guzik } 544c4a48867SMateusz Guzik return (false); 545c4a48867SMateusz Guzik } 546c4a48867SMateusz Guzik 5471c6987ebSMateusz Guzik static __noinline int 5481c6987ebSMateusz Guzik lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk, 5491c6987ebSMateusz Guzik const char *file, int line, struct lockmgr_wait *lwa) 550c4a48867SMateusz Guzik { 5511c6987ebSMateusz Guzik uintptr_t tid, x; 5521c6987ebSMateusz Guzik int error = 0; 553047dd67eSAttilio Rao const char *iwmesg; 5541c6987ebSMateusz Guzik int ipri, itimo; 5551c6987ebSMateusz Guzik 5561723a064SJeff Roberson #ifdef LOCK_PROFILING 5571723a064SJeff Roberson uint64_t waittime = 0; 5581723a064SJeff Roberson int contested = 0; 5591723a064SJeff Roberson #endif 560047dd67eSAttilio Rao 5611c6987ebSMateusz Guzik if (__predict_false(panicstr != NULL)) 5621c6987ebSMateusz Guzik goto out; 5631c6987ebSMateusz Guzik 564047dd67eSAttilio Rao tid = (uintptr_t)curthread; 565047dd67eSAttilio Rao 566e5f94314SAttilio Rao if (LK_CAN_WITNESS(flags)) 567e5f94314SAttilio Rao WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER, 56824150d37SJohn Baldwin file, line, flags & LK_INTERLOCK ? ilk : NULL); 569047dd67eSAttilio Rao for (;;) { 57095ab076dSMateusz Guzik if (lockmgr_slock_try(lk, &x, flags, false)) 571047dd67eSAttilio Rao break; 572f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS 573f5f9340bSFabien Thomas PMC_SOFT_CALL( , , lock, failed); 574f5f9340bSFabien Thomas #endif 575047dd67eSAttilio Rao lock_profile_obtain_lock_failed(&lk->lock_object, 576047dd67eSAttilio Rao &contested, &waittime); 577047dd67eSAttilio Rao 578047dd67eSAttilio Rao /* 57996f1567fSKonstantin Belousov * If the lock is already held by curthread in 580047dd67eSAttilio Rao * exclusive way avoid a deadlock. 581047dd67eSAttilio Rao */ 582047dd67eSAttilio Rao if (LK_HOLDER(x) == tid) { 583047dd67eSAttilio Rao LOCK_LOG2(lk, 58496f1567fSKonstantin Belousov "%s: %p already held in exclusive mode", 585047dd67eSAttilio Rao __func__, lk); 586047dd67eSAttilio Rao error = EDEADLK; 587047dd67eSAttilio Rao break; 588a18b1f1dSJason Evans } 589a18b1f1dSJason Evans 590a18b1f1dSJason Evans /* 591047dd67eSAttilio Rao * If the lock is expected to not sleep just give up 592047dd67eSAttilio Rao * and return. 
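 *
 * Illustrative sketch (editor's addition, not part of the original
 * file): a caller that must not sleep would typically request this
 * behaviour with LK_NOWAIT and handle the EBUSY return, roughly:
 *
 *	error = lockmgr(&lk, LK_SHARED | LK_NOWAIT, NULL);
 *	if (error == EBUSY)
 *		...the lock is contended; defer the work instead...
 *
 * where 'lk' stands for some caller-owned struct lock.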
593d7a7e179SAttilio Rao */
594047dd67eSAttilio Rao if (LK_TRYOP(flags)) {
595047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p fails the try operation",
596047dd67eSAttilio Rao __func__, lk);
597047dd67eSAttilio Rao error = EBUSY;
598047dd67eSAttilio Rao break;
599047dd67eSAttilio Rao }
600047dd67eSAttilio Rao
601047dd67eSAttilio Rao /*
602047dd67eSAttilio Rao * Acquire the sleepqueue chain lock because we
603047dd67eSAttilio Rao * probably will need to manipulate waiters flags.
604047dd67eSAttilio Rao */
605047dd67eSAttilio Rao sleepq_lock(&lk->lock_object);
606047dd67eSAttilio Rao x = lk->lk_lock;
6070ad122a9SMateusz Guzik retry_sleepq:
608047dd67eSAttilio Rao
609047dd67eSAttilio Rao /*
610047dd67eSAttilio Rao * if the lock can be acquired in shared mode, try
611047dd67eSAttilio Rao * again.
612047dd67eSAttilio Rao */
61395ab076dSMateusz Guzik if (LK_CAN_SHARE(x, flags, false)) {
614047dd67eSAttilio Rao sleepq_release(&lk->lock_object);
615047dd67eSAttilio Rao continue;
616047dd67eSAttilio Rao }
617047dd67eSAttilio Rao
618047dd67eSAttilio Rao /*
619047dd67eSAttilio Rao * Try to set the LK_SHARED_WAITERS flag. If we fail,
620047dd67eSAttilio Rao * loop back and retry.
621047dd67eSAttilio Rao */
622047dd67eSAttilio Rao if ((x & LK_SHARED_WAITERS) == 0) {
6230ad122a9SMateusz Guzik if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
624047dd67eSAttilio Rao x | LK_SHARED_WAITERS)) {
6250ad122a9SMateusz Guzik goto retry_sleepq;
626047dd67eSAttilio Rao }
627047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p set shared waiters flag",
628047dd67eSAttilio Rao __func__, lk);
629047dd67eSAttilio Rao }
630047dd67eSAttilio Rao
6311c6987ebSMateusz Guzik if (lwa == NULL) {
6321c6987ebSMateusz Guzik iwmesg = lk->lock_object.lo_name;
6331c6987ebSMateusz Guzik ipri = lk->lk_pri;
6341c6987ebSMateusz Guzik itimo = lk->lk_timo;
6351c6987ebSMateusz Guzik } else {
6361c6987ebSMateusz Guzik iwmesg = lwa->iwmesg;
6371c6987ebSMateusz Guzik ipri = lwa->ipri;
6381c6987ebSMateusz Guzik itimo = lwa->itimo;
6391c6987ebSMateusz Guzik }
6401c6987ebSMateusz Guzik
641047dd67eSAttilio Rao /*
642047dd67eSAttilio Rao * As far as we have been unable to acquire the
643047dd67eSAttilio Rao * shared lock and the shared waiters flag is set,
644047dd67eSAttilio Rao * we will sleep.
645047dd67eSAttilio Rao */ 646047dd67eSAttilio Rao error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, 647047dd67eSAttilio Rao SQ_SHARED_QUEUE); 648047dd67eSAttilio Rao flags &= ~LK_INTERLOCK; 649047dd67eSAttilio Rao if (error) { 650047dd67eSAttilio Rao LOCK_LOG3(lk, 651047dd67eSAttilio Rao "%s: interrupted sleep for %p with %d", 652047dd67eSAttilio Rao __func__, lk, error); 653047dd67eSAttilio Rao break; 654047dd67eSAttilio Rao } 655047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 656047dd67eSAttilio Rao __func__, lk); 657047dd67eSAttilio Rao } 658047dd67eSAttilio Rao if (error == 0) { 659c4a48867SMateusz Guzik #ifdef LOCK_PROFILING 660c4a48867SMateusz Guzik lockmgr_note_shared_acquire(lk, contested, waittime, 661c4a48867SMateusz Guzik file, line, flags); 662c4a48867SMateusz Guzik #else 663c4a48867SMateusz Guzik lockmgr_note_shared_acquire(lk, 0, 0, file, line, 664c4a48867SMateusz Guzik flags); 665c4a48867SMateusz Guzik #endif 666047dd67eSAttilio Rao } 667047dd67eSAttilio Rao 6681c6987ebSMateusz Guzik out: 6691c6987ebSMateusz Guzik lockmgr_exit(flags, ilk, 0); 6701c6987ebSMateusz Guzik return (error); 671047dd67eSAttilio Rao } 672047dd67eSAttilio Rao 6731c6987ebSMateusz Guzik static __noinline int 6741c6987ebSMateusz Guzik lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk, 6751c6987ebSMateusz Guzik const char *file, int line, struct lockmgr_wait *lwa) 6761c6987ebSMateusz Guzik { 6771c6987ebSMateusz Guzik struct lock_class *class; 6781c6987ebSMateusz Guzik uintptr_t tid, x, v; 6791c6987ebSMateusz Guzik int error = 0; 6801c6987ebSMateusz Guzik const char *iwmesg; 6811c6987ebSMateusz Guzik int ipri, itimo; 6827c6fe803SKonstantin Belousov 6831c6987ebSMateusz Guzik #ifdef LOCK_PROFILING 6841c6987ebSMateusz Guzik uint64_t waittime = 0; 6851c6987ebSMateusz Guzik int contested = 0; 6861c6987ebSMateusz Guzik #endif 687047dd67eSAttilio Rao 6881c6987ebSMateusz Guzik if (__predict_false(panicstr != NULL)) 6891c6987ebSMateusz Guzik goto out; 6901c6987ebSMateusz Guzik 6911c6987ebSMateusz Guzik tid = (uintptr_t)curthread; 6921c6987ebSMateusz Guzik 693e5f94314SAttilio Rao if (LK_CAN_WITNESS(flags)) 694e5f94314SAttilio Rao WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER | 69524150d37SJohn Baldwin LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ? 69624150d37SJohn Baldwin ilk : NULL); 697047dd67eSAttilio Rao 698047dd67eSAttilio Rao /* 69996f1567fSKonstantin Belousov * If curthread already holds the lock and this one is 700047dd67eSAttilio Rao * allowed to recurse, simply recurse on it. 701047dd67eSAttilio Rao */ 702047dd67eSAttilio Rao if (lockmgr_xlocked(lk)) { 703047dd67eSAttilio Rao if ((flags & LK_CANRECURSE) == 0 && 704f0830182SAttilio Rao (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) { 705047dd67eSAttilio Rao /* 706047dd67eSAttilio Rao * If the lock is expected to not panic just 707047dd67eSAttilio Rao * give up and return. 
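 *
 * Illustrative sketch (editor's addition): recursive exclusive locking
 * is only legal when the lock was initialized with LK_CANRECURSE or
 * later marked recursable with lockallowrecurse(), e.g.:
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, LK_CANRECURSE);
 *
 * where 'lk' and the "examplelk" name are hypothetical.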
708047dd67eSAttilio Rao */
709047dd67eSAttilio Rao if (LK_TRYOP(flags)) {
710047dd67eSAttilio Rao LOCK_LOG2(lk,
711047dd67eSAttilio Rao "%s: %p fails the try operation",
712047dd67eSAttilio Rao __func__, lk);
713047dd67eSAttilio Rao error = EBUSY;
7141c6987ebSMateusz Guzik goto out;
715047dd67eSAttilio Rao }
7161c6987ebSMateusz Guzik if (flags & LK_INTERLOCK) {
7171c6987ebSMateusz Guzik class = LOCK_CLASS(ilk);
718047dd67eSAttilio Rao class->lc_unlock(ilk);
7191c6987ebSMateusz Guzik }
72083fc34eaSGleb Smirnoff panic("%s: recursing on non recursive lockmgr %p "
72183fc34eaSGleb Smirnoff "@ %s:%d\n", __func__, lk, file, line);
722047dd67eSAttilio Rao }
723047dd67eSAttilio Rao lk->lk_recurse++;
724047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
725047dd67eSAttilio Rao LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
726047dd67eSAttilio Rao lk->lk_recurse, file, line);
727e5f94314SAttilio Rao WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
728e5f94314SAttilio Rao LK_TRYWIT(flags), file, line);
729047dd67eSAttilio Rao TD_LOCKS_INC(curthread);
7301c6987ebSMateusz Guzik goto out;
731047dd67eSAttilio Rao }
732047dd67eSAttilio Rao
733fc4f686dSMateusz Guzik for (;;) {
734fc4f686dSMateusz Guzik if (lk->lk_lock == LK_UNLOCKED &&
735fc4f686dSMateusz Guzik atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
736fc4f686dSMateusz Guzik break;
737f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
738f5f9340bSFabien Thomas PMC_SOFT_CALL( , , lock, failed);
739f5f9340bSFabien Thomas #endif
740047dd67eSAttilio Rao lock_profile_obtain_lock_failed(&lk->lock_object,
741047dd67eSAttilio Rao &contested, &waittime);
742047dd67eSAttilio Rao
743047dd67eSAttilio Rao /*
744047dd67eSAttilio Rao * If the lock is expected to not sleep just give up
745047dd67eSAttilio Rao * and return.
746047dd67eSAttilio Rao */
747047dd67eSAttilio Rao if (LK_TRYOP(flags)) {
748047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p fails the try operation",
749047dd67eSAttilio Rao __func__, lk);
750047dd67eSAttilio Rao error = EBUSY;
751047dd67eSAttilio Rao break;
752047dd67eSAttilio Rao }
753047dd67eSAttilio Rao
754047dd67eSAttilio Rao /*
755047dd67eSAttilio Rao * Acquire the sleepqueue chain lock because we
756047dd67eSAttilio Rao * probably will need to manipulate waiters flags.
757047dd67eSAttilio Rao */
758047dd67eSAttilio Rao sleepq_lock(&lk->lock_object);
759047dd67eSAttilio Rao x = lk->lk_lock;
7600ad122a9SMateusz Guzik retry_sleepq:
761047dd67eSAttilio Rao
762047dd67eSAttilio Rao /*
763047dd67eSAttilio Rao * if the lock has been released while we spun on
764047dd67eSAttilio Rao * the sleepqueue chain lock just try again.
765047dd67eSAttilio Rao */
766047dd67eSAttilio Rao if (x == LK_UNLOCKED) {
767047dd67eSAttilio Rao sleepq_release(&lk->lock_object);
768047dd67eSAttilio Rao continue;
769047dd67eSAttilio Rao }
770047dd67eSAttilio Rao
771047dd67eSAttilio Rao /*
772047dd67eSAttilio Rao * The lock can be in the state where there is a
773047dd67eSAttilio Rao * pending queue of waiters, but still no owner.
774047dd67eSAttilio Rao * This happens when the lock is contested and an
775047dd67eSAttilio Rao * owner is going to claim the lock.
776047dd67eSAttilio Rao * If curthread is the one successfully acquiring it,
777047dd67eSAttilio Rao * claim lock ownership and return, preserving waiters
778047dd67eSAttilio Rao * flags.
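 *
 * Editor's note (added, layout described approximately): lk_lock packs
 * the whole lock state into a single word; the low bits carry LK_SHARE
 * and the waiter/spinner flags while the remaining bits hold either the
 * owning thread pointer or the sharer count.  "Waiters pending but no
 * owner" is therefore a value such as LK_UNLOCKED | LK_EXCLUSIVE_WAITERS,
 * which is exactly what the (x & ~v) == LK_UNLOCKED test below matches.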
779047dd67eSAttilio Rao */ 780651175c9SAttilio Rao v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS); 781651175c9SAttilio Rao if ((x & ~v) == LK_UNLOCKED) { 782651175c9SAttilio Rao v &= ~LK_EXCLUSIVE_SPINNERS; 7830ad122a9SMateusz Guzik if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, 784047dd67eSAttilio Rao tid | v)) { 785047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 786047dd67eSAttilio Rao LOCK_LOG2(lk, 787047dd67eSAttilio Rao "%s: %p claimed by a new writer", 788047dd67eSAttilio Rao __func__, lk); 789047dd67eSAttilio Rao break; 790047dd67eSAttilio Rao } 7910ad122a9SMateusz Guzik goto retry_sleepq; 792047dd67eSAttilio Rao } 793047dd67eSAttilio Rao 794047dd67eSAttilio Rao /* 795047dd67eSAttilio Rao * Try to set the LK_EXCLUSIVE_WAITERS flag. If we 796047dd67eSAttilio Rao * fail, loop back and retry. 797047dd67eSAttilio Rao */ 798047dd67eSAttilio Rao if ((x & LK_EXCLUSIVE_WAITERS) == 0) { 7990ad122a9SMateusz Guzik if (!atomic_fcmpset_ptr(&lk->lk_lock, &x, 800047dd67eSAttilio Rao x | LK_EXCLUSIVE_WAITERS)) { 8010ad122a9SMateusz Guzik goto retry_sleepq; 802047dd67eSAttilio Rao } 803047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p set excl waiters flag", 804047dd67eSAttilio Rao __func__, lk); 805047dd67eSAttilio Rao } 806047dd67eSAttilio Rao 8071c6987ebSMateusz Guzik if (lwa == NULL) { 8081c6987ebSMateusz Guzik iwmesg = lk->lock_object.lo_name; 8091c6987ebSMateusz Guzik ipri = lk->lk_pri; 8101c6987ebSMateusz Guzik itimo = lk->lk_timo; 8111c6987ebSMateusz Guzik } else { 8121c6987ebSMateusz Guzik iwmesg = lwa->iwmesg; 8131c6987ebSMateusz Guzik ipri = lwa->ipri; 8141c6987ebSMateusz Guzik itimo = lwa->itimo; 8151c6987ebSMateusz Guzik } 8161c6987ebSMateusz Guzik 817047dd67eSAttilio Rao /* 818047dd67eSAttilio Rao * As far as we have been unable to acquire the 819047dd67eSAttilio Rao * exclusive lock and the exclusive waiters flag 820047dd67eSAttilio Rao * is set, we will sleep. 
821047dd67eSAttilio Rao */ 822047dd67eSAttilio Rao error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, 823047dd67eSAttilio Rao SQ_EXCLUSIVE_QUEUE); 824047dd67eSAttilio Rao flags &= ~LK_INTERLOCK; 825047dd67eSAttilio Rao if (error) { 826047dd67eSAttilio Rao LOCK_LOG3(lk, 827047dd67eSAttilio Rao "%s: interrupted sleep for %p with %d", 828047dd67eSAttilio Rao __func__, lk, error); 829047dd67eSAttilio Rao break; 830047dd67eSAttilio Rao } 831047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 832047dd67eSAttilio Rao __func__, lk); 833047dd67eSAttilio Rao } 834047dd67eSAttilio Rao if (error == 0) { 835c4a48867SMateusz Guzik #ifdef LOCK_PROFILING 836c4a48867SMateusz Guzik lockmgr_note_exclusive_acquire(lk, contested, waittime, 837c4a48867SMateusz Guzik file, line, flags); 838c4a48867SMateusz Guzik #else 839c4a48867SMateusz Guzik lockmgr_note_exclusive_acquire(lk, 0, 0, file, line, 840c4a48867SMateusz Guzik flags); 841c4a48867SMateusz Guzik #endif 842047dd67eSAttilio Rao } 8431c6987ebSMateusz Guzik 8441c6987ebSMateusz Guzik out: 8451c6987ebSMateusz Guzik lockmgr_exit(flags, ilk, 0); 8461c6987ebSMateusz Guzik return (error); 8471c6987ebSMateusz Guzik } 8481c6987ebSMateusz Guzik 8491c6987ebSMateusz Guzik static __noinline int 8501c6987ebSMateusz Guzik lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk, 8511c6987ebSMateusz Guzik const char *file, int line, struct lockmgr_wait *lwa) 8521c6987ebSMateusz Guzik { 8531c6987ebSMateusz Guzik uintptr_t tid, x, v; 8541c6987ebSMateusz Guzik int error = 0; 8551c6987ebSMateusz Guzik int wakeup_swapper = 0; 8561c6987ebSMateusz Guzik int op; 8571c6987ebSMateusz Guzik 8581c6987ebSMateusz Guzik if (__predict_false(panicstr != NULL)) 8591c6987ebSMateusz Guzik goto out; 8601c6987ebSMateusz Guzik 8611c6987ebSMateusz Guzik tid = (uintptr_t)curthread; 8621c6987ebSMateusz Guzik 8631c6987ebSMateusz Guzik _lockmgr_assert(lk, KA_SLOCKED, file, line); 8641c6987ebSMateusz Guzik v = lk->lk_lock; 8651c6987ebSMateusz Guzik x = v & LK_ALL_WAITERS; 8661c6987ebSMateusz Guzik v &= LK_EXCLUSIVE_SPINNERS; 8671c6987ebSMateusz Guzik 8681c6987ebSMateusz Guzik /* 8691c6987ebSMateusz Guzik * Try to switch from one shared lock to an exclusive one. 8701c6987ebSMateusz Guzik * We need to preserve waiters flags during the operation. 8711c6987ebSMateusz Guzik */ 8721c6987ebSMateusz Guzik if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v, 8731c6987ebSMateusz Guzik tid | x)) { 8741c6987ebSMateusz Guzik LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file, 8751c6987ebSMateusz Guzik line); 8761c6987ebSMateusz Guzik WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE | 8771c6987ebSMateusz Guzik LK_TRYWIT(flags), file, line); 8781c6987ebSMateusz Guzik TD_SLOCKS_DEC(curthread); 8791c6987ebSMateusz Guzik goto out; 8801c6987ebSMateusz Guzik } 8811c6987ebSMateusz Guzik 8821c6987ebSMateusz Guzik op = flags & LK_TYPE_MASK; 8831c6987ebSMateusz Guzik 8841c6987ebSMateusz Guzik /* 8851c6987ebSMateusz Guzik * In LK_TRYUPGRADE mode, do not drop the lock, 8861c6987ebSMateusz Guzik * returning EBUSY instead. 
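 *
 * Illustrative sketch (editor's addition): a shared holder that cannot
 * afford to lose the lock on a failed upgrade would use something like:
 *
 *	if (lockmgr(&lk, LK_TRYUPGRADE, NULL) == EBUSY)
 *		...still holding the shared lock, fall back to a retry...
 *
 * whereas a plain LK_UPGRADE that loses the race gives up the shared
 * lock and sleeps for the exclusive one, as the code below shows.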
8871c6987ebSMateusz Guzik */ 8881c6987ebSMateusz Guzik if (op == LK_TRYUPGRADE) { 8891c6987ebSMateusz Guzik LOCK_LOG2(lk, "%s: %p failed the nowait upgrade", 8901c6987ebSMateusz Guzik __func__, lk); 8911c6987ebSMateusz Guzik error = EBUSY; 8921c6987ebSMateusz Guzik goto out; 8931c6987ebSMateusz Guzik } 8941c6987ebSMateusz Guzik 8951c6987ebSMateusz Guzik /* 8961c6987ebSMateusz Guzik * We have been unable to succeed in upgrading, so just 8971c6987ebSMateusz Guzik * give up the shared lock. 8981c6987ebSMateusz Guzik */ 8991c6987ebSMateusz Guzik wakeup_swapper |= wakeupshlk(lk, file, line); 9001c6987ebSMateusz Guzik error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa); 9011c6987ebSMateusz Guzik flags &= ~LK_INTERLOCK; 9021c6987ebSMateusz Guzik out: 9031c6987ebSMateusz Guzik lockmgr_exit(flags, ilk, wakeup_swapper); 9041c6987ebSMateusz Guzik return (error); 9051c6987ebSMateusz Guzik } 9061c6987ebSMateusz Guzik 9071c6987ebSMateusz Guzik int 9081c6987ebSMateusz Guzik lockmgr_lock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk, 9091c6987ebSMateusz Guzik const char *file, int line) 9101c6987ebSMateusz Guzik { 9111c6987ebSMateusz Guzik struct lock_class *class; 9121c6987ebSMateusz Guzik uintptr_t x, tid; 9131c6987ebSMateusz Guzik u_int op; 9141c6987ebSMateusz Guzik bool locked; 9151c6987ebSMateusz Guzik 916b543c98cSConrad Meyer if (__predict_false(panicstr != NULL)) 917b543c98cSConrad Meyer return (0); 918b543c98cSConrad Meyer 9191c6987ebSMateusz Guzik op = flags & LK_TYPE_MASK; 9201c6987ebSMateusz Guzik locked = false; 9211c6987ebSMateusz Guzik switch (op) { 9221c6987ebSMateusz Guzik case LK_SHARED: 9231c6987ebSMateusz Guzik if (LK_CAN_WITNESS(flags)) 9241c6987ebSMateusz Guzik WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER, 9251c6987ebSMateusz Guzik file, line, flags & LK_INTERLOCK ? ilk : NULL); 9261c6987ebSMateusz Guzik if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE)) 9271c6987ebSMateusz Guzik break; 92895ab076dSMateusz Guzik if (lockmgr_slock_try(lk, &x, flags, true)) { 9291c6987ebSMateusz Guzik lockmgr_note_shared_acquire(lk, 0, 0, 9301c6987ebSMateusz Guzik file, line, flags); 9311c6987ebSMateusz Guzik locked = true; 9321c6987ebSMateusz Guzik } else { 9331c6987ebSMateusz Guzik return (lockmgr_slock_hard(lk, flags, ilk, file, line, 9341c6987ebSMateusz Guzik NULL)); 9351c6987ebSMateusz Guzik } 9361c6987ebSMateusz Guzik break; 9371c6987ebSMateusz Guzik case LK_EXCLUSIVE: 9381c6987ebSMateusz Guzik if (LK_CAN_WITNESS(flags)) 9391c6987ebSMateusz Guzik WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER | 9401c6987ebSMateusz Guzik LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ? 
9411c6987ebSMateusz Guzik ilk : NULL);
9421c6987ebSMateusz Guzik tid = (uintptr_t)curthread;
9431c6987ebSMateusz Guzik if (lk->lk_lock == LK_UNLOCKED &&
9441c6987ebSMateusz Guzik atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
9451c6987ebSMateusz Guzik lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
9461c6987ebSMateusz Guzik flags);
9471c6987ebSMateusz Guzik locked = true;
9481c6987ebSMateusz Guzik } else {
9491c6987ebSMateusz Guzik return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
9501c6987ebSMateusz Guzik NULL));
9511c6987ebSMateusz Guzik }
9521c6987ebSMateusz Guzik break;
9531c6987ebSMateusz Guzik case LK_UPGRADE:
9541c6987ebSMateusz Guzik case LK_TRYUPGRADE:
9551c6987ebSMateusz Guzik return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
9561c6987ebSMateusz Guzik default:
9571c6987ebSMateusz Guzik break;
9581c6987ebSMateusz Guzik }
9591c6987ebSMateusz Guzik if (__predict_true(locked)) {
9601c6987ebSMateusz Guzik if (__predict_false(flags & LK_INTERLOCK)) {
9611c6987ebSMateusz Guzik class = LOCK_CLASS(ilk);
9621c6987ebSMateusz Guzik class->lc_unlock(ilk);
9631c6987ebSMateusz Guzik }
9641c6987ebSMateusz Guzik return (0);
9651c6987ebSMateusz Guzik } else {
9661c6987ebSMateusz Guzik return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
9671c6987ebSMateusz Guzik LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
9681c6987ebSMateusz Guzik }
9691c6987ebSMateusz Guzik }
9701c6987ebSMateusz Guzik
9711c6987ebSMateusz Guzik static __noinline int
9721c6987ebSMateusz Guzik lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
9731c6987ebSMateusz Guzik const char *file, int line)
9741c6987ebSMateusz Guzik
9751c6987ebSMateusz Guzik {
9761c6987ebSMateusz Guzik int wakeup_swapper = 0;
9771c6987ebSMateusz Guzik
9781c6987ebSMateusz Guzik if (__predict_false(panicstr != NULL))
9791c6987ebSMateusz Guzik goto out;
9801c6987ebSMateusz Guzik
9811c6987ebSMateusz Guzik wakeup_swapper = wakeupshlk(lk, file, line);
9821c6987ebSMateusz Guzik
9831c6987ebSMateusz Guzik out:
9841c6987ebSMateusz Guzik lockmgr_exit(flags, ilk, wakeup_swapper);
9851c6987ebSMateusz Guzik return (0);
9861c6987ebSMateusz Guzik }
9871c6987ebSMateusz Guzik
9881c6987ebSMateusz Guzik static __noinline int
9891c6987ebSMateusz Guzik lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
9901c6987ebSMateusz Guzik const char *file, int line)
9911c6987ebSMateusz Guzik {
9921c6987ebSMateusz Guzik uintptr_t tid, v;
9931c6987ebSMateusz Guzik int wakeup_swapper = 0;
9941c6987ebSMateusz Guzik u_int realexslp;
9951c6987ebSMateusz Guzik int queue;
9961c6987ebSMateusz Guzik
9971c6987ebSMateusz Guzik if (__predict_false(panicstr != NULL))
9981c6987ebSMateusz Guzik goto out;
9991c6987ebSMateusz Guzik
10001c6987ebSMateusz Guzik tid = (uintptr_t)curthread;
10011c6987ebSMateusz Guzik
10021c6987ebSMateusz Guzik /*
10031c6987ebSMateusz Guzik * As first option, treat the lock as if it has no
10041c6987ebSMateusz Guzik * waiters.
10051c6987ebSMateusz Guzik * Fix-up the tid var if the lock has been disowned.
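 *
 * Editor's note (added): a disowned lock records LK_KERNPROC as its
 * holder (see the lockmgr_disowned() test earlier in this file), so the
 * thread releasing it is not the thread that acquired it; that is why
 * the WITNESS and per-thread lock count bookkeeping below is skipped
 * for that case.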
10061c6987ebSMateusz Guzik */ 10071c6987ebSMateusz Guzik if (LK_HOLDER(x) == LK_KERNPROC) 10081c6987ebSMateusz Guzik tid = LK_KERNPROC; 10091c6987ebSMateusz Guzik else { 10101c6987ebSMateusz Guzik WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line); 10111c6987ebSMateusz Guzik TD_LOCKS_DEC(curthread); 10121c6987ebSMateusz Guzik } 10131c6987ebSMateusz Guzik LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line); 10141c6987ebSMateusz Guzik 10151c6987ebSMateusz Guzik /* 10161c6987ebSMateusz Guzik * The lock is held in exclusive mode. 10171c6987ebSMateusz Guzik * If the lock is recursed also, then unrecurse it. 10181c6987ebSMateusz Guzik */ 101910391db5SMateusz Guzik if (lockmgr_xlocked_v(x) && lockmgr_recursed(lk)) { 10201c6987ebSMateusz Guzik LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk); 10211c6987ebSMateusz Guzik lk->lk_recurse--; 10221c6987ebSMateusz Guzik goto out; 10231c6987ebSMateusz Guzik } 10241c6987ebSMateusz Guzik if (tid != LK_KERNPROC) 10251c6987ebSMateusz Guzik lock_profile_release_lock(&lk->lock_object); 10261c6987ebSMateusz Guzik 102710391db5SMateusz Guzik if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) 10281c6987ebSMateusz Guzik goto out; 10291c6987ebSMateusz Guzik 10301c6987ebSMateusz Guzik sleepq_lock(&lk->lock_object); 10311c6987ebSMateusz Guzik x = lk->lk_lock; 10321c6987ebSMateusz Guzik v = LK_UNLOCKED; 10331c6987ebSMateusz Guzik 10341c6987ebSMateusz Guzik /* 10351c6987ebSMateusz Guzik * If the lock has exclusive waiters, give them 10361c6987ebSMateusz Guzik * preference in order to avoid deadlock with 10371c6987ebSMateusz Guzik * shared runners up. 10381c6987ebSMateusz Guzik * If interruptible sleeps left the exclusive queue 10391c6987ebSMateusz Guzik * empty avoid a starvation for the threads sleeping 10401c6987ebSMateusz Guzik * on the shared queue by giving them precedence 10411c6987ebSMateusz Guzik * and cleaning up the exclusive waiters bit anyway. 10421c6987ebSMateusz Guzik * Please note that lk_exslpfail count may be lying 10431c6987ebSMateusz Guzik * about the real number of waiters with the 10441c6987ebSMateusz Guzik * LK_SLEEPFAIL flag on because they may be used in 10451c6987ebSMateusz Guzik * conjunction with interruptible sleeps so 10461c6987ebSMateusz Guzik * lk_exslpfail might be considered an 'upper limit' 10471c6987ebSMateusz Guzik * bound, including the edge cases. 
10481c6987ebSMateusz Guzik */
10491c6987ebSMateusz Guzik MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
10501c6987ebSMateusz Guzik realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
10511c6987ebSMateusz Guzik if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
10521c6987ebSMateusz Guzik if (lk->lk_exslpfail < realexslp) {
10531c6987ebSMateusz Guzik lk->lk_exslpfail = 0;
10541c6987ebSMateusz Guzik queue = SQ_EXCLUSIVE_QUEUE;
10551c6987ebSMateusz Guzik v |= (x & LK_SHARED_WAITERS);
10561c6987ebSMateusz Guzik } else {
10571c6987ebSMateusz Guzik lk->lk_exslpfail = 0;
10581c6987ebSMateusz Guzik LOCK_LOG2(lk,
10591c6987ebSMateusz Guzik "%s: %p has only LK_SLEEPFAIL sleepers",
10601c6987ebSMateusz Guzik __func__, lk);
10611c6987ebSMateusz Guzik LOCK_LOG2(lk,
10621c6987ebSMateusz Guzik "%s: %p waking up threads on the exclusive queue",
10631c6987ebSMateusz Guzik __func__, lk);
10641c6987ebSMateusz Guzik wakeup_swapper = sleepq_broadcast(&lk->lock_object,
10651c6987ebSMateusz Guzik SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
10661c6987ebSMateusz Guzik queue = SQ_SHARED_QUEUE;
10671c6987ebSMateusz Guzik }
10681c6987ebSMateusz Guzik } else {
10691c6987ebSMateusz Guzik
10701c6987ebSMateusz Guzik /*
10711c6987ebSMateusz Guzik * Exclusive waiters sleeping with LK_SLEEPFAIL
10721c6987ebSMateusz Guzik * on and using interruptible sleeps/timeout
10731c6987ebSMateusz Guzik * may have left spurious lk_exslpfail counts
10741c6987ebSMateusz Guzik * on, so clean it up anyway.
10751c6987ebSMateusz Guzik */
10761c6987ebSMateusz Guzik lk->lk_exslpfail = 0;
10771c6987ebSMateusz Guzik queue = SQ_SHARED_QUEUE;
10781c6987ebSMateusz Guzik }
10791c6987ebSMateusz Guzik
10801c6987ebSMateusz Guzik LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
10811c6987ebSMateusz Guzik __func__, lk, queue == SQ_SHARED_QUEUE ?
"shared" : 10821c6987ebSMateusz Guzik "exclusive"); 10831c6987ebSMateusz Guzik atomic_store_rel_ptr(&lk->lk_lock, v); 10841c6987ebSMateusz Guzik wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue); 10851c6987ebSMateusz Guzik sleepq_release(&lk->lock_object); 10861c6987ebSMateusz Guzik 10871c6987ebSMateusz Guzik out: 10881c6987ebSMateusz Guzik lockmgr_exit(flags, ilk, wakeup_swapper); 10891c6987ebSMateusz Guzik return (0); 10901c6987ebSMateusz Guzik } 10911c6987ebSMateusz Guzik 10921c6987ebSMateusz Guzik int 10931c6987ebSMateusz Guzik lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk) 10941c6987ebSMateusz Guzik { 10951c6987ebSMateusz Guzik struct lock_class *class; 10961c6987ebSMateusz Guzik uintptr_t x, tid; 10971c6987ebSMateusz Guzik const char *file; 10981c6987ebSMateusz Guzik int line; 10991c6987ebSMateusz Guzik 1100b543c98cSConrad Meyer if (__predict_false(panicstr != NULL)) 1101b543c98cSConrad Meyer return (0); 1102b543c98cSConrad Meyer 11031c6987ebSMateusz Guzik file = __FILE__; 11041c6987ebSMateusz Guzik line = __LINE__; 11051c6987ebSMateusz Guzik 11061c6987ebSMateusz Guzik _lockmgr_assert(lk, KA_LOCKED, file, line); 11071c6987ebSMateusz Guzik x = lk->lk_lock; 11081c6987ebSMateusz Guzik if (__predict_true(x & LK_SHARE) != 0) { 11091c6987ebSMateusz Guzik if (lockmgr_sunlock_try(lk, &x)) { 11101c6987ebSMateusz Guzik lockmgr_note_shared_release(lk, file, line); 11111c6987ebSMateusz Guzik } else { 11121c6987ebSMateusz Guzik return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line)); 11131c6987ebSMateusz Guzik } 11141c6987ebSMateusz Guzik } else { 11151c6987ebSMateusz Guzik tid = (uintptr_t)curthread; 11161c6987ebSMateusz Guzik if (!lockmgr_recursed(lk) && 11171c6987ebSMateusz Guzik atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) { 11181c6987ebSMateusz Guzik lockmgr_note_exclusive_release(lk, file, line); 11191c6987ebSMateusz Guzik } else { 11201c6987ebSMateusz Guzik return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line)); 11211c6987ebSMateusz Guzik } 11221c6987ebSMateusz Guzik } 11231c6987ebSMateusz Guzik if (__predict_false(flags & LK_INTERLOCK)) { 11241c6987ebSMateusz Guzik class = LOCK_CLASS(ilk); 11251c6987ebSMateusz Guzik class->lc_unlock(ilk); 11261c6987ebSMateusz Guzik } 11271c6987ebSMateusz Guzik return (0); 11281c6987ebSMateusz Guzik } 11291c6987ebSMateusz Guzik 11301c6987ebSMateusz Guzik int 11311c6987ebSMateusz Guzik __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk, 11321c6987ebSMateusz Guzik const char *wmesg, int pri, int timo, const char *file, int line) 11331c6987ebSMateusz Guzik { 11341c6987ebSMateusz Guzik GIANT_DECLARE; 11351c6987ebSMateusz Guzik struct lockmgr_wait lwa; 11361c6987ebSMateusz Guzik struct lock_class *class; 11371c6987ebSMateusz Guzik const char *iwmesg; 11381c6987ebSMateusz Guzik uintptr_t tid, v, x; 11391c6987ebSMateusz Guzik u_int op, realexslp; 11401c6987ebSMateusz Guzik int error, ipri, itimo, queue, wakeup_swapper; 11411c6987ebSMateusz Guzik #ifdef LOCK_PROFILING 11421c6987ebSMateusz Guzik uint64_t waittime = 0; 11431c6987ebSMateusz Guzik int contested = 0; 11441c6987ebSMateusz Guzik #endif 11451c6987ebSMateusz Guzik 1146b543c98cSConrad Meyer if (panicstr != NULL) 1147b543c98cSConrad Meyer return (0); 1148b543c98cSConrad Meyer 11491c6987ebSMateusz Guzik error = 0; 11501c6987ebSMateusz Guzik tid = (uintptr_t)curthread; 11511c6987ebSMateusz Guzik op = (flags & LK_TYPE_MASK); 11521c6987ebSMateusz Guzik iwmesg = (wmesg == LK_WMESG_DEFAULT) ? 
lk->lock_object.lo_name : wmesg; 11531c6987ebSMateusz Guzik ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri; 11541c6987ebSMateusz Guzik itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo; 11551c6987ebSMateusz Guzik 11561c6987ebSMateusz Guzik lwa.iwmesg = iwmesg; 11571c6987ebSMateusz Guzik lwa.ipri = ipri; 11581c6987ebSMateusz Guzik lwa.itimo = itimo; 11591c6987ebSMateusz Guzik 11601c6987ebSMateusz Guzik MPASS((flags & ~LK_TOTAL_MASK) == 0); 11611c6987ebSMateusz Guzik KASSERT((op & (op - 1)) == 0, 11621c6987ebSMateusz Guzik ("%s: Invalid requested operation @ %s:%d", __func__, file, line)); 11631c6987ebSMateusz Guzik KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 || 11641c6987ebSMateusz Guzik (op != LK_DOWNGRADE && op != LK_RELEASE), 11651c6987ebSMateusz Guzik ("%s: Invalid flags in regard of the operation desired @ %s:%d", 11661c6987ebSMateusz Guzik __func__, file, line)); 11671c6987ebSMateusz Guzik KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL, 11681c6987ebSMateusz Guzik ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d", 11691c6987ebSMateusz Guzik __func__, file, line)); 11701c6987ebSMateusz Guzik KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 11711c6987ebSMateusz Guzik ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread, 11721c6987ebSMateusz Guzik lk->lock_object.lo_name, file, line)); 11731c6987ebSMateusz Guzik 11741c6987ebSMateusz Guzik class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL; 11751c6987ebSMateusz Guzik 11761c6987ebSMateusz Guzik if (lk->lock_object.lo_flags & LK_NOSHARE) { 11771c6987ebSMateusz Guzik switch (op) { 11781c6987ebSMateusz Guzik case LK_SHARED: 11791c6987ebSMateusz Guzik op = LK_EXCLUSIVE; 11801c6987ebSMateusz Guzik break; 11811c6987ebSMateusz Guzik case LK_UPGRADE: 11821c6987ebSMateusz Guzik case LK_TRYUPGRADE: 11831c6987ebSMateusz Guzik case LK_DOWNGRADE: 11841c6987ebSMateusz Guzik _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, 11851c6987ebSMateusz Guzik file, line); 11861c6987ebSMateusz Guzik if (flags & LK_INTERLOCK) 11871c6987ebSMateusz Guzik class->lc_unlock(ilk); 11881c6987ebSMateusz Guzik return (0); 11891c6987ebSMateusz Guzik } 11901c6987ebSMateusz Guzik } 11911c6987ebSMateusz Guzik 11921c6987ebSMateusz Guzik wakeup_swapper = 0; 11931c6987ebSMateusz Guzik switch (op) { 11941c6987ebSMateusz Guzik case LK_SHARED: 11951c6987ebSMateusz Guzik return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa)); 11961c6987ebSMateusz Guzik break; 11971c6987ebSMateusz Guzik case LK_UPGRADE: 11981c6987ebSMateusz Guzik case LK_TRYUPGRADE: 11991c6987ebSMateusz Guzik return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa)); 12001c6987ebSMateusz Guzik break; 12011c6987ebSMateusz Guzik case LK_EXCLUSIVE: 12021c6987ebSMateusz Guzik return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa)); 1203047dd67eSAttilio Rao break; 1204047dd67eSAttilio Rao case LK_DOWNGRADE: 12051c7d98d0SAttilio Rao _lockmgr_assert(lk, KA_XLOCKED, file, line); 1206e5f94314SAttilio Rao LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line); 1207e5f94314SAttilio Rao WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line); 12081c7d98d0SAttilio Rao 12091c7d98d0SAttilio Rao /* 12101c7d98d0SAttilio Rao * Panic if the lock is recursed. 
12111c7d98d0SAttilio Rao */
12121c7d98d0SAttilio Rao if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
12131c7d98d0SAttilio Rao if (flags & LK_INTERLOCK)
12141c7d98d0SAttilio Rao class->lc_unlock(ilk);
12151c7d98d0SAttilio Rao panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
12161c7d98d0SAttilio Rao __func__, iwmesg, file, line);
12171c7d98d0SAttilio Rao }
1218e5f94314SAttilio Rao TD_SLOCKS_INC(curthread);
1219047dd67eSAttilio Rao 
1220047dd67eSAttilio Rao /*
1221047dd67eSAttilio Rao * In order to preserve waiters flags, just spin.
1222047dd67eSAttilio Rao */
1223047dd67eSAttilio Rao for (;;) {
1224651175c9SAttilio Rao x = lk->lk_lock;
1225651175c9SAttilio Rao MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1226651175c9SAttilio Rao x &= LK_ALL_WAITERS;
1227047dd67eSAttilio Rao if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1228e5f94314SAttilio Rao LK_SHARERS_LOCK(1) | x))
1229047dd67eSAttilio Rao break;
1230047dd67eSAttilio Rao cpu_spinwait();
1231047dd67eSAttilio Rao }
1232047dd67eSAttilio Rao break;
1233047dd67eSAttilio Rao case LK_RELEASE:
1234047dd67eSAttilio Rao _lockmgr_assert(lk, KA_LOCKED, file, line);
1235047dd67eSAttilio Rao x = lk->lk_lock;
1236047dd67eSAttilio Rao 
12371c6987ebSMateusz Guzik if (__predict_true(x & LK_SHARE) != 0) {
12381c6987ebSMateusz Guzik return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1239047dd67eSAttilio Rao } else {
12401c6987ebSMateusz Guzik return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
12412028867dSAttilio Rao }
1242047dd67eSAttilio Rao break;
1243047dd67eSAttilio Rao case LK_DRAIN:
1244e5f94314SAttilio Rao if (LK_CAN_WITNESS(flags))
1245e5f94314SAttilio Rao WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
124624150d37SJohn Baldwin LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
124724150d37SJohn Baldwin ilk : NULL);
1248047dd67eSAttilio Rao 
1249047dd67eSAttilio Rao /*
125096f1567fSKonstantin Belousov * Trying to drain a lock we already own will result in a
1251047dd67eSAttilio Rao * deadlock.
1252047dd67eSAttilio Rao */
1253047dd67eSAttilio Rao if (lockmgr_xlocked(lk)) {
1254047dd67eSAttilio Rao if (flags & LK_INTERLOCK)
1255047dd67eSAttilio Rao class->lc_unlock(ilk);
1256047dd67eSAttilio Rao panic("%s: draining %s with the lock held @ %s:%d\n",
1257047dd67eSAttilio Rao __func__, iwmesg, file, line);
1258047dd67eSAttilio Rao }
1259047dd67eSAttilio Rao 
1260fc4f686dSMateusz Guzik for (;;) {
1261fc4f686dSMateusz Guzik if (lk->lk_lock == LK_UNLOCKED &&
1262fc4f686dSMateusz Guzik atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1263fc4f686dSMateusz Guzik break;
1264fc4f686dSMateusz Guzik 
1265f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
1266f5f9340bSFabien Thomas PMC_SOFT_CALL( , , lock, failed);
1267f5f9340bSFabien Thomas #endif
1268047dd67eSAttilio Rao lock_profile_obtain_lock_failed(&lk->lock_object,
1269047dd67eSAttilio Rao &contested, &waittime);
1270047dd67eSAttilio Rao 
1271047dd67eSAttilio Rao /*
1272047dd67eSAttilio Rao * If the operation is not allowed to sleep, just give up
1273047dd67eSAttilio Rao * and return.
1274047dd67eSAttilio Rao */
1275047dd67eSAttilio Rao if (LK_TRYOP(flags)) {
1276047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p fails the try operation",
1277047dd67eSAttilio Rao __func__, lk);
1278047dd67eSAttilio Rao error = EBUSY;
1279047dd67eSAttilio Rao break;
1280047dd67eSAttilio Rao }
1281047dd67eSAttilio Rao 
1282047dd67eSAttilio Rao /*
1283047dd67eSAttilio Rao * Acquire the sleepqueue chain lock because we
1284047dd67eSAttilio Rao * probably will need to manipulate waiters flags.
1285047dd67eSAttilio Rao */
1286047dd67eSAttilio Rao sleepq_lock(&lk->lock_object);
1287047dd67eSAttilio Rao x = lk->lk_lock;
1288047dd67eSAttilio Rao 
1289047dd67eSAttilio Rao /*
1290047dd67eSAttilio Rao * If the lock has been released while we spun on
1291047dd67eSAttilio Rao * the sleepqueue chain lock, just try again.
1292047dd67eSAttilio Rao */
1293047dd67eSAttilio Rao if (x == LK_UNLOCKED) {
1294047dd67eSAttilio Rao sleepq_release(&lk->lock_object);
1295047dd67eSAttilio Rao continue;
1296047dd67eSAttilio Rao }
1297047dd67eSAttilio Rao 
1298651175c9SAttilio Rao v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1299651175c9SAttilio Rao if ((x & ~v) == LK_UNLOCKED) {
1300651175c9SAttilio Rao v = (x & ~LK_EXCLUSIVE_SPINNERS);
13012028867dSAttilio Rao 
13022028867dSAttilio Rao /*
13032028867dSAttilio Rao * If interruptible sleeps left the exclusive
13042028867dSAttilio Rao * queue empty, avoid starvation of the
13052028867dSAttilio Rao * threads sleeping on the shared queue by
13062028867dSAttilio Rao * giving them precedence and cleaning up the
13072028867dSAttilio Rao * exclusive waiters bit anyway.
1308c636ba83SAttilio Rao * Note that the lk_exslpfail count may
1309c636ba83SAttilio Rao * overstate the real number of waiters with
1310c636ba83SAttilio Rao * the LK_SLEEPFAIL flag set, because they may
1311e3043798SPedro F. Giffuni * be used in conjunction with interruptible
1312aab9c8c2SAttilio Rao * sleeps, so lk_exslpfail should be treated as
1313aab9c8c2SAttilio Rao * an upper bound, including the edge
1314c636ba83SAttilio Rao * cases.
13152028867dSAttilio Rao */
1316047dd67eSAttilio Rao if (v & LK_EXCLUSIVE_WAITERS) {
1317047dd67eSAttilio Rao queue = SQ_EXCLUSIVE_QUEUE;
1318047dd67eSAttilio Rao v &= ~LK_EXCLUSIVE_WAITERS;
1319047dd67eSAttilio Rao } else {
13209dbf7a62SAttilio Rao 
13219dbf7a62SAttilio Rao /*
13229dbf7a62SAttilio Rao * Exclusive waiters sleeping with
13239dbf7a62SAttilio Rao * LK_SLEEPFAIL on and using
13249dbf7a62SAttilio Rao * interruptible sleeps/timeout may
13259dbf7a62SAttilio Rao * have left spurious lk_exslpfail
13269dbf7a62SAttilio Rao * counts on, so clean it up anyway.
13279dbf7a62SAttilio Rao */
1328047dd67eSAttilio Rao MPASS(v & LK_SHARED_WAITERS);
13299dbf7a62SAttilio Rao lk->lk_exslpfail = 0;
1330047dd67eSAttilio Rao queue = SQ_SHARED_QUEUE;
1331047dd67eSAttilio Rao v &= ~LK_SHARED_WAITERS;
1332047dd67eSAttilio Rao }
13332028867dSAttilio Rao if (queue == SQ_EXCLUSIVE_QUEUE) {
13342028867dSAttilio Rao realexslp =
13352028867dSAttilio Rao sleepq_sleepcnt(&lk->lock_object,
13362028867dSAttilio Rao SQ_EXCLUSIVE_QUEUE);
13372028867dSAttilio Rao if (lk->lk_exslpfail >= realexslp) {
13382028867dSAttilio Rao lk->lk_exslpfail = 0;
13392028867dSAttilio Rao queue = SQ_SHARED_QUEUE;
13402028867dSAttilio Rao v &= ~LK_SHARED_WAITERS;
13412028867dSAttilio Rao if (realexslp != 0) {
13422028867dSAttilio Rao LOCK_LOG2(lk,
13432028867dSAttilio Rao "%s: %p has only LK_SLEEPFAIL sleepers",
13442028867dSAttilio Rao __func__, lk);
13452028867dSAttilio Rao LOCK_LOG2(lk,
13462028867dSAttilio Rao "%s: %p waking up threads on the exclusive queue",
13472028867dSAttilio Rao __func__, lk);
13482028867dSAttilio Rao wakeup_swapper =
13492028867dSAttilio Rao sleepq_broadcast(
13502028867dSAttilio Rao &lk->lock_object,
13512028867dSAttilio Rao SLEEPQ_LK, 0,
13522028867dSAttilio Rao SQ_EXCLUSIVE_QUEUE);
13532028867dSAttilio Rao }
13542028867dSAttilio Rao } else
13552028867dSAttilio Rao lk->lk_exslpfail = 0;
13562028867dSAttilio Rao }
1357047dd67eSAttilio Rao if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1358047dd67eSAttilio Rao sleepq_release(&lk->lock_object);
1359047dd67eSAttilio Rao continue;
1360047dd67eSAttilio Rao }
1361047dd67eSAttilio Rao LOCK_LOG3(lk,
1362047dd67eSAttilio Rao "%s: %p waking up all threads on the %s queue",
1363047dd67eSAttilio Rao __func__, lk, queue == SQ_SHARED_QUEUE ?
1364047dd67eSAttilio Rao "shared" : "exclusive");
1365814f26daSJohn Baldwin wakeup_swapper |= sleepq_broadcast(
1366da7bbd2cSJohn Baldwin &lk->lock_object, SLEEPQ_LK, 0, queue);
1367047dd67eSAttilio Rao 
1368047dd67eSAttilio Rao /*
1369047dd67eSAttilio Rao * If shared waiters have been woken up, we need
1370047dd67eSAttilio Rao * to wait for one of them to acquire the lock
1371047dd67eSAttilio Rao * before setting the exclusive waiters flag in
1372047dd67eSAttilio Rao * order to avoid a deadlock.
1373047dd67eSAttilio Rao */
1374047dd67eSAttilio Rao if (queue == SQ_SHARED_QUEUE) {
1375047dd67eSAttilio Rao for (v = lk->lk_lock;
1376047dd67eSAttilio Rao (v & LK_SHARE) && !LK_SHARERS(v);
1377047dd67eSAttilio Rao v = lk->lk_lock)
1378047dd67eSAttilio Rao cpu_spinwait();
1379047dd67eSAttilio Rao }
1380047dd67eSAttilio Rao }
1381047dd67eSAttilio Rao 
1382047dd67eSAttilio Rao /*
1383047dd67eSAttilio Rao * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
1384047dd67eSAttilio Rao * fail, loop back and retry.
1385047dd67eSAttilio Rao */
1386047dd67eSAttilio Rao if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1387047dd67eSAttilio Rao if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1388047dd67eSAttilio Rao x | LK_EXCLUSIVE_WAITERS)) {
1389047dd67eSAttilio Rao sleepq_release(&lk->lock_object);
1390047dd67eSAttilio Rao continue;
1391047dd67eSAttilio Rao }
1392047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1393047dd67eSAttilio Rao __func__, lk);
1394047dd67eSAttilio Rao }
1395047dd67eSAttilio Rao 
1396047dd67eSAttilio Rao /*
1397047dd67eSAttilio Rao * Since we have been unable to acquire the
1398047dd67eSAttilio Rao * exclusive lock and the exclusive waiters flag
1399047dd67eSAttilio Rao * is set, we will sleep.
1400047dd67eSAttilio Rao */ 1401047dd67eSAttilio Rao if (flags & LK_INTERLOCK) { 1402047dd67eSAttilio Rao class->lc_unlock(ilk); 1403047dd67eSAttilio Rao flags &= ~LK_INTERLOCK; 1404047dd67eSAttilio Rao } 1405e5f94314SAttilio Rao GIANT_SAVE(); 1406047dd67eSAttilio Rao sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK, 1407047dd67eSAttilio Rao SQ_EXCLUSIVE_QUEUE); 1408047dd67eSAttilio Rao sleepq_wait(&lk->lock_object, ipri & PRIMASK); 1409e5f94314SAttilio Rao GIANT_RESTORE(); 1410047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 1411047dd67eSAttilio Rao __func__, lk); 1412047dd67eSAttilio Rao } 1413047dd67eSAttilio Rao 1414047dd67eSAttilio Rao if (error == 0) { 1415047dd67eSAttilio Rao lock_profile_obtain_lock_success(&lk->lock_object, 1416047dd67eSAttilio Rao contested, waittime, file, line); 1417047dd67eSAttilio Rao LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0, 1418047dd67eSAttilio Rao lk->lk_recurse, file, line); 1419e5f94314SAttilio Rao WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 1420e5f94314SAttilio Rao LK_TRYWIT(flags), file, line); 1421047dd67eSAttilio Rao TD_LOCKS_INC(curthread); 1422047dd67eSAttilio Rao STACK_SAVE(lk); 1423047dd67eSAttilio Rao } 1424047dd67eSAttilio Rao break; 1425047dd67eSAttilio Rao default: 1426047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 1427047dd67eSAttilio Rao class->lc_unlock(ilk); 1428047dd67eSAttilio Rao panic("%s: unknown lockmgr request 0x%x\n", __func__, op); 1429047dd67eSAttilio Rao } 1430047dd67eSAttilio Rao 1431047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 1432047dd67eSAttilio Rao class->lc_unlock(ilk); 1433da7bbd2cSJohn Baldwin if (wakeup_swapper) 1434da7bbd2cSJohn Baldwin kick_proc0(); 1435047dd67eSAttilio Rao 1436047dd67eSAttilio Rao return (error); 1437047dd67eSAttilio Rao } 1438047dd67eSAttilio Rao 1439d7a7e179SAttilio Rao void 1440047dd67eSAttilio Rao _lockmgr_disown(struct lock *lk, const char *file, int line) 1441047dd67eSAttilio Rao { 1442047dd67eSAttilio Rao uintptr_t tid, x; 1443047dd67eSAttilio Rao 144435370593SAndriy Gapon if (SCHEDULER_STOPPED()) 144535370593SAndriy Gapon return; 144635370593SAndriy Gapon 1447047dd67eSAttilio Rao tid = (uintptr_t)curthread; 14481c7d98d0SAttilio Rao _lockmgr_assert(lk, KA_XLOCKED, file, line); 14491c7d98d0SAttilio Rao 14501c7d98d0SAttilio Rao /* 14511c7d98d0SAttilio Rao * Panic if the lock is recursed. 14521c7d98d0SAttilio Rao */ 14531c7d98d0SAttilio Rao if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) 14541c7d98d0SAttilio Rao panic("%s: disown a recursed lockmgr @ %s:%d\n", 14551c7d98d0SAttilio Rao __func__, file, line); 1456047dd67eSAttilio Rao 1457047dd67eSAttilio Rao /* 145896f1567fSKonstantin Belousov * If the owner is already LK_KERNPROC just skip the whole operation. 1459047dd67eSAttilio Rao */ 1460047dd67eSAttilio Rao if (LK_HOLDER(lk->lk_lock) != tid) 1461047dd67eSAttilio Rao return; 146204a28689SJeff Roberson lock_profile_release_lock(&lk->lock_object); 1463e5f94314SAttilio Rao LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line); 1464e5f94314SAttilio Rao WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line); 1465e5f94314SAttilio Rao TD_LOCKS_DEC(curthread); 1466337c5ff4SAttilio Rao STACK_SAVE(lk); 1467047dd67eSAttilio Rao 1468047dd67eSAttilio Rao /* 1469047dd67eSAttilio Rao * In order to preserve waiters flags, just spin. 
1470047dd67eSAttilio Rao */ 1471047dd67eSAttilio Rao for (;;) { 1472651175c9SAttilio Rao x = lk->lk_lock; 1473651175c9SAttilio Rao MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0); 1474651175c9SAttilio Rao x &= LK_ALL_WAITERS; 147522dd228dSAttilio Rao if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x, 1476e5f94314SAttilio Rao LK_KERNPROC | x)) 1477047dd67eSAttilio Rao return; 1478047dd67eSAttilio Rao cpu_spinwait(); 1479047dd67eSAttilio Rao } 1480047dd67eSAttilio Rao } 1481047dd67eSAttilio Rao 1482047dd67eSAttilio Rao void 1483d576deedSPawel Jakub Dawidek lockmgr_printinfo(const struct lock *lk) 1484d7a7e179SAttilio Rao { 1485d7a7e179SAttilio Rao struct thread *td; 1486047dd67eSAttilio Rao uintptr_t x; 1487d7a7e179SAttilio Rao 1488047dd67eSAttilio Rao if (lk->lk_lock == LK_UNLOCKED) 1489047dd67eSAttilio Rao printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name); 1490047dd67eSAttilio Rao else if (lk->lk_lock & LK_SHARE) 1491047dd67eSAttilio Rao printf("lock type %s: SHARED (count %ju)\n", 1492047dd67eSAttilio Rao lk->lock_object.lo_name, 1493047dd67eSAttilio Rao (uintmax_t)LK_SHARERS(lk->lk_lock)); 1494047dd67eSAttilio Rao else { 1495047dd67eSAttilio Rao td = lockmgr_xholder(lk); 1496e64b4fa8SKonstantin Belousov if (td == (struct thread *)LK_KERNPROC) 1497e64b4fa8SKonstantin Belousov printf("lock type %s: EXCL by KERNPROC\n", 1498e64b4fa8SKonstantin Belousov lk->lock_object.lo_name); 1499e64b4fa8SKonstantin Belousov else 15002573ea5fSIvan Voras printf("lock type %s: EXCL by thread %p " 1501e64b4fa8SKonstantin Belousov "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, 1502e64b4fa8SKonstantin Belousov td, td->td_proc->p_pid, td->td_proc->p_comm, 1503e64b4fa8SKonstantin Belousov td->td_tid); 1504d7a7e179SAttilio Rao } 1505d7a7e179SAttilio Rao 1506047dd67eSAttilio Rao x = lk->lk_lock; 1507047dd67eSAttilio Rao if (x & LK_EXCLUSIVE_WAITERS) 1508047dd67eSAttilio Rao printf(" with exclusive waiters pending\n"); 1509047dd67eSAttilio Rao if (x & LK_SHARED_WAITERS) 1510047dd67eSAttilio Rao printf(" with shared waiters pending\n"); 1511651175c9SAttilio Rao if (x & LK_EXCLUSIVE_SPINNERS) 1512651175c9SAttilio Rao printf(" with exclusive spinners pending\n"); 1513047dd67eSAttilio Rao 1514047dd67eSAttilio Rao STACK_PRINT(lk); 1515047dd67eSAttilio Rao } 1516047dd67eSAttilio Rao 151799448ed1SJohn Dyson int 1518d576deedSPawel Jakub Dawidek lockstatus(const struct lock *lk) 151999448ed1SJohn Dyson { 1520047dd67eSAttilio Rao uintptr_t v, x; 1521047dd67eSAttilio Rao int ret; 152299448ed1SJohn Dyson 1523047dd67eSAttilio Rao ret = LK_SHARED; 1524047dd67eSAttilio Rao x = lk->lk_lock; 1525047dd67eSAttilio Rao v = LK_HOLDER(x); 15260e9eb108SAttilio Rao 1527047dd67eSAttilio Rao if ((x & LK_SHARE) == 0) { 1528047dd67eSAttilio Rao if (v == (uintptr_t)curthread || v == LK_KERNPROC) 1529047dd67eSAttilio Rao ret = LK_EXCLUSIVE; 15306bdfe06aSEivind Eklund else 1531047dd67eSAttilio Rao ret = LK_EXCLOTHER; 1532047dd67eSAttilio Rao } else if (x == LK_UNLOCKED) 1533047dd67eSAttilio Rao ret = 0; 153499448ed1SJohn Dyson 1535047dd67eSAttilio Rao return (ret); 153653bf4bb2SPeter Wemm } 1537be6847d7SJohn Baldwin 153884887fa3SAttilio Rao #ifdef INVARIANT_SUPPORT 1539de5b1952SAlexander Leidinger 1540de5b1952SAlexander Leidinger FEATURE(invariant_support, 1541de5b1952SAlexander Leidinger "Support for modules compiled with INVARIANTS option"); 1542de5b1952SAlexander Leidinger 154384887fa3SAttilio Rao #ifndef INVARIANTS 154484887fa3SAttilio Rao #undef _lockmgr_assert 154584887fa3SAttilio Rao #endif 154684887fa3SAttilio Rao 
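/*
 * Illustrative note, not part of the original file: callers are not
 * expected to invoke _lockmgr_assert() directly.  Under INVARIANTS they
 * normally reach it through the lockmgr_assert() wrapper from
 * <sys/lockmgr.h>, which supplies the file and line arguments, e.g.:
 *
 *	lockmgr_assert(&sc->sc_lock, KA_XLOCKED);
 *
 * Here "sc->sc_lock" stands for a hypothetical struct lock embedded in a
 * caller's softc; the accepted "what" values are the KA_* combinations
 * handled by the switch statement below.
 */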
154784887fa3SAttilio Rao void 1548d576deedSPawel Jakub Dawidek _lockmgr_assert(const struct lock *lk, int what, const char *file, int line) 154984887fa3SAttilio Rao { 155084887fa3SAttilio Rao int slocked = 0; 155184887fa3SAttilio Rao 155284887fa3SAttilio Rao if (panicstr != NULL) 155384887fa3SAttilio Rao return; 155484887fa3SAttilio Rao switch (what) { 155584887fa3SAttilio Rao case KA_SLOCKED: 155684887fa3SAttilio Rao case KA_SLOCKED | KA_NOTRECURSED: 155784887fa3SAttilio Rao case KA_SLOCKED | KA_RECURSED: 155884887fa3SAttilio Rao slocked = 1; 155984887fa3SAttilio Rao case KA_LOCKED: 156084887fa3SAttilio Rao case KA_LOCKED | KA_NOTRECURSED: 156184887fa3SAttilio Rao case KA_LOCKED | KA_RECURSED: 1562e5f94314SAttilio Rao #ifdef WITNESS 1563e5f94314SAttilio Rao 1564e5f94314SAttilio Rao /* 1565e5f94314SAttilio Rao * We cannot trust WITNESS if the lock is held in exclusive 1566e5f94314SAttilio Rao * mode and a call to lockmgr_disown() happened. 1567e5f94314SAttilio Rao * Workaround this skipping the check if the lock is held in 1568e5f94314SAttilio Rao * exclusive mode even for the KA_LOCKED case. 1569e5f94314SAttilio Rao */ 1570e5f94314SAttilio Rao if (slocked || (lk->lk_lock & LK_SHARE)) { 1571e5f94314SAttilio Rao witness_assert(&lk->lock_object, what, file, line); 1572e5f94314SAttilio Rao break; 1573e5f94314SAttilio Rao } 1574e5f94314SAttilio Rao #endif 1575047dd67eSAttilio Rao if (lk->lk_lock == LK_UNLOCKED || 1576047dd67eSAttilio Rao ((lk->lk_lock & LK_SHARE) == 0 && (slocked || 1577047dd67eSAttilio Rao (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))))) 157884887fa3SAttilio Rao panic("Lock %s not %slocked @ %s:%d\n", 1579047dd67eSAttilio Rao lk->lock_object.lo_name, slocked ? "share" : "", 158084887fa3SAttilio Rao file, line); 1581047dd67eSAttilio Rao 1582047dd67eSAttilio Rao if ((lk->lk_lock & LK_SHARE) == 0) { 1583047dd67eSAttilio Rao if (lockmgr_recursed(lk)) { 158484887fa3SAttilio Rao if (what & KA_NOTRECURSED) 158584887fa3SAttilio Rao panic("Lock %s recursed @ %s:%d\n", 1586047dd67eSAttilio Rao lk->lock_object.lo_name, file, 1587047dd67eSAttilio Rao line); 158884887fa3SAttilio Rao } else if (what & KA_RECURSED) 158984887fa3SAttilio Rao panic("Lock %s not recursed @ %s:%d\n", 1590047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 159184887fa3SAttilio Rao } 159284887fa3SAttilio Rao break; 159384887fa3SAttilio Rao case KA_XLOCKED: 159484887fa3SAttilio Rao case KA_XLOCKED | KA_NOTRECURSED: 159584887fa3SAttilio Rao case KA_XLOCKED | KA_RECURSED: 1596047dd67eSAttilio Rao if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)) 159784887fa3SAttilio Rao panic("Lock %s not exclusively locked @ %s:%d\n", 1598047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 1599047dd67eSAttilio Rao if (lockmgr_recursed(lk)) { 160084887fa3SAttilio Rao if (what & KA_NOTRECURSED) 160184887fa3SAttilio Rao panic("Lock %s recursed @ %s:%d\n", 1602047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 160384887fa3SAttilio Rao } else if (what & KA_RECURSED) 160484887fa3SAttilio Rao panic("Lock %s not recursed @ %s:%d\n", 1605047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 160684887fa3SAttilio Rao break; 160784887fa3SAttilio Rao case KA_UNLOCKED: 1608047dd67eSAttilio Rao if (lockmgr_xlocked(lk) || lockmgr_disowned(lk)) 160984887fa3SAttilio Rao panic("Lock %s exclusively locked @ %s:%d\n", 1610047dd67eSAttilio Rao lk->lock_object.lo_name, file, line); 161184887fa3SAttilio Rao break; 161284887fa3SAttilio Rao default: 1613047dd67eSAttilio Rao panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, 
file, 1614047dd67eSAttilio Rao line); 161584887fa3SAttilio Rao } 161684887fa3SAttilio Rao } 1617047dd67eSAttilio Rao #endif 161884887fa3SAttilio Rao 1619be6847d7SJohn Baldwin #ifdef DDB 1620462a7addSJohn Baldwin int 1621462a7addSJohn Baldwin lockmgr_chain(struct thread *td, struct thread **ownerp) 1622462a7addSJohn Baldwin { 1623047dd67eSAttilio Rao struct lock *lk; 1624462a7addSJohn Baldwin 1625047dd67eSAttilio Rao lk = td->td_wchan; 1626462a7addSJohn Baldwin 1627047dd67eSAttilio Rao if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr) 1628462a7addSJohn Baldwin return (0); 1629047dd67eSAttilio Rao db_printf("blocked on lockmgr %s", lk->lock_object.lo_name); 1630047dd67eSAttilio Rao if (lk->lk_lock & LK_SHARE) 1631047dd67eSAttilio Rao db_printf("SHARED (count %ju)\n", 1632047dd67eSAttilio Rao (uintmax_t)LK_SHARERS(lk->lk_lock)); 1633047dd67eSAttilio Rao else 1634047dd67eSAttilio Rao db_printf("EXCL\n"); 1635047dd67eSAttilio Rao *ownerp = lockmgr_xholder(lk); 1636462a7addSJohn Baldwin 1637462a7addSJohn Baldwin return (1); 1638462a7addSJohn Baldwin } 1639462a7addSJohn Baldwin 1640047dd67eSAttilio Rao static void 1641d576deedSPawel Jakub Dawidek db_show_lockmgr(const struct lock_object *lock) 1642be6847d7SJohn Baldwin { 1643be6847d7SJohn Baldwin struct thread *td; 1644d576deedSPawel Jakub Dawidek const struct lock *lk; 1645be6847d7SJohn Baldwin 1646d576deedSPawel Jakub Dawidek lk = (const struct lock *)lock; 1647be6847d7SJohn Baldwin 1648be6847d7SJohn Baldwin db_printf(" state: "); 1649047dd67eSAttilio Rao if (lk->lk_lock == LK_UNLOCKED) 1650be6847d7SJohn Baldwin db_printf("UNLOCKED\n"); 1651047dd67eSAttilio Rao else if (lk->lk_lock & LK_SHARE) 1652047dd67eSAttilio Rao db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock)); 1653047dd67eSAttilio Rao else { 1654047dd67eSAttilio Rao td = lockmgr_xholder(lk); 1655047dd67eSAttilio Rao if (td == (struct thread *)LK_KERNPROC) 1656047dd67eSAttilio Rao db_printf("XLOCK: LK_KERNPROC\n"); 1657047dd67eSAttilio Rao else 1658047dd67eSAttilio Rao db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1659047dd67eSAttilio Rao td->td_tid, td->td_proc->p_pid, 1660047dd67eSAttilio Rao td->td_proc->p_comm); 1661047dd67eSAttilio Rao if (lockmgr_recursed(lk)) 1662047dd67eSAttilio Rao db_printf(" recursed: %d\n", lk->lk_recurse); 1663047dd67eSAttilio Rao } 1664047dd67eSAttilio Rao db_printf(" waiters: "); 1665047dd67eSAttilio Rao switch (lk->lk_lock & LK_ALL_WAITERS) { 1666047dd67eSAttilio Rao case LK_SHARED_WAITERS: 1667047dd67eSAttilio Rao db_printf("shared\n"); 1668e5023dd9SEdward Tomasz Napierala break; 1669047dd67eSAttilio Rao case LK_EXCLUSIVE_WAITERS: 1670047dd67eSAttilio Rao db_printf("exclusive\n"); 1671047dd67eSAttilio Rao break; 1672047dd67eSAttilio Rao case LK_ALL_WAITERS: 1673047dd67eSAttilio Rao db_printf("shared and exclusive\n"); 1674047dd67eSAttilio Rao break; 1675047dd67eSAttilio Rao default: 1676047dd67eSAttilio Rao db_printf("none\n"); 1677047dd67eSAttilio Rao } 1678651175c9SAttilio Rao db_printf(" spinners: "); 1679651175c9SAttilio Rao if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS) 1680651175c9SAttilio Rao db_printf("exclusive\n"); 1681651175c9SAttilio Rao else 1682651175c9SAttilio Rao db_printf("none\n"); 1683be6847d7SJohn Baldwin } 1684be6847d7SJohn Baldwin #endif 1685
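/*
 * Usage sketch (illustrative only; "foo_softc", "foo_attach", "foo_op" and
 * "foo_detach" are hypothetical names, not part of this file).  A typical
 * consumer initializes the lock once, brackets its critical sections with
 * lockmgr(9) requests and destroys the lock on teardown:
 *
 *	struct foo_softc {
 *		struct lock	fs_lock;
 *	};
 *
 *	static void
 *	foo_attach(struct foo_softc *sc)
 *	{
 *		lockinit(&sc->fs_lock, PVFS, "foolk", 0, 0);
 *	}
 *
 *	static void
 *	foo_op(struct foo_softc *sc)
 *	{
 *		lockmgr(&sc->fs_lock, LK_EXCLUSIVE, NULL);
 *		... modify state protected by fs_lock ...
 *		lockmgr(&sc->fs_lock, LK_RELEASE, NULL);
 *	}
 *
 *	static void
 *	foo_detach(struct foo_softc *sc)
 *	{
 *		lockmgr(&sc->fs_lock, LK_DRAIN, NULL);
 *		lockmgr(&sc->fs_lock, LK_RELEASE, NULL);
 *		lockdestroy(&sc->fs_lock);
 *	}
 *
 * LK_SHARED may be requested instead of LK_EXCLUSIVE for readers; the
 * __lockmgr_args() switch above lists the full set of operations.
 */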