/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/lockstat.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#endif

#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (__predict_false(_i > 0)) {					\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (__predict_false(mtx_owned(&Giant))) {			\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)
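
/*
 * Illustrative note (editorial, not from the original sources): the GIANT_*
 * macros above let a thread about to sleep drop a recursively held Giant and
 * reacquire it to the same depth afterwards.  The intended pairing is:
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();		(drops Giant completely, counting depth in _i)
 *	sleepq_wait(...);	(sleep without holding Giant)
 *	GIANT_RESTORE();	(relocks Giant _i times)
 */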

static bool __always_inline
LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
{

	if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
	    LK_SHARE)
		return (true);
	if (fp || (!(x & LK_SHARE)))
		return (false);
	if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
	    (curthread->td_pflags & TDP_DEADLKTREAT))
		return (true);
	return (false);
}
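
/*
 * Editorial summary of LK_CAN_SHARE() above (a restatement, not from the
 * original sources): a shared request proceeds immediately when the lock is
 * share-locked with no exclusive waiters or spinners.  Otherwise a fast-path
 * caller (fp == true) or a write-locked lock always fails, and jumping the
 * queue of exclusive waiters is granted only to deadlock-avoidance cases: a
 * thread already holding shared locks (unless LK_NODDLKTREAT is passed) or
 * one flagged with TDP_DEADLKTREAT.
 */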

#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked_v(v)						\
	(((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

#define	lockmgr_xlocked(lk) lockmgr_xlocked_v((lk)->lk_lock)

static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

struct lockmgr_wait {
	const char *iwmesg;
	int ipri;
	int itimo;
};

static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
    int flags, bool fp);
static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);

static void
lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
{
	struct lock_class *class;

	if (flags & LK_INTERLOCK) {
		class = LOCK_CLASS(ilk);
		class->lc_unlock(ilk);
	}

	if (__predict_false(wakeup_swapper))
		kick_proc0();
}

static void
lockmgr_note_shared_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
	    waittime, file, line, LOCKSTAT_READER);
	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
	TD_LOCKS_INC(curthread);
	TD_SLOCKS_INC(curthread);
	STACK_SAVE(lk);
}

static void
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
{

	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
}

static void
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
	    line);
	TD_LOCKS_INC(curthread);
	STACK_SAVE(lk);
}

static void
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
{

	if (LK_HOLDER(lk->lk_lock) != LK_KERNPROC) {
		WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_DEC(curthread);
	}
	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
	    line);
}

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This routine assumes the sleepq chain lock is held on entry and returns
 * with it unheld.  It also assumes the generic interlock is sane and
 * previously checked.
 * If LK_INTERLOCK is specified the interlock is not reacquired after the
 * sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decide how to sleep, based on the timeout and catch flags.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
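
/*
 * Illustrative note (editorial): with LK_SLEEPFAIL a clean wakeup is
 * deliberately turned into ENOLCK, telling the caller that the lock state
 * may have changed across the sleep and the whole operation should be
 * retried from scratch.  A hypothetical caller pattern:
 *
 *	error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, queue);
 *	if (error != 0)
 *		return (error);	(propagates ENOLCK/EINTR/EWOULDBLOCK)
 */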

static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x, orig_x;
	u_int realexslp;
	int queue, wakeup_swapper;

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;
		if (lockmgr_sunlock_try(lk, &x))
			break;

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		orig_x = lk->lk_lock;
retry_sleepq:
		x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Note that lk_exslpfail may overstate the real number of
		 * waiters with the LK_SLEEPFAIL flag on, because such sleeps
		 * may also be interruptible, so lk_exslpfail should be
		 * treated as an upper bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}

		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (lockmgr_sunlock_try(lk, &orig_x)) {
			sleepq_release(&lk->lock_object);
			break;
		}

		x |= LK_SHARERS_LOCK(1);
		if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
			orig_x = x;
			goto retry_sleepq;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
"shared" : 384047dd67eSAttilio Rao "exclusive"); 3852028867dSAttilio Rao wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 386da7bbd2cSJohn Baldwin 0, queue); 387047dd67eSAttilio Rao sleepq_release(&lk->lock_object); 388047dd67eSAttilio Rao break; 389047dd67eSAttilio Rao } 390047dd67eSAttilio Rao 391c00115f1SMateusz Guzik LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER); 392da7bbd2cSJohn Baldwin return (wakeup_swapper); 393047dd67eSAttilio Rao } 394047dd67eSAttilio Rao 395047dd67eSAttilio Rao static void 396d576deedSPawel Jakub Dawidek assert_lockmgr(const struct lock_object *lock, int what) 397f9721b43SAttilio Rao { 398f9721b43SAttilio Rao 399f9721b43SAttilio Rao panic("lockmgr locks do not support assertions"); 400f9721b43SAttilio Rao } 401f9721b43SAttilio Rao 402047dd67eSAttilio Rao static void 4037faf4d90SDavide Italiano lock_lockmgr(struct lock_object *lock, uintptr_t how) 4046e21afd4SJohn Baldwin { 4056e21afd4SJohn Baldwin 4066e21afd4SJohn Baldwin panic("lockmgr locks do not support sleep interlocking"); 4076e21afd4SJohn Baldwin } 4086e21afd4SJohn Baldwin 4097faf4d90SDavide Italiano static uintptr_t 4106e21afd4SJohn Baldwin unlock_lockmgr(struct lock_object *lock) 4116e21afd4SJohn Baldwin { 4126e21afd4SJohn Baldwin 4136e21afd4SJohn Baldwin panic("lockmgr locks do not support sleep interlocking"); 4146e21afd4SJohn Baldwin } 4156e21afd4SJohn Baldwin 416a5aedd68SStacey Son #ifdef KDTRACE_HOOKS 417a5aedd68SStacey Son static int 418d576deedSPawel Jakub Dawidek owner_lockmgr(const struct lock_object *lock, struct thread **owner) 419a5aedd68SStacey Son { 420a5aedd68SStacey Son 421a5aedd68SStacey Son panic("lockmgr locks do not support owner inquiring"); 422a5aedd68SStacey Son } 423a5aedd68SStacey Son #endif 424a5aedd68SStacey Son 42599448ed1SJohn Dyson void 426047dd67eSAttilio Rao lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags) 42799448ed1SJohn Dyson { 4286efc8a16SAttilio Rao int iflags; 4296efc8a16SAttilio Rao 430047dd67eSAttilio Rao MPASS((flags & ~LK_INIT_MASK) == 0); 431353998acSAttilio Rao ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock, 432353998acSAttilio Rao ("%s: lockmgr not aligned for %s: %p", __func__, wmesg, 433353998acSAttilio Rao &lk->lk_lock)); 43499448ed1SJohn Dyson 435f0830182SAttilio Rao iflags = LO_SLEEPABLE | LO_UPGRADABLE; 436f0830182SAttilio Rao if (flags & LK_CANRECURSE) 437f0830182SAttilio Rao iflags |= LO_RECURSABLE; 438047dd67eSAttilio Rao if ((flags & LK_NODUP) == 0) 4396efc8a16SAttilio Rao iflags |= LO_DUPOK; 4407fbfba7bSAttilio Rao if (flags & LK_NOPROFILE) 4417fbfba7bSAttilio Rao iflags |= LO_NOPROFILE; 442047dd67eSAttilio Rao if ((flags & LK_NOWITNESS) == 0) 4436efc8a16SAttilio Rao iflags |= LO_WITNESS; 4447fbfba7bSAttilio Rao if (flags & LK_QUIET) 4457fbfba7bSAttilio Rao iflags |= LO_QUIET; 446e63091eaSMarcel Moolenaar if (flags & LK_IS_VNODE) 447e63091eaSMarcel Moolenaar iflags |= LO_IS_VNODE; 44846713135SGleb Smirnoff if (flags & LK_NEW) 44946713135SGleb Smirnoff iflags |= LO_NEW; 4505fe188b1SMateusz Guzik iflags |= flags & LK_NOSHARE; 451047dd67eSAttilio Rao 452b5fb43e5SJohn Baldwin lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags); 453047dd67eSAttilio Rao lk->lk_lock = LK_UNLOCKED; 454047dd67eSAttilio Rao lk->lk_recurse = 0; 4552028867dSAttilio Rao lk->lk_exslpfail = 0; 456047dd67eSAttilio Rao lk->lk_timo = timo; 457047dd67eSAttilio Rao lk->lk_pri = pri; 458047dd67eSAttilio Rao STACK_ZERO(lk); 45999448ed1SJohn Dyson } 46099448ed1SJohn Dyson 4613634d5b2SJohn Baldwin /* 4623634d5b2SJohn 

/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockdisableshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}

static bool __always_inline
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
{

	/*
	 * If no other thread has an exclusive lock, or
	 * no exclusive waiter is present, bump the count of
	 * sharers.  Since we have to preserve the state of
	 * waiters, if we fail to acquire the shared lock
	 * loop back and retry.
	 */
	*xp = lk->lk_lock;
	while (LK_CAN_SHARE(*xp, flags, fp)) {
		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
		    *xp + LK_ONE_SHARER)) {
			return (true);
		}
	}
	return (false);
}

static bool __always_inline
lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
{

	for (;;) {
		if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
			    *xp - LK_ONE_SHARER))
				return (true);
			continue;
		}
		break;
	}
	return (false);
}
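
/*
 * Editorial note on the two helpers above: atomic_fcmpset_*_ptr() reloads
 * *xp with the current lock word when the compare-and-set fails, so each
 * loop iteration re-evaluates its predicate against fresh state without an
 * explicit re-read of lk->lk_lock.
 */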

static __noinline int
lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	uintptr_t tid, x;
	int error = 0;
	const char *iwmesg;
	int ipri, itimo;

#ifdef KDTRACE_HOOKS
	uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif

	if (KERNEL_PANICKED())
		goto out;

	tid = (uintptr_t)curthread;

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
		    file, line, flags & LK_INTERLOCK ? ilk : NULL);
	for (;;) {
		if (lockmgr_slock_try(lk, &x, flags, false))
			break;
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&lk->lock_object,
		    &contested, &waittime);

		/*
		 * If the lock is already held by curthread in
		 * exclusive mode, avoid a deadlock.
		 */
		if (LK_HOLDER(x) == tid) {
			LOCK_LOG2(lk,
			    "%s: %p already held in exclusive mode",
			    __func__, lk);
			error = EDEADLK;
			break;
		}

		/*
		 * If the lock is expected to not sleep just give up
		 * and return.
		 */
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock;
retry_sleepq:

		/*
		 * If the lock can be acquired in shared mode, try
		 * again.
		 */
		if (LK_CAN_SHARE(x, flags, false)) {
			sleepq_release(&lk->lock_object);
			continue;
		}

		/*
		 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
		 * loop back and retry.
		 */
		if ((x & LK_SHARED_WAITERS) == 0) {
			if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
			    x | LK_SHARED_WAITERS)) {
				goto retry_sleepq;
			}
			LOCK_LOG2(lk, "%s: %p set shared waiters flag",
			    __func__, lk);
		}

		if (lwa == NULL) {
			iwmesg = lk->lock_object.lo_name;
			ipri = lk->lk_pri;
			itimo = lk->lk_timo;
		} else {
			iwmesg = lwa->iwmesg;
			ipri = lwa->ipri;
			itimo = lwa->itimo;
		}

		/*
		 * Since we have been unable to acquire the shared lock
		 * and the shared waiters flag is set, we will sleep.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		    SQ_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
		flags &= ~LK_INTERLOCK;
		if (error) {
			LOCK_LOG3(lk,
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
			break;
		}
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
		    __func__, lk);
	}
	if (error == 0) {
#ifdef KDTRACE_HOOKS
		if (sleep_time != 0)
			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
			    LOCKSTAT_READER, (x & LK_SHARE) == 0,
			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
		lockmgr_note_shared_acquire(lk, contested, waittime,
		    file, line, flags);
#else
		lockmgr_note_shared_acquire(lk, 0, 0, file, line,
		    flags);
#endif
	}

out:
	lockmgr_exit(flags, ilk, 0);
	return (error);
}

static __noinline int
lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	struct lock_class *class;
	uintptr_t tid, x, v;
	int error = 0;
	const char *iwmesg;
	int ipri, itimo;

#ifdef KDTRACE_HOOKS
	uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif

	if (KERNEL_PANICKED())
		goto out;

	tid = (uintptr_t)curthread;

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
		    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
		    ilk : NULL);

	/*
	 * If curthread already holds the lock and this one is
	 * allowed to recurse, simply recurse on it.
	 */
	if (lockmgr_xlocked(lk)) {
		if ((flags & LK_CANRECURSE) == 0 &&
		    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
			/*
			 * If the caller asked for a non-blocking try,
			 * give up and return instead of panicking.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk,
				    "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				goto out;
			}
			if (flags & LK_INTERLOCK) {
				class = LOCK_CLASS(ilk);
				class->lc_unlock(ilk);
			}
			panic("%s: recursing on non recursive lockmgr %p "
			    "@ %s:%d\n", __func__, lk, file, line);
		}
		lk->lk_recurse++;
		LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
		    lk->lk_recurse, file, line);
		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		TD_LOCKS_INC(curthread);
		goto out;
	}

	for (;;) {
		if (lk->lk_lock == LK_UNLOCKED &&
		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
			break;
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&lk->lock_object,
		    &contested, &waittime);

		/*
		 * If the lock is expected to not sleep just give up
		 * and return.
		 */
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock;
retry_sleepq:

		/*
		 * If the lock has been released while we spun on
		 * the sleepqueue chain lock just try again.
		 */
		if (x == LK_UNLOCKED) {
			sleepq_release(&lk->lock_object);
			continue;
		}

		/*
		 * The lock can be in the state where there is a
		 * pending queue of waiters, but still no owner.
		 * This happens when the lock is contested and an
		 * owner is going to claim the lock.
		 * If curthread is the one successfully acquiring it
		 * claim lock ownership and return, preserving waiters
		 * flags.
		 */
		v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		if ((x & ~v) == LK_UNLOCKED) {
			v &= ~LK_EXCLUSIVE_SPINNERS;
			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
			    tid | v)) {
				sleepq_release(&lk->lock_object);
				LOCK_LOG2(lk,
				    "%s: %p claimed by a new writer",
				    __func__, lk);
				break;
			}
			goto retry_sleepq;
		}

		/*
		 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
		 * fail, loop back and retry.
		 */
		if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
			if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
			    x | LK_EXCLUSIVE_WAITERS)) {
				goto retry_sleepq;
			}
			LOCK_LOG2(lk, "%s: %p set excl waiters flag",
			    __func__, lk);
		}

		if (lwa == NULL) {
			iwmesg = lk->lock_object.lo_name;
			ipri = lk->lk_pri;
			itimo = lk->lk_timo;
		} else {
			iwmesg = lwa->iwmesg;
			ipri = lwa->ipri;
			itimo = lwa->itimo;
		}

		/*
		 * Since we have been unable to acquire the
		 * exclusive lock and the exclusive waiters flag
		 * is set, we will sleep.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		    SQ_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
		flags &= ~LK_INTERLOCK;
		if (error) {
			LOCK_LOG3(lk,
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
			break;
		}
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
		    __func__, lk);
	}
	if (error == 0) {
#ifdef KDTRACE_HOOKS
		if (sleep_time != 0)
			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
			    LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
		lockmgr_note_exclusive_acquire(lk, contested, waittime,
		    file, line, flags);
#else
		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
		    flags);
#endif
	}

out:
	lockmgr_exit(flags, ilk, 0);
	return (error);
}

static __noinline int
lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	uintptr_t tid, x, v;
	int error = 0;
	int wakeup_swapper = 0;
	int op;

	if (KERNEL_PANICKED())
		goto out;

	tid = (uintptr_t)curthread;

	_lockmgr_assert(lk, KA_SLOCKED, file, line);
	v = lk->lk_lock;
	x = v & LK_ALL_WAITERS;
	v &= LK_EXCLUSIVE_SPINNERS;

	/*
	 * Try to switch from one shared lock to an exclusive one.
	 * We need to preserve waiters flags during the operation.
	 */
	if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
	    tid | x)) {
		LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
		    line);
		WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
		TD_SLOCKS_DEC(curthread);
		goto out;
	}

	op = flags & LK_TYPE_MASK;

	/*
	 * In LK_TRYUPGRADE mode, do not drop the lock,
	 * returning EBUSY instead.
	 */
	if (op == LK_TRYUPGRADE) {
		LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
		    __func__, lk);
		error = EBUSY;
		goto out;
	}

	/*
	 * We have been unable to succeed in upgrading, so just
	 * give up the shared lock.
	 */
	lockmgr_note_shared_release(lk, file, line);
	wakeup_swapper |= wakeupshlk(lk, file, line);
	error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
	flags &= ~LK_INTERLOCK;
out:
	lockmgr_exit(flags, ilk, wakeup_swapper);
	return (error);
}
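
/*
 * Illustrative example (editorial; the lock variable is hypothetical):
 * LK_TRYUPGRADE fails with EBUSY while still holding the shared lock,
 * whereas a plain LK_UPGRADE may drop the shared lock and sleep:
 *
 *	lockmgr(&lk, LK_SHARED, NULL);
 *	if (lockmgr(&lk, LK_TRYUPGRADE, NULL) != 0) {
 *		(still share-locked here; a blocking upgrade may
 *		 temporarily lose the lock)
 *		error = lockmgr(&lk, LK_UPGRADE, NULL);
 *	}
 */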

int
lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
	struct lock_class *class;
	uintptr_t x, tid;
	u_int op;
	bool locked;

	if (KERNEL_PANICKED())
		return (0);

	op = flags & LK_TYPE_MASK;
	locked = false;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
		if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
			break;
		if (lockmgr_slock_try(lk, &x, flags, true)) {
			lockmgr_note_shared_acquire(lk, 0, 0,
			    file, line, flags);
			locked = true;
		} else {
			return (lockmgr_slock_hard(lk, flags, ilk, file, line,
			    NULL));
		}
		break;
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);
		tid = (uintptr_t)curthread;
		if (lk->lk_lock == LK_UNLOCKED &&
		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
			    flags);
			locked = true;
		} else {
			return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
			    NULL));
		}
		break;
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
	default:
		break;
	}
	if (__predict_true(locked)) {
		if (__predict_false(flags & LK_INTERLOCK)) {
			class = LOCK_CLASS(ilk);
			class->lc_unlock(ilk);
		}
		return (0);
	} else {
		return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
	}
}

static __noinline int
lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags,
    struct lock_object *ilk, const char *file, int line)
{
	int wakeup_swapper = 0;

	if (KERNEL_PANICKED())
		goto out;

	wakeup_swapper = wakeupshlk(lk, file, line);

out:
	lockmgr_exit(flags, ilk, wakeup_swapper);
	return (0);
}

static __noinline int
lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags,
    struct lock_object *ilk, const char *file, int line)
{
	uintptr_t tid, v;
	int wakeup_swapper = 0;
	u_int realexslp;
	int queue;

	if (KERNEL_PANICKED())
		goto out;

	tid = (uintptr_t)curthread;

	/*
	 * As a first pass, treat the lock as if it had no waiters.
	 * Fix up the tid var if the lock has been disowned.
	 */
	if (LK_HOLDER(x) == LK_KERNPROC)
		tid = LK_KERNPROC;

	/*
	 * The lock is held in exclusive mode.
	 * If the lock is recursed also, then unrecurse it.
	 */
	if (lockmgr_xlocked_v(x) && lockmgr_recursed(lk)) {
		LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
		lk->lk_recurse--;
		goto out;
	}
	if (tid != LK_KERNPROC)
		LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
		    LOCKSTAT_WRITER);

	if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
		goto out;

	sleepq_lock(&lk->lock_object);
	x = lk->lk_lock;
	v = LK_UNLOCKED;

	/*
	 * If the lock has exclusive waiters, give them
	 * preference in order to avoid deadlock with
	 * shared runners-up.
	 * If interruptible sleeps left the exclusive queue
	 * empty avoid starvation of the threads sleeping
	 * on the shared queue by giving them precedence
	 * and cleaning up the exclusive waiters bit anyway.
	 * Note that lk_exslpfail may overstate the real
	 * number of waiters with the LK_SLEEPFAIL flag on,
	 * because such sleeps may also be interruptible,
	 * so lk_exslpfail should be treated as an upper
	 * bound, including the edge cases.
	 */
	MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
	realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
	if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
		if (lk->lk_exslpfail < realexslp) {
			lk->lk_exslpfail = 0;
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			lk->lk_exslpfail = 0;
			LOCK_LOG2(lk,
			    "%s: %p has only LK_SLEEPFAIL sleepers",
			    __func__, lk);
			LOCK_LOG2(lk,
		    "%s: %p waking up threads on the exclusive queue",
			    __func__, lk);
			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
			queue = SQ_SHARED_QUEUE;
		}
	} else {

		/*
		 * Exclusive waiters sleeping with LK_SLEEPFAIL
		 * on and using interruptible sleeps/timeout
		 * may have left spurious lk_exslpfail counts
		 * on, so clean it up anyway.
		 */
		lk->lk_exslpfail = 0;
		queue = SQ_SHARED_QUEUE;
	}

	LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
	    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
	    "exclusive");
	atomic_store_rel_ptr(&lk->lk_lock, v);
	wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
	sleepq_release(&lk->lock_object);

out:
	lockmgr_exit(flags, ilk, wakeup_swapper);
	return (0);
}

/*
 * Lightweight entry points for common operations.
 *
 * Functionality is similar to sx locks, in that none of the additional lockmgr
 * features are supported.  To be clear, these are NOT supported:
 * 1. shared locking disablement
 * 2. returning with an error after sleep
 * 3. unlocking the interlock
 *
 * If in doubt, use lockmgr_lock_flags.
 */
int
lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
{
	uintptr_t x;

	MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
	MPASS((flags & LK_INTERLOCK) == 0);
	MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
		    file, line, NULL);
	if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
		lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
		return (0);
	}

	return (lockmgr_slock_hard(lk, flags, NULL, file, line, NULL));
}
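
/*
 * Illustrative usage of the lightweight entry points (editorial; the lock
 * variable is hypothetical):
 *
 *	error = lockmgr_slock(&lk, LK_SHARED, __FILE__, __LINE__);
 *	if (error == 0) {
 *		(... read under the shared lock ...)
 *		lockmgr_unlock(&lk);
 *	}
 */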
NULL, file, line, NULL));
1167c8b29d12SMateusz Guzik }
1168c8b29d12SMateusz Guzik 
1169c8b29d12SMateusz Guzik int
1170c8b29d12SMateusz Guzik lockmgr_unlock(struct lock *lk)
1171c8b29d12SMateusz Guzik {
1172c8b29d12SMateusz Guzik uintptr_t x, tid;
1173c8b29d12SMateusz Guzik const char *file;
1174c8b29d12SMateusz Guzik int line;
1175c8b29d12SMateusz Guzik 
1176c8b29d12SMateusz Guzik file = __FILE__;
1177c8b29d12SMateusz Guzik line = __LINE__;
1178c8b29d12SMateusz Guzik 
1179c8b29d12SMateusz Guzik _lockmgr_assert(lk, KA_LOCKED, file, line);
1180c8b29d12SMateusz Guzik x = lk->lk_lock;
1181c8b29d12SMateusz Guzik if (__predict_true((x & LK_SHARE) != 0)) {
1182c8b29d12SMateusz Guzik lockmgr_note_shared_release(lk, file, line);
1183c00115f1SMateusz Guzik if (lockmgr_sunlock_try(lk, &x)) {
1184c00115f1SMateusz Guzik LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1185c8b29d12SMateusz Guzik } else {
1186c8b29d12SMateusz Guzik return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1187c8b29d12SMateusz Guzik }
1188c8b29d12SMateusz Guzik } else {
1189c8b29d12SMateusz Guzik tid = (uintptr_t)curthread;
1190c00115f1SMateusz Guzik lockmgr_note_exclusive_release(lk, file, line);
1191c8b29d12SMateusz Guzik if (!lockmgr_recursed(lk) &&
1192c8b29d12SMateusz Guzik atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1193c00115f1SMateusz Guzik LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
1194c8b29d12SMateusz Guzik } else {
1195c8b29d12SMateusz Guzik return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1196c8b29d12SMateusz Guzik }
1197c8b29d12SMateusz Guzik }
1198c8b29d12SMateusz Guzik return (0);
1199c8b29d12SMateusz Guzik }
1200c8b29d12SMateusz Guzik 
12011c6987ebSMateusz Guzik int
12021c6987ebSMateusz Guzik __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
12031c6987ebSMateusz Guzik const char *wmesg, int pri, int timo, const char *file, int line)
12041c6987ebSMateusz Guzik {
12051c6987ebSMateusz Guzik GIANT_DECLARE;
12061c6987ebSMateusz Guzik struct lockmgr_wait lwa;
12071c6987ebSMateusz Guzik struct lock_class *class;
12081c6987ebSMateusz Guzik const char *iwmesg;
12091c6987ebSMateusz Guzik uintptr_t tid, v, x;
12101c6987ebSMateusz Guzik u_int op, realexslp;
12111c6987ebSMateusz Guzik int error, ipri, itimo, queue, wakeup_swapper;
12121c6987ebSMateusz Guzik #ifdef LOCK_PROFILING
12131c6987ebSMateusz Guzik uint64_t waittime = 0;
12141c6987ebSMateusz Guzik int contested = 0;
12151c6987ebSMateusz Guzik #endif
12161c6987ebSMateusz Guzik 
1217879e0604SMateusz Guzik if (KERNEL_PANICKED())
1218b543c98cSConrad Meyer return (0);
1219b543c98cSConrad Meyer 
12201c6987ebSMateusz Guzik error = 0;
12211c6987ebSMateusz Guzik tid = (uintptr_t)curthread;
12221c6987ebSMateusz Guzik op = (flags & LK_TYPE_MASK);
12231c6987ebSMateusz Guzik iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
12241c6987ebSMateusz Guzik ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
12251c6987ebSMateusz Guzik itimo = (timo == LK_TIMO_DEFAULT) ?
lk->lk_timo : timo;
12261c6987ebSMateusz Guzik 
12271c6987ebSMateusz Guzik lwa.iwmesg = iwmesg;
12281c6987ebSMateusz Guzik lwa.ipri = ipri;
12291c6987ebSMateusz Guzik lwa.itimo = itimo;
12301c6987ebSMateusz Guzik 
12311c6987ebSMateusz Guzik MPASS((flags & ~LK_TOTAL_MASK) == 0);
12321c6987ebSMateusz Guzik KASSERT((op & (op - 1)) == 0,
12331c6987ebSMateusz Guzik ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
12341c6987ebSMateusz Guzik KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
12351c6987ebSMateusz Guzik (op != LK_DOWNGRADE && op != LK_RELEASE),
12361c6987ebSMateusz Guzik ("%s: Invalid flags for the requested operation @ %s:%d",
12371c6987ebSMateusz Guzik __func__, file, line));
12381c6987ebSMateusz Guzik KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
12391c6987ebSMateusz Guzik ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
12401c6987ebSMateusz Guzik __func__, file, line));
12411c6987ebSMateusz Guzik KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
12421c6987ebSMateusz Guzik ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
12431c6987ebSMateusz Guzik lk->lock_object.lo_name, file, line));
12441c6987ebSMateusz Guzik 
12451c6987ebSMateusz Guzik class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
12461c6987ebSMateusz Guzik 
12471c6987ebSMateusz Guzik if (lk->lock_object.lo_flags & LK_NOSHARE) {
12481c6987ebSMateusz Guzik switch (op) {
12491c6987ebSMateusz Guzik case LK_SHARED:
12501c6987ebSMateusz Guzik op = LK_EXCLUSIVE;
12511c6987ebSMateusz Guzik break;
12521c6987ebSMateusz Guzik case LK_UPGRADE:
12531c6987ebSMateusz Guzik case LK_TRYUPGRADE:
12541c6987ebSMateusz Guzik case LK_DOWNGRADE:
12551c6987ebSMateusz Guzik _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
12561c6987ebSMateusz Guzik file, line);
12571c6987ebSMateusz Guzik if (flags & LK_INTERLOCK)
12581c6987ebSMateusz Guzik class->lc_unlock(ilk);
12591c6987ebSMateusz Guzik return (0);
12601c6987ebSMateusz Guzik }
12611c6987ebSMateusz Guzik }
12621c6987ebSMateusz Guzik 
12631c6987ebSMateusz Guzik wakeup_swapper = 0;
12641c6987ebSMateusz Guzik switch (op) {
12651c6987ebSMateusz Guzik case LK_SHARED:
12661c6987ebSMateusz Guzik return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
12671c6987ebSMateusz Guzik break;
12681c6987ebSMateusz Guzik case LK_UPGRADE:
12691c6987ebSMateusz Guzik case LK_TRYUPGRADE:
12701c6987ebSMateusz Guzik return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
12711c6987ebSMateusz Guzik break;
12721c6987ebSMateusz Guzik case LK_EXCLUSIVE:
12731c6987ebSMateusz Guzik return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1274047dd67eSAttilio Rao break;
1275047dd67eSAttilio Rao case LK_DOWNGRADE:
12761c7d98d0SAttilio Rao _lockmgr_assert(lk, KA_XLOCKED, file, line);
1277e5f94314SAttilio Rao WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
12781c7d98d0SAttilio Rao 
12791c7d98d0SAttilio Rao /*
12801c7d98d0SAttilio Rao * Panic if the lock is recursed.
12811c7d98d0SAttilio Rao */
12821c7d98d0SAttilio Rao if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
12831c7d98d0SAttilio Rao if (flags & LK_INTERLOCK)
12841c7d98d0SAttilio Rao class->lc_unlock(ilk);
12851c7d98d0SAttilio Rao panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
12861c7d98d0SAttilio Rao __func__, iwmesg, file, line);
12871c7d98d0SAttilio Rao }
1288e5f94314SAttilio Rao TD_SLOCKS_INC(curthread);
1289047dd67eSAttilio Rao 
1290047dd67eSAttilio Rao /*
1291047dd67eSAttilio Rao * In order to preserve waiters flags, just spin.
1292047dd67eSAttilio Rao */
1293047dd67eSAttilio Rao for (;;) {
1294651175c9SAttilio Rao x = lk->lk_lock;
1295651175c9SAttilio Rao MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1296651175c9SAttilio Rao x &= LK_ALL_WAITERS;
1297047dd67eSAttilio Rao if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1298e5f94314SAttilio Rao LK_SHARERS_LOCK(1) | x))
1299047dd67eSAttilio Rao break;
1300047dd67eSAttilio Rao cpu_spinwait();
1301047dd67eSAttilio Rao }
13025b699f16SMark Johnston LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
13035b699f16SMark Johnston LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1304047dd67eSAttilio Rao break;
1305047dd67eSAttilio Rao case LK_RELEASE:
1306047dd67eSAttilio Rao _lockmgr_assert(lk, KA_LOCKED, file, line);
1307047dd67eSAttilio Rao x = lk->lk_lock;
1308047dd67eSAttilio Rao 
13091c6987ebSMateusz Guzik if (__predict_true((x & LK_SHARE) != 0)) {
1310c00115f1SMateusz Guzik lockmgr_note_shared_release(lk, file, line);
13111c6987ebSMateusz Guzik return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1312047dd67eSAttilio Rao } else {
1313c00115f1SMateusz Guzik lockmgr_note_exclusive_release(lk, file, line);
13141c6987ebSMateusz Guzik return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
13152028867dSAttilio Rao }
1316047dd67eSAttilio Rao break;
1317047dd67eSAttilio Rao case LK_DRAIN:
1318e5f94314SAttilio Rao if (LK_CAN_WITNESS(flags))
1319e5f94314SAttilio Rao WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
132024150d37SJohn Baldwin LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
132124150d37SJohn Baldwin ilk : NULL);
1322047dd67eSAttilio Rao 
1323047dd67eSAttilio Rao /*
132496f1567fSKonstantin Belousov * Trying to drain a lock we already own will result in a
1325047dd67eSAttilio Rao * deadlock.
1326047dd67eSAttilio Rao */
1327047dd67eSAttilio Rao if (lockmgr_xlocked(lk)) {
1328047dd67eSAttilio Rao if (flags & LK_INTERLOCK)
1329047dd67eSAttilio Rao class->lc_unlock(ilk);
1330047dd67eSAttilio Rao panic("%s: draining %s with the lock held @ %s:%d\n",
1331047dd67eSAttilio Rao __func__, iwmesg, file, line);
1332047dd67eSAttilio Rao }
1333047dd67eSAttilio Rao 
1334fc4f686dSMateusz Guzik for (;;) {
1335fc4f686dSMateusz Guzik if (lk->lk_lock == LK_UNLOCKED &&
1336fc4f686dSMateusz Guzik atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1337fc4f686dSMateusz Guzik break;
1338fc4f686dSMateusz Guzik 
1339f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
1340f5f9340bSFabien Thomas PMC_SOFT_CALL( , , lock, failed);
1341f5f9340bSFabien Thomas #endif
1342047dd67eSAttilio Rao lock_profile_obtain_lock_failed(&lk->lock_object,
1343047dd67eSAttilio Rao &contested, &waittime);
1344047dd67eSAttilio Rao 
1345047dd67eSAttilio Rao /*
1346047dd67eSAttilio Rao * If the operation is not allowed to sleep, just
1347047dd67eSAttilio Rao * give up and return.
1348047dd67eSAttilio Rao */
1349047dd67eSAttilio Rao if (LK_TRYOP(flags)) {
1350047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p fails the try operation",
1351047dd67eSAttilio Rao __func__, lk);
1352047dd67eSAttilio Rao error = EBUSY;
1353047dd67eSAttilio Rao break;
1354047dd67eSAttilio Rao }
1355047dd67eSAttilio Rao 
1356047dd67eSAttilio Rao /*
1357047dd67eSAttilio Rao * Acquire the sleepqueue chain lock because we
1358047dd67eSAttilio Rao * probably will need to manipulate waiters flags.
1359047dd67eSAttilio Rao */
1360047dd67eSAttilio Rao sleepq_lock(&lk->lock_object);
1361047dd67eSAttilio Rao x = lk->lk_lock;
1362047dd67eSAttilio Rao 
1363047dd67eSAttilio Rao /*
1364047dd67eSAttilio Rao * If the lock has been released while we spun on
1365047dd67eSAttilio Rao * the sleepqueue chain lock, just try again.
1366047dd67eSAttilio Rao */
1367047dd67eSAttilio Rao if (x == LK_UNLOCKED) {
1368047dd67eSAttilio Rao sleepq_release(&lk->lock_object);
1369047dd67eSAttilio Rao continue;
1370047dd67eSAttilio Rao }
1371047dd67eSAttilio Rao 
1372651175c9SAttilio Rao v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1373651175c9SAttilio Rao if ((x & ~v) == LK_UNLOCKED) {
1374651175c9SAttilio Rao v = (x & ~LK_EXCLUSIVE_SPINNERS);
13752028867dSAttilio Rao 
13762028867dSAttilio Rao /*
13772028867dSAttilio Rao * If interruptible sleeps left the exclusive
13782028867dSAttilio Rao * queue empty, avoid starvation of the
13792028867dSAttilio Rao * threads sleeping on the shared queue by
13802028867dSAttilio Rao * giving them precedence and cleaning up the
13812028867dSAttilio Rao * exclusive waiters bit anyway.
1382c636ba83SAttilio Rao * Note that the lk_exslpfail count may
1383c636ba83SAttilio Rao * overstate the real number of waiters with
1384c636ba83SAttilio Rao * the LK_SLEEPFAIL flag on, because such
1385e3043798SPedro F. Giffuni * waiters may also use interruptible
1386aab9c8c2SAttilio Rao * sleeps; treat it as an upper bound,
1387aab9c8c2SAttilio Rao * edge cases included.
13892028867dSAttilio Rao */
1390047dd67eSAttilio Rao if (v & LK_EXCLUSIVE_WAITERS) {
1391047dd67eSAttilio Rao queue = SQ_EXCLUSIVE_QUEUE;
1392047dd67eSAttilio Rao v &= ~LK_EXCLUSIVE_WAITERS;
1393047dd67eSAttilio Rao } else {
13949dbf7a62SAttilio Rao 
13959dbf7a62SAttilio Rao /*
13969dbf7a62SAttilio Rao * Exclusive waiters sleeping with
13979dbf7a62SAttilio Rao * LK_SLEEPFAIL on and using
13989dbf7a62SAttilio Rao * interruptible sleeps/timeout may
13999dbf7a62SAttilio Rao * have left spurious lk_exslpfail
14009dbf7a62SAttilio Rao * counts behind, so clean them up anyway.
14019dbf7a62SAttilio Rao */
1402047dd67eSAttilio Rao MPASS(v & LK_SHARED_WAITERS);
14039dbf7a62SAttilio Rao lk->lk_exslpfail = 0;
1404047dd67eSAttilio Rao queue = SQ_SHARED_QUEUE;
1405047dd67eSAttilio Rao v &= ~LK_SHARED_WAITERS;
1406047dd67eSAttilio Rao }
14072028867dSAttilio Rao if (queue == SQ_EXCLUSIVE_QUEUE) {
14082028867dSAttilio Rao realexslp =
14092028867dSAttilio Rao sleepq_sleepcnt(&lk->lock_object,
14102028867dSAttilio Rao SQ_EXCLUSIVE_QUEUE);
14112028867dSAttilio Rao if (lk->lk_exslpfail >= realexslp) {
14122028867dSAttilio Rao lk->lk_exslpfail = 0;
14132028867dSAttilio Rao queue = SQ_SHARED_QUEUE;
14142028867dSAttilio Rao v &= ~LK_SHARED_WAITERS;
14152028867dSAttilio Rao if (realexslp != 0) {
14162028867dSAttilio Rao LOCK_LOG2(lk,
14172028867dSAttilio Rao "%s: %p has only LK_SLEEPFAIL sleepers",
14182028867dSAttilio Rao __func__, lk);
14192028867dSAttilio Rao LOCK_LOG2(lk,
14202028867dSAttilio Rao "%s: %p waking up threads on the exclusive queue",
14212028867dSAttilio Rao __func__, lk);
14222028867dSAttilio Rao wakeup_swapper =
14232028867dSAttilio Rao sleepq_broadcast(
14242028867dSAttilio Rao &lk->lock_object,
14252028867dSAttilio Rao SLEEPQ_LK, 0,
14262028867dSAttilio Rao SQ_EXCLUSIVE_QUEUE);
14272028867dSAttilio Rao }
14282028867dSAttilio Rao } else
14292028867dSAttilio Rao lk->lk_exslpfail = 0;
14302028867dSAttilio Rao }
1431047dd67eSAttilio Rao if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1432047dd67eSAttilio Rao sleepq_release(&lk->lock_object);
1433047dd67eSAttilio Rao continue;
1434047dd67eSAttilio Rao }
1435047dd67eSAttilio Rao LOCK_LOG3(lk,
1436047dd67eSAttilio Rao "%s: %p waking up all threads on the %s queue",
1437047dd67eSAttilio Rao __func__, lk, queue == SQ_SHARED_QUEUE ?
1438047dd67eSAttilio Rao "shared" : "exclusive");
1439814f26daSJohn Baldwin wakeup_swapper |= sleepq_broadcast(
1440da7bbd2cSJohn Baldwin &lk->lock_object, SLEEPQ_LK, 0, queue);
1441047dd67eSAttilio Rao 
1442047dd67eSAttilio Rao /*
1443047dd67eSAttilio Rao * If shared waiters have been woken up, we need
1444047dd67eSAttilio Rao * to wait for one of them to acquire the lock
1445047dd67eSAttilio Rao * before setting the exclusive waiters flag,
1446047dd67eSAttilio Rao * in order to avoid a deadlock.
1447047dd67eSAttilio Rao */
1448047dd67eSAttilio Rao if (queue == SQ_SHARED_QUEUE) {
1449047dd67eSAttilio Rao for (v = lk->lk_lock;
1450047dd67eSAttilio Rao (v & LK_SHARE) && !LK_SHARERS(v);
1451047dd67eSAttilio Rao v = lk->lk_lock)
1452047dd67eSAttilio Rao cpu_spinwait();
1453047dd67eSAttilio Rao }
1454047dd67eSAttilio Rao }
1455047dd67eSAttilio Rao 
1456047dd67eSAttilio Rao /*
1457047dd67eSAttilio Rao * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
1458047dd67eSAttilio Rao * fail, loop back and retry.
1459047dd67eSAttilio Rao */
1460047dd67eSAttilio Rao if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1461047dd67eSAttilio Rao if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1462047dd67eSAttilio Rao x | LK_EXCLUSIVE_WAITERS)) {
1463047dd67eSAttilio Rao sleepq_release(&lk->lock_object);
1464047dd67eSAttilio Rao continue;
1465047dd67eSAttilio Rao }
1466047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1467047dd67eSAttilio Rao __func__, lk);
1468047dd67eSAttilio Rao }
1469047dd67eSAttilio Rao 
1470047dd67eSAttilio Rao /*
1471047dd67eSAttilio Rao * As long as we have been unable to acquire the
1472047dd67eSAttilio Rao * exclusive lock and the exclusive waiters flag
1473047dd67eSAttilio Rao * is set, we will sleep.
1474047dd67eSAttilio Rao */ 1475047dd67eSAttilio Rao if (flags & LK_INTERLOCK) { 1476047dd67eSAttilio Rao class->lc_unlock(ilk); 1477047dd67eSAttilio Rao flags &= ~LK_INTERLOCK; 1478047dd67eSAttilio Rao } 1479e5f94314SAttilio Rao GIANT_SAVE(); 1480047dd67eSAttilio Rao sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK, 1481047dd67eSAttilio Rao SQ_EXCLUSIVE_QUEUE); 1482047dd67eSAttilio Rao sleepq_wait(&lk->lock_object, ipri & PRIMASK); 1483e5f94314SAttilio Rao GIANT_RESTORE(); 1484047dd67eSAttilio Rao LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 1485047dd67eSAttilio Rao __func__, lk); 1486047dd67eSAttilio Rao } 1487047dd67eSAttilio Rao 1488047dd67eSAttilio Rao if (error == 0) { 1489047dd67eSAttilio Rao lock_profile_obtain_lock_success(&lk->lock_object, 1490047dd67eSAttilio Rao contested, waittime, file, line); 1491047dd67eSAttilio Rao LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0, 1492047dd67eSAttilio Rao lk->lk_recurse, file, line); 1493e5f94314SAttilio Rao WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 1494e5f94314SAttilio Rao LK_TRYWIT(flags), file, line); 1495047dd67eSAttilio Rao TD_LOCKS_INC(curthread); 1496047dd67eSAttilio Rao STACK_SAVE(lk); 1497047dd67eSAttilio Rao } 1498047dd67eSAttilio Rao break; 1499047dd67eSAttilio Rao default: 1500047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 1501047dd67eSAttilio Rao class->lc_unlock(ilk); 1502047dd67eSAttilio Rao panic("%s: unknown lockmgr request 0x%x\n", __func__, op); 1503047dd67eSAttilio Rao } 1504047dd67eSAttilio Rao 1505047dd67eSAttilio Rao if (flags & LK_INTERLOCK) 1506047dd67eSAttilio Rao class->lc_unlock(ilk); 1507da7bbd2cSJohn Baldwin if (wakeup_swapper) 1508da7bbd2cSJohn Baldwin kick_proc0(); 1509047dd67eSAttilio Rao 1510047dd67eSAttilio Rao return (error); 1511047dd67eSAttilio Rao } 1512047dd67eSAttilio Rao 1513d7a7e179SAttilio Rao void 1514047dd67eSAttilio Rao _lockmgr_disown(struct lock *lk, const char *file, int line) 1515047dd67eSAttilio Rao { 1516047dd67eSAttilio Rao uintptr_t tid, x; 1517047dd67eSAttilio Rao 151835370593SAndriy Gapon if (SCHEDULER_STOPPED()) 151935370593SAndriy Gapon return; 152035370593SAndriy Gapon 1521047dd67eSAttilio Rao tid = (uintptr_t)curthread; 15221c7d98d0SAttilio Rao _lockmgr_assert(lk, KA_XLOCKED, file, line); 15231c7d98d0SAttilio Rao 15241c7d98d0SAttilio Rao /* 15251c7d98d0SAttilio Rao * Panic if the lock is recursed. 15261c7d98d0SAttilio Rao */ 15271c7d98d0SAttilio Rao if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) 15281c7d98d0SAttilio Rao panic("%s: disown a recursed lockmgr @ %s:%d\n", 15291c7d98d0SAttilio Rao __func__, file, line); 1530047dd67eSAttilio Rao 1531047dd67eSAttilio Rao /* 153296f1567fSKonstantin Belousov * If the owner is already LK_KERNPROC just skip the whole operation. 1533047dd67eSAttilio Rao */ 1534047dd67eSAttilio Rao if (LK_HOLDER(lk->lk_lock) != tid) 1535047dd67eSAttilio Rao return; 153604a28689SJeff Roberson lock_profile_release_lock(&lk->lock_object); 15375b699f16SMark Johnston LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER); 1538e5f94314SAttilio Rao LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line); 1539e5f94314SAttilio Rao WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line); 1540e5f94314SAttilio Rao TD_LOCKS_DEC(curthread); 1541337c5ff4SAttilio Rao STACK_SAVE(lk); 1542047dd67eSAttilio Rao 1543047dd67eSAttilio Rao /* 1544047dd67eSAttilio Rao * In order to preserve waiters flags, just spin. 
1545047dd67eSAttilio Rao */ 1546047dd67eSAttilio Rao for (;;) { 1547651175c9SAttilio Rao x = lk->lk_lock; 1548651175c9SAttilio Rao MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0); 1549651175c9SAttilio Rao x &= LK_ALL_WAITERS; 155022dd228dSAttilio Rao if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x, 1551e5f94314SAttilio Rao LK_KERNPROC | x)) 1552047dd67eSAttilio Rao return; 1553047dd67eSAttilio Rao cpu_spinwait(); 1554047dd67eSAttilio Rao } 1555047dd67eSAttilio Rao } 1556047dd67eSAttilio Rao 1557047dd67eSAttilio Rao void 1558d576deedSPawel Jakub Dawidek lockmgr_printinfo(const struct lock *lk) 1559d7a7e179SAttilio Rao { 1560d7a7e179SAttilio Rao struct thread *td; 1561047dd67eSAttilio Rao uintptr_t x; 1562d7a7e179SAttilio Rao 1563047dd67eSAttilio Rao if (lk->lk_lock == LK_UNLOCKED) 1564047dd67eSAttilio Rao printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name); 1565047dd67eSAttilio Rao else if (lk->lk_lock & LK_SHARE) 1566047dd67eSAttilio Rao printf("lock type %s: SHARED (count %ju)\n", 1567047dd67eSAttilio Rao lk->lock_object.lo_name, 1568047dd67eSAttilio Rao (uintmax_t)LK_SHARERS(lk->lk_lock)); 1569047dd67eSAttilio Rao else { 1570047dd67eSAttilio Rao td = lockmgr_xholder(lk); 1571e64b4fa8SKonstantin Belousov if (td == (struct thread *)LK_KERNPROC) 1572e64b4fa8SKonstantin Belousov printf("lock type %s: EXCL by KERNPROC\n", 1573e64b4fa8SKonstantin Belousov lk->lock_object.lo_name); 1574e64b4fa8SKonstantin Belousov else 15752573ea5fSIvan Voras printf("lock type %s: EXCL by thread %p " 1576e64b4fa8SKonstantin Belousov "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, 1577e64b4fa8SKonstantin Belousov td, td->td_proc->p_pid, td->td_proc->p_comm, 1578e64b4fa8SKonstantin Belousov td->td_tid); 1579d7a7e179SAttilio Rao } 1580d7a7e179SAttilio Rao 1581047dd67eSAttilio Rao x = lk->lk_lock; 1582047dd67eSAttilio Rao if (x & LK_EXCLUSIVE_WAITERS) 1583047dd67eSAttilio Rao printf(" with exclusive waiters pending\n"); 1584047dd67eSAttilio Rao if (x & LK_SHARED_WAITERS) 1585047dd67eSAttilio Rao printf(" with shared waiters pending\n"); 1586651175c9SAttilio Rao if (x & LK_EXCLUSIVE_SPINNERS) 1587651175c9SAttilio Rao printf(" with exclusive spinners pending\n"); 1588047dd67eSAttilio Rao 1589047dd67eSAttilio Rao STACK_PRINT(lk); 1590047dd67eSAttilio Rao } 1591047dd67eSAttilio Rao 159299448ed1SJohn Dyson int 1593d576deedSPawel Jakub Dawidek lockstatus(const struct lock *lk) 159499448ed1SJohn Dyson { 1595047dd67eSAttilio Rao uintptr_t v, x; 1596047dd67eSAttilio Rao int ret; 159799448ed1SJohn Dyson 1598047dd67eSAttilio Rao ret = LK_SHARED; 1599047dd67eSAttilio Rao x = lk->lk_lock; 1600047dd67eSAttilio Rao v = LK_HOLDER(x); 16010e9eb108SAttilio Rao 1602047dd67eSAttilio Rao if ((x & LK_SHARE) == 0) { 1603047dd67eSAttilio Rao if (v == (uintptr_t)curthread || v == LK_KERNPROC) 1604047dd67eSAttilio Rao ret = LK_EXCLUSIVE; 16056bdfe06aSEivind Eklund else 1606047dd67eSAttilio Rao ret = LK_EXCLOTHER; 1607047dd67eSAttilio Rao } else if (x == LK_UNLOCKED) 1608047dd67eSAttilio Rao ret = 0; 160999448ed1SJohn Dyson 1610047dd67eSAttilio Rao return (ret); 161153bf4bb2SPeter Wemm } 1612be6847d7SJohn Baldwin 161384887fa3SAttilio Rao #ifdef INVARIANT_SUPPORT 1614de5b1952SAlexander Leidinger 1615de5b1952SAlexander Leidinger FEATURE(invariant_support, 1616de5b1952SAlexander Leidinger "Support for modules compiled with INVARIANTS option"); 1617de5b1952SAlexander Leidinger 161884887fa3SAttilio Rao #ifndef INVARIANTS 161984887fa3SAttilio Rao #undef _lockmgr_assert 162084887fa3SAttilio Rao #endif 162184887fa3SAttilio Rao 
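/*
 * Usage sketch (illustrative, not part of the original file): with
 * INVARIANTS enabled, consumers normally reach _lockmgr_assert() below
 * through the lockmgr_assert() wrapper macro from <sys/lockmgr.h>
 * rather than calling it directly, along the lines of:
 *
 *	lockmgr_assert(&bp->b_lock, KA_XLOCKED);
 *
 * which panics early if the lock is not exclusively held by the caller
 * or disowned to LK_KERNPROC.  The "&bp->b_lock" operand is only a
 * hypothetical consumer (a buffer lock); any struct lock works.
 * Without INVARIANTS the empty _lockmgr_assert() macro near the top of
 * this file compiles such checks away.
 */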
162284887fa3SAttilio Rao void
1623d576deedSPawel Jakub Dawidek _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
162484887fa3SAttilio Rao {
162584887fa3SAttilio Rao int slocked = 0;
162684887fa3SAttilio Rao 
1627879e0604SMateusz Guzik if (KERNEL_PANICKED())
162884887fa3SAttilio Rao return;
162984887fa3SAttilio Rao switch (what) {
163084887fa3SAttilio Rao case KA_SLOCKED:
163184887fa3SAttilio Rao case KA_SLOCKED | KA_NOTRECURSED:
163284887fa3SAttilio Rao case KA_SLOCKED | KA_RECURSED:
163384887fa3SAttilio Rao slocked = 1;
163484887fa3SAttilio Rao /* FALLTHROUGH */
163484887fa3SAttilio Rao case KA_LOCKED:
163584887fa3SAttilio Rao case KA_LOCKED | KA_NOTRECURSED:
163684887fa3SAttilio Rao case KA_LOCKED | KA_RECURSED:
1637e5f94314SAttilio Rao #ifdef WITNESS
1638e5f94314SAttilio Rao 
1639e5f94314SAttilio Rao /*
1640e5f94314SAttilio Rao * We cannot trust WITNESS if the lock is held in exclusive
1641e5f94314SAttilio Rao * mode and a call to lockmgr_disown() happened.
1642e5f94314SAttilio Rao * Work around this by skipping the check if the lock is held
1643e5f94314SAttilio Rao * in exclusive mode, even for the KA_LOCKED case.
1644e5f94314SAttilio Rao */
1645e5f94314SAttilio Rao if (slocked || (lk->lk_lock & LK_SHARE)) {
1646e5f94314SAttilio Rao witness_assert(&lk->lock_object, what, file, line);
1647e5f94314SAttilio Rao break;
1648e5f94314SAttilio Rao }
1649e5f94314SAttilio Rao #endif
1650047dd67eSAttilio Rao if (lk->lk_lock == LK_UNLOCKED ||
1651047dd67eSAttilio Rao ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1652047dd67eSAttilio Rao (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
165384887fa3SAttilio Rao panic("Lock %s not %slocked @ %s:%d\n",
1654047dd67eSAttilio Rao lk->lock_object.lo_name, slocked ? "share" : "",
165584887fa3SAttilio Rao file, line);
1656047dd67eSAttilio Rao 
1657047dd67eSAttilio Rao if ((lk->lk_lock & LK_SHARE) == 0) {
1658047dd67eSAttilio Rao if (lockmgr_recursed(lk)) {
165984887fa3SAttilio Rao if (what & KA_NOTRECURSED)
166084887fa3SAttilio Rao panic("Lock %s recursed @ %s:%d\n",
1661047dd67eSAttilio Rao lk->lock_object.lo_name, file,
1662047dd67eSAttilio Rao line);
166384887fa3SAttilio Rao } else if (what & KA_RECURSED)
166484887fa3SAttilio Rao panic("Lock %s not recursed @ %s:%d\n",
1665047dd67eSAttilio Rao lk->lock_object.lo_name, file, line);
166684887fa3SAttilio Rao }
166784887fa3SAttilio Rao break;
166884887fa3SAttilio Rao case KA_XLOCKED:
166984887fa3SAttilio Rao case KA_XLOCKED | KA_NOTRECURSED:
167084887fa3SAttilio Rao case KA_XLOCKED | KA_RECURSED:
1671047dd67eSAttilio Rao if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
167284887fa3SAttilio Rao panic("Lock %s not exclusively locked @ %s:%d\n",
1673047dd67eSAttilio Rao lk->lock_object.lo_name, file, line);
1674047dd67eSAttilio Rao if (lockmgr_recursed(lk)) {
167584887fa3SAttilio Rao if (what & KA_NOTRECURSED)
167684887fa3SAttilio Rao panic("Lock %s recursed @ %s:%d\n",
1677047dd67eSAttilio Rao lk->lock_object.lo_name, file, line);
167884887fa3SAttilio Rao } else if (what & KA_RECURSED)
167984887fa3SAttilio Rao panic("Lock %s not recursed @ %s:%d\n",
1680047dd67eSAttilio Rao lk->lock_object.lo_name, file, line);
168184887fa3SAttilio Rao break;
168284887fa3SAttilio Rao case KA_UNLOCKED:
1683047dd67eSAttilio Rao if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
168484887fa3SAttilio Rao panic("Lock %s exclusively locked @ %s:%d\n",
1685047dd67eSAttilio Rao lk->lock_object.lo_name, file, line);
168684887fa3SAttilio Rao break;
168784887fa3SAttilio Rao default:
1688047dd67eSAttilio Rao panic("Unknown lockmgr assertion: %d @ %s:%d\n",
what, file, 1689047dd67eSAttilio Rao line); 169084887fa3SAttilio Rao } 169184887fa3SAttilio Rao } 1692047dd67eSAttilio Rao #endif 169384887fa3SAttilio Rao 1694be6847d7SJohn Baldwin #ifdef DDB 1695462a7addSJohn Baldwin int 1696462a7addSJohn Baldwin lockmgr_chain(struct thread *td, struct thread **ownerp) 1697462a7addSJohn Baldwin { 1698fea73412SConrad Meyer const struct lock *lk; 1699462a7addSJohn Baldwin 1700047dd67eSAttilio Rao lk = td->td_wchan; 1701462a7addSJohn Baldwin 1702047dd67eSAttilio Rao if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr) 1703462a7addSJohn Baldwin return (0); 1704047dd67eSAttilio Rao db_printf("blocked on lockmgr %s", lk->lock_object.lo_name); 1705047dd67eSAttilio Rao if (lk->lk_lock & LK_SHARE) 1706047dd67eSAttilio Rao db_printf("SHARED (count %ju)\n", 1707047dd67eSAttilio Rao (uintmax_t)LK_SHARERS(lk->lk_lock)); 1708047dd67eSAttilio Rao else 1709047dd67eSAttilio Rao db_printf("EXCL\n"); 1710047dd67eSAttilio Rao *ownerp = lockmgr_xholder(lk); 1711462a7addSJohn Baldwin 1712462a7addSJohn Baldwin return (1); 1713462a7addSJohn Baldwin } 1714462a7addSJohn Baldwin 1715047dd67eSAttilio Rao static void 1716d576deedSPawel Jakub Dawidek db_show_lockmgr(const struct lock_object *lock) 1717be6847d7SJohn Baldwin { 1718be6847d7SJohn Baldwin struct thread *td; 1719d576deedSPawel Jakub Dawidek const struct lock *lk; 1720be6847d7SJohn Baldwin 1721d576deedSPawel Jakub Dawidek lk = (const struct lock *)lock; 1722be6847d7SJohn Baldwin 1723be6847d7SJohn Baldwin db_printf(" state: "); 1724047dd67eSAttilio Rao if (lk->lk_lock == LK_UNLOCKED) 1725be6847d7SJohn Baldwin db_printf("UNLOCKED\n"); 1726047dd67eSAttilio Rao else if (lk->lk_lock & LK_SHARE) 1727047dd67eSAttilio Rao db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock)); 1728047dd67eSAttilio Rao else { 1729047dd67eSAttilio Rao td = lockmgr_xholder(lk); 1730047dd67eSAttilio Rao if (td == (struct thread *)LK_KERNPROC) 1731047dd67eSAttilio Rao db_printf("XLOCK: LK_KERNPROC\n"); 1732047dd67eSAttilio Rao else 1733047dd67eSAttilio Rao db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1734047dd67eSAttilio Rao td->td_tid, td->td_proc->p_pid, 1735047dd67eSAttilio Rao td->td_proc->p_comm); 1736047dd67eSAttilio Rao if (lockmgr_recursed(lk)) 1737047dd67eSAttilio Rao db_printf(" recursed: %d\n", lk->lk_recurse); 1738047dd67eSAttilio Rao } 1739047dd67eSAttilio Rao db_printf(" waiters: "); 1740047dd67eSAttilio Rao switch (lk->lk_lock & LK_ALL_WAITERS) { 1741047dd67eSAttilio Rao case LK_SHARED_WAITERS: 1742047dd67eSAttilio Rao db_printf("shared\n"); 1743e5023dd9SEdward Tomasz Napierala break; 1744047dd67eSAttilio Rao case LK_EXCLUSIVE_WAITERS: 1745047dd67eSAttilio Rao db_printf("exclusive\n"); 1746047dd67eSAttilio Rao break; 1747047dd67eSAttilio Rao case LK_ALL_WAITERS: 1748047dd67eSAttilio Rao db_printf("shared and exclusive\n"); 1749047dd67eSAttilio Rao break; 1750047dd67eSAttilio Rao default: 1751047dd67eSAttilio Rao db_printf("none\n"); 1752047dd67eSAttilio Rao } 1753651175c9SAttilio Rao db_printf(" spinners: "); 1754651175c9SAttilio Rao if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS) 1755651175c9SAttilio Rao db_printf("exclusive\n"); 1756651175c9SAttilio Rao else 1757651175c9SAttilio Rao db_printf("none\n"); 1758be6847d7SJohn Baldwin } 1759be6847d7SJohn Baldwin #endif 1760
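/*
 * DDB usage sketch (illustrative; the addresses and output below are
 * hypothetical): db_show_lockmgr() above is not called directly but is
 * dispatched through the lock class by the debugger's generic
 * "show lock" command, so a session might look like:
 *
 *	db> show lock 0xfffff80012345678
 *	 state: XLOCK: 0xfffff80012abcdef (tid 100042, pid 42, "syncer")
 *	 waiters: none
 *	 spinners: none
 *
 * Similarly, lockmgr_chain() backs the "show sleepchain" command,
 * resolving the owner of the lockmgr lock a blocked thread is
 * sleeping on.
 */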