19454b2d8SWarner Losh /*-
24d846d26SWarner Losh * SPDX-License-Identifier: BSD-2-Clause
38a36da99SPedro F. Giffuni *
4047dd67eSAttilio Rao * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5047dd67eSAttilio Rao * All rights reserved.
653bf4bb2SPeter Wemm *
753bf4bb2SPeter Wemm * Redistribution and use in source and binary forms, with or without
853bf4bb2SPeter Wemm * modification, are permitted provided that the following conditions
953bf4bb2SPeter Wemm * are met:
1053bf4bb2SPeter Wemm * 1. Redistributions of source code must retain the above copyright
11047dd67eSAttilio Rao * notice(s), this list of conditions and the following disclaimer as
12047dd67eSAttilio Rao * the first lines of this file unmodified other than the possible
13047dd67eSAttilio Rao * addition of one or more copyright notices.
1453bf4bb2SPeter Wemm * 2. Redistributions in binary form must reproduce the above copyright
15047dd67eSAttilio Rao * notice(s), this list of conditions and the following disclaimer in the
1653bf4bb2SPeter Wemm * documentation and/or other materials provided with the distribution.
1753bf4bb2SPeter Wemm *
18047dd67eSAttilio Rao * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19047dd67eSAttilio Rao * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20047dd67eSAttilio Rao * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21047dd67eSAttilio Rao * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22047dd67eSAttilio Rao * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23047dd67eSAttilio Rao * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24047dd67eSAttilio Rao * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25047dd67eSAttilio Rao * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2653bf4bb2SPeter Wemm * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27047dd67eSAttilio Rao * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28047dd67eSAttilio Rao * DAMAGE.
2953bf4bb2SPeter Wemm */
3053bf4bb2SPeter Wemm
31047dd67eSAttilio Rao #include "opt_ddb.h"
32f5f9340bSFabien Thomas #include "opt_hwpmc_hooks.h"
33047dd67eSAttilio Rao
3453bf4bb2SPeter Wemm #include <sys/param.h>
35cd2fe4e6SAttilio Rao #include <sys/kdb.h>
3661d80e90SJohn Baldwin #include <sys/ktr.h>
37eac22dd4SMateusz Guzik #include <sys/limits.h>
3853bf4bb2SPeter Wemm #include <sys/lock.h>
39047dd67eSAttilio Rao #include <sys/lock_profile.h>
408302d183SBruce Evans #include <sys/lockmgr.h>
415b699f16SMark Johnston #include <sys/lockstat.h>
42d8881ca3SJohn Baldwin #include <sys/mutex.h>
438302d183SBruce Evans #include <sys/proc.h>
44047dd67eSAttilio Rao #include <sys/sleepqueue.h>
45e8ddb61dSJeff Roberson #ifdef DEBUG_LOCKS
46e8ddb61dSJeff Roberson #include <sys/stack.h>
47e8ddb61dSJeff Roberson #endif
48651175c9SAttilio Rao #include <sys/sysctl.h>
49047dd67eSAttilio Rao #include <sys/systm.h>
5053bf4bb2SPeter Wemm
51047dd67eSAttilio Rao #include <machine/cpu.h>
526efc8a16SAttilio Rao
53be6847d7SJohn Baldwin #ifdef DDB
54be6847d7SJohn Baldwin #include <ddb/ddb.h>
55047dd67eSAttilio Rao #endif
56047dd67eSAttilio Rao
57f5f9340bSFabien Thomas #ifdef HWPMC_HOOKS
58f5f9340bSFabien Thomas #include <sys/pmckern.h>
59f5f9340bSFabien Thomas PMC_SOFT_DECLARE( , , lock, failed);
60f5f9340bSFabien Thomas #endif
61f5f9340bSFabien Thomas
62eac22dd4SMateusz Guzik /*
63eac22dd4SMateusz Guzik * Hack. There should be prio_t or similar so that this is not necessary.
64eac22dd4SMateusz Guzik */
65eac22dd4SMateusz Guzik _Static_assert((PRILASTFLAG * 2) - 1 <= USHRT_MAX,
66eac22dd4SMateusz Guzik "prio flags wont fit in u_short pri in struct lock");
67eac22dd4SMateusz Guzik
68651175c9SAttilio Rao CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
69651175c9SAttilio Rao ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
70047dd67eSAttilio Rao
/* Sleepqueue indexes used to park blocked lockmgr consumers. */
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

/* Without INVARIANTS the internal assertion helper compiles away. */
#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#endif

/* Per-thread shared-hold counters, consulted for deadlock avoidance. */
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

/* Stack recording of lock operations is only kept under DEBUG_LOCKS. */
#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

/* KTR tracing helpers, gated on the per-lock LOCK_LOG_TEST() check. */
#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
97047dd67eSAttilio Rao
/*
 * Giant drop/pickup support.  A thread about to sleep on a lockmgr lock
 * must not hold Giant: GIANT_SAVE() fully releases it, recording the
 * recursion depth in _i, and GIANT_RESTORE() reacquires it to the same
 * depth afterwards, with matching WITNESS state save/restore.
 */
#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (__predict_false(_i > 0)) {					\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (__predict_false(mtx_owned(&Giant))) {			\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)
117e5f94314SAttilio Rao
1183c84b4b3SRyan Libby static __always_inline bool
LK_CAN_SHARE(uintptr_t x,int flags,bool fp)11995ab076dSMateusz Guzik LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
12095ab076dSMateusz Guzik {
12195ab076dSMateusz Guzik
12295ab076dSMateusz Guzik if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
12395ab076dSMateusz Guzik LK_SHARE)
12495ab076dSMateusz Guzik return (true);
12595ab076dSMateusz Guzik if (fp || (!(x & LK_SHARE)))
12695ab076dSMateusz Guzik return (false);
12795ab076dSMateusz Guzik if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
12895ab076dSMateusz Guzik (curthread->td_pflags & TDP_DEADLKTREAT))
12995ab076dSMateusz Guzik return (true);
13095ab076dSMateusz Guzik return (false);
13195ab076dSMateusz Guzik }
13295ab076dSMateusz Guzik
/* True if the operation must not sleep (trylock semantics). */
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

/* WITNESS order checking is skipped for try ops and LK_NOWITNESS. */
#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

/* True if lock word 'v' encodes curthread as the exclusive owner. */
#define	lockmgr_xlocked_v(v)						\
	(((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

#define	lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
145047dd67eSAttilio Rao
146d576deedSPawel Jakub Dawidek static void assert_lockmgr(const struct lock_object *lock, int how);
147047dd67eSAttilio Rao #ifdef DDB
148d576deedSPawel Jakub Dawidek static void db_show_lockmgr(const struct lock_object *lock);
149be6847d7SJohn Baldwin #endif
1507faf4d90SDavide Italiano static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
151a5aedd68SStacey Son #ifdef KDTRACE_HOOKS
152d576deedSPawel Jakub Dawidek static int owner_lockmgr(const struct lock_object *lock,
153d576deedSPawel Jakub Dawidek struct thread **owner);
154a5aedd68SStacey Son #endif
1557faf4d90SDavide Italiano static uintptr_t unlock_lockmgr(struct lock_object *lock);
15661bd5e21SKip Macy
/* Glue plugging lockmgr locks into the generic lock_class framework. */
struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};
17061bd5e21SKip Macy
/* Adaptive spinning knob: debug.lockmgr.adaptive_spinning sysctl. */
static __read_mostly bool lk_adaptive = true;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
    0, "");
#define	lockmgr_delay	locks_delay

/* Sleep parameters bundled for the slow-path acquisition helpers. */
struct lockmgr_wait {
	const char *iwmesg;	/* wait message shown while sleeping */
	int ipri;		/* sleep priority (may include PCATCH) */
	int itimo;		/* sleep timeout in ticks */
};
1821c6987ebSMateusz Guzik
1833c84b4b3SRyan Libby static __always_inline bool lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
18495ab076dSMateusz Guzik int flags, bool fp);
1853c84b4b3SRyan Libby static __always_inline bool lockmgr_sunlock_try(struct lock *lk,
1863c84b4b3SRyan Libby uintptr_t *xp);
1871c6987ebSMateusz Guzik
1881c6987ebSMateusz Guzik static void
lockmgr_exit(u_int flags,struct lock_object * ilk)18901518f5eSMark Johnston lockmgr_exit(u_int flags, struct lock_object *ilk)
1901c6987ebSMateusz Guzik {
1911c6987ebSMateusz Guzik struct lock_class *class;
1921c6987ebSMateusz Guzik
1931c6987ebSMateusz Guzik if (flags & LK_INTERLOCK) {
1941c6987ebSMateusz Guzik class = LOCK_CLASS(ilk);
1951c6987ebSMateusz Guzik class->lc_unlock(ilk);
1961c6987ebSMateusz Guzik }
1971c6987ebSMateusz Guzik }
198c4a48867SMateusz Guzik
/*
 * Record a successful shared acquisition: lockstat/profiling, KTR
 * logging, WITNESS bookkeeping and per-thread lock counters.
 */
static void
lockmgr_note_shared_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
	    waittime, file, line, LOCKSTAT_READER);
	LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
	TD_LOCKS_INC(curthread);
	TD_SLOCKS_INC(curthread);
	STACK_SAVE(lk);
}
212c4a48867SMateusz Guzik
/* Undo the accounting done by lockmgr_note_shared_acquire(). */
static void
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
{

	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
}
222c4a48867SMateusz Guzik
/*
 * Record a successful exclusive acquisition: lockstat/profiling, KTR
 * logging (including the recursion count), WITNESS bookkeeping and the
 * per-thread lock counter.
 */
static void
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
	LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
	WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
	    line);
	TD_LOCKS_INC(curthread);
	STACK_SAVE(lk);
}
236c4a48867SMateusz Guzik
/*
 * Record an exclusive release.  A disowned lock carries no WITNESS
 * state or per-thread count for the releasing thread, so skip those.
 */
static void
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
{

	if (!lockmgr_disowned(lk)) {
		WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_DEC(curthread);
	}
	LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
	    line);
}
248c4a48867SMateusz Guzik
249047dd67eSAttilio Rao static __inline struct thread *
lockmgr_xholder(const struct lock * lk)250d576deedSPawel Jakub Dawidek lockmgr_xholder(const struct lock *lk)
251047dd67eSAttilio Rao {
252047dd67eSAttilio Rao uintptr_t x;
253047dd67eSAttilio Rao
254bdb6d824SMateusz Guzik x = lockmgr_read_value(lk);
255047dd67eSAttilio Rao return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
256047dd67eSAttilio Rao }
25784887fa3SAttilio Rao
/*
 * Block curthread on the given sleepqueue until the lock is handed over.
 *
 * It assumes sleepq_lock held and returns with this one unheld.
 * It also assumes the generic interlock is sane and previously checked.
 * If LK_INTERLOCK is specified the interlock is not reacquired after the
 * sleep.  Returns 0, or an errno from the sleep primitives, or ENOLCK
 * when LK_SLEEPFAIL is set and the sleep completed normally.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) {
		/*
		 * Saturating count of exclusive waiters that will fail
		 * the acquisition once woken; consumed by the wakeup
		 * paths (see wakeupshlk()).
		 */
		if (lk->lk_exslpfail < USHRT_MAX)
			lk->lk_exslpfail++;
	}
	/* Giant must be fully dropped before sleeping. */
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decisional switch for real sleeping: pick the sleepqueue wait
	 * variant matching the timeout/catch combination selected above.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
309047dd67eSAttilio Rao
/*
 * Release a shared hold, taking the hard path to wake up waiters when
 * the lock-word fast path cannot release on its own.
 */
static __inline void
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x, orig_x;
	u_int realexslp;
	int queue;

	for (;;) {
		x = lockmgr_read_value(lk);
		if (lockmgr_sunlock_try(lk, &x))
			break;

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		orig_x = lockmgr_read_value(lk);
retry_sleepq:
		x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners up.
		 * If interruptible sleeps left the exclusive queue empty
		 * avoid a starvation for the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that lk_exslpfail count may be lying about
		 * the real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps so lk_exslpfail might be considered an 'upper limit'
		 * bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
				/* At least one waiter can take the lock. */
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				/*
				 * Only LK_SLEEPFAIL sleepers remain: wake
				 * them all and favor the shared queue.
				 */
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
				    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
				    SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}
		} else {
			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		/* The lock word may have changed; retry the fast path. */
		if (lockmgr_sunlock_try(lk, &orig_x)) {
			sleepq_release(&lk->lock_object);
			break;
		}

		/* Swap our single sharer reference for 'v' and wake up. */
		x |= LK_SHARERS_LOCK(1);
		if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
			orig_x = x;
			goto retry_sleepq;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
}
395047dd67eSAttilio Rao
/* lock_class assert hook: unsupported, callers use lockmgr_assert(). */
static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}
402f9721b43SAttilio Rao
403047dd67eSAttilio Rao static void
lock_lockmgr(struct lock_object * lock,uintptr_t how)4047faf4d90SDavide Italiano lock_lockmgr(struct lock_object *lock, uintptr_t how)
4056e21afd4SJohn Baldwin {
4066e21afd4SJohn Baldwin
4076e21afd4SJohn Baldwin panic("lockmgr locks do not support sleep interlocking");
4086e21afd4SJohn Baldwin }
4096e21afd4SJohn Baldwin
/* lock_class sleep-interlock release hook: likewise unsupported. */
static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}
4166e21afd4SJohn Baldwin
#ifdef KDTRACE_HOOKS
/* DTrace owner-inquiry hook: not implemented for lockmgr locks. */
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif
425a5aedd68SStacey Son
42699448ed1SJohn Dyson void
lockinit(struct lock * lk,int pri,const char * wmesg,int timo,int flags)427047dd67eSAttilio Rao lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
42899448ed1SJohn Dyson {
4296efc8a16SAttilio Rao int iflags;
4306efc8a16SAttilio Rao
431047dd67eSAttilio Rao MPASS((flags & ~LK_INIT_MASK) == 0);
432353998acSAttilio Rao ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
433353998acSAttilio Rao ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
434353998acSAttilio Rao &lk->lk_lock));
43599448ed1SJohn Dyson
436f0830182SAttilio Rao iflags = LO_SLEEPABLE | LO_UPGRADABLE;
437f0830182SAttilio Rao if (flags & LK_CANRECURSE)
438f0830182SAttilio Rao iflags |= LO_RECURSABLE;
439047dd67eSAttilio Rao if ((flags & LK_NODUP) == 0)
4406efc8a16SAttilio Rao iflags |= LO_DUPOK;
4417fbfba7bSAttilio Rao if (flags & LK_NOPROFILE)
4427fbfba7bSAttilio Rao iflags |= LO_NOPROFILE;
443047dd67eSAttilio Rao if ((flags & LK_NOWITNESS) == 0)
4446efc8a16SAttilio Rao iflags |= LO_WITNESS;
4457fbfba7bSAttilio Rao if (flags & LK_QUIET)
4467fbfba7bSAttilio Rao iflags |= LO_QUIET;
447e63091eaSMarcel Moolenaar if (flags & LK_IS_VNODE)
448e63091eaSMarcel Moolenaar iflags |= LO_IS_VNODE;
44946713135SGleb Smirnoff if (flags & LK_NEW)
45046713135SGleb Smirnoff iflags |= LO_NEW;
4515fe188b1SMateusz Guzik iflags |= flags & LK_NOSHARE;
452047dd67eSAttilio Rao
453b5fb43e5SJohn Baldwin lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
454047dd67eSAttilio Rao lk->lk_lock = LK_UNLOCKED;
455047dd67eSAttilio Rao lk->lk_recurse = 0;
4562028867dSAttilio Rao lk->lk_exslpfail = 0;
457047dd67eSAttilio Rao lk->lk_timo = timo;
458047dd67eSAttilio Rao lk->lk_pri = pri;
459047dd67eSAttilio Rao STACK_ZERO(lk);
46099448ed1SJohn Dyson }
46199448ed1SJohn Dyson
4623634d5b2SJohn Baldwin /*
4633634d5b2SJohn Baldwin * XXX: Gross hacks to manipulate external lock flags after
4643634d5b2SJohn Baldwin * initialization. Used for certain vnode and buf locks.
4653634d5b2SJohn Baldwin */
/* Re-enable shared acquisitions on a lock created with LK_NOSHARE. */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}
4733634d5b2SJohn Baldwin
4743634d5b2SJohn Baldwin void
lockdisableshare(struct lock * lk)475575e02d9SKonstantin Belousov lockdisableshare(struct lock *lk)
476575e02d9SKonstantin Belousov {
477575e02d9SKonstantin Belousov
478575e02d9SKonstantin Belousov lockmgr_assert(lk, KA_XLOCKED);
479575e02d9SKonstantin Belousov lk->lock_object.lo_flags |= LK_NOSHARE;
480575e02d9SKonstantin Belousov }
481575e02d9SKonstantin Belousov
/* Permit recursive exclusive acquisitions by the owner. */
void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}
4893634d5b2SJohn Baldwin
/* Forbid recursive exclusive acquisitions. */
void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}
4973634d5b2SJohn Baldwin
/*
 * Tear down a lock initialized with lockinit().  The lock must be
 * unheld, unrecursed and carry no leftover LK_SLEEPFAIL accounting.
 */
void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}
507047dd67eSAttilio Rao
/*
 * Fast-path shared acquisition.  Returns true on success; on failure
 * *xp holds the lock word that made sharing impossible.
 */
static __always_inline bool
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
{

	/*
	 * If no other thread has an exclusive lock, or
	 * no exclusive waiter is present, bump the count of
	 * sharers. Since we have to preserve the state of
	 * waiters, if we fail to acquire the shared lock
	 * loop back and retry.
	 */
	while (LK_CAN_SHARE(*xp, flags, fp)) {
		/* fcmpset refreshes *xp on failure, so just loop. */
		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
		    *xp + LK_ONE_SHARER)) {
			return (true);
		}
	}
	return (false);
}
527c4a48867SMateusz Guzik
/*
 * Fast-path shared release: drop one sharer reference unless we are the
 * last sharer while waiters are queued — that case needs the wakeup
 * path.  Returns true on success; on failure *xp holds the refreshed
 * lock word.
 */
static __always_inline bool
lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
{

	for (;;) {
		if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
			/* fcmpset refreshes *xp on failure; retry. */
			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
			    *xp - LK_ONE_SHARER))
				return (true);
			continue;
		}
		break;
	}
	return (false);
}
543c4a48867SMateusz Guzik
/*
 * Adaptively spin waiting for a shared acquisition while the exclusive
 * owner runs on another CPU.  Returns true with *xp refreshed when the
 * lock became shareable; false when the caller should block instead.
 */
static bool
lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
    int flags)
{
	struct thread *owner;
	uintptr_t x;

	x = *xp;
	MPASS(x != LK_UNLOCKED);
	owner = (struct thread *)LK_HOLDER(x);
	for (;;) {
		MPASS(owner != curthread);
		/* A disowned lock has no owner thread to spin on. */
		if (owner == (struct thread *)LK_KERNPROC)
			return (false);
		/* Held shared: LK_HOLDER() is not a thread pointer here. */
		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
			return (false);
		if (owner == NULL)
			return (false);
		/* Spinning only pays off while the owner stays on a CPU. */
		if (!TD_IS_RUNNING(owner))
			return (false);
		/* Don't cut in line ahead of queued waiters. */
		if ((x & LK_ALL_WAITERS) != 0)
			return (false);
		lock_delay(lda);
		x = lockmgr_read_value(lk);
		if (LK_CAN_SHARE(x, flags, false)) {
			*xp = x;
			return (true);
		}
		owner = (struct thread *)LK_HOLDER(x);
	}
}
57531ad4050SMateusz Guzik
/*
 * Slow path for acquiring a lockmgr lock in shared mode.
 *
 * Called when the fast-path shared acquire failed.  Loops retrying the
 * acquire, optionally spinning adaptively while the exclusive owner is
 * running, and otherwise sets LK_SHARED_WAITERS and sleeps.
 *
 * Returns 0 on success, EDEADLK if curthread already owns the lock
 * exclusively, EBUSY for a failed try operation, or the error from an
 * interrupted/timed-out sleep.  The interlock 'ilk' (when LK_INTERLOCK
 * is set in flags) is released before returning via lockmgr_exit().
 * 'lwa', if non-NULL, overrides the per-lock sleep message/priority/timeout.
 */
static __noinline int
lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	uintptr_t tid, x;
	int error = 0;
	const char *iwmesg;
	int ipri, itimo;

#ifdef KDTRACE_HOOKS
	uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	struct lock_delay_arg lda;

	if (SCHEDULER_STOPPED())
		goto out;

	tid = (uintptr_t)curthread;

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
		    file, line, flags & LK_INTERLOCK ? ilk : NULL);
	x = lockmgr_read_value(lk);
	lock_delay_arg_init(&lda, &lockmgr_delay);
	if (!lk_adaptive)
		flags &= ~LK_ADAPTIVE;
	/*
	 * The lock may already be locked exclusive by curthread,
	 * avoid deadlock.
	 */
	if (LK_HOLDER(x) == tid) {
		LOCK_LOG2(lk,
		    "%s: %p already held in exclusive mode",
		    __func__, lk);
		error = EDEADLK;
		goto out;
	}

	for (;;) {
		if (lockmgr_slock_try(lk, &x, flags, false))
			break;

		lock_profile_obtain_lock_failed(&lk->lock_object, false,
		    &contested, &waittime);

		/*
		 * Spin while the owner looks likely to release soon;
		 * not applicable while holding an interlock.
		 */
		if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
			if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
				continue;
		}

#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif

		/*
		 * If the lock is expected to not sleep just give up
		 * and return.
		 */
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		 */
		sleepq_lock(&lk->lock_object);
		x = lockmgr_read_value(lk);
retry_sleepq:

		/*
		 * if the lock can be acquired in shared mode, try
		 * again.
		 */
		if (LK_CAN_SHARE(x, flags, false)) {
			sleepq_release(&lk->lock_object);
			continue;
		}

		/*
		 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
		 * loop back and retry.
		 */
		if ((x & LK_SHARED_WAITERS) == 0) {
			if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
			    x | LK_SHARED_WAITERS)) {
				goto retry_sleepq;
			}
			LOCK_LOG2(lk, "%s: %p set shared waiters flag",
			    __func__, lk);
		}

		/* Use per-lock sleep parameters unless the caller supplied some. */
		if (lwa == NULL) {
			iwmesg = lk->lock_object.lo_name;
			ipri = lk->lk_pri;
			itimo = lk->lk_timo;
		} else {
			iwmesg = lwa->iwmesg;
			ipri = lwa->ipri;
			itimo = lwa->itimo;
		}

		/*
		 * As far as we have been unable to acquire the
		 * shared lock and the shared waiters flag is set,
		 * we will sleep.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		    SQ_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
		/* NOTE(review): the interlock appears consumed by sleeplk() — confirm. */
		flags &= ~LK_INTERLOCK;
		if (error) {
			LOCK_LOG3(lk,
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
			break;
		}
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
		    __func__, lk);
		x = lockmgr_read_value(lk);
	}
	if (error == 0) {
#ifdef KDTRACE_HOOKS
		if (sleep_time != 0)
			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
			    LOCKSTAT_READER, (x & LK_SHARE) == 0,
			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
		lockmgr_note_shared_acquire(lk, contested, waittime,
		    file, line, flags);
#else
		lockmgr_note_shared_acquire(lk, 0, 0, file, line,
		    flags);
#endif
	}

out:
	lockmgr_exit(flags, ilk);
	return (error);
}
729047dd67eSAttilio Rao
/*
 * Adaptive-spin helper for the exclusive-lock slow path.
 *
 * Busy-waits (via lock_delay()) while the lock appears likely to be
 * released soon.  Returns true with *xp set to LK_UNLOCKED when the lock
 * was observed free, so the caller can retry the fast acquire.  Returns
 * false when spinning is pointless: no identifiable owner, shared holders
 * present, lock disowned to LK_KERNPROC, owner not currently running, or
 * waiters already queued (in which case we should queue too).
 */
static bool
lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
{
	struct thread *owner;
	uintptr_t x;

	x = *xp;
	MPASS(x != LK_UNLOCKED);
	owner = (struct thread *)LK_HOLDER(x);
	for (;;) {
		MPASS(owner != curthread);
		if (owner == NULL)
			return (false);
		if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
			return (false);
		if (owner == (struct thread *)LK_KERNPROC)
			return (false);
		if (!TD_IS_RUNNING(owner))
			return (false);
		if ((x & LK_ALL_WAITERS) != 0)
			return (false);
		lock_delay(lda);
		x = lockmgr_read_value(lk);
		if (x == LK_UNLOCKED) {
			*xp = x;
			return (true);
		}
		owner = (struct thread *)LK_HOLDER(x);
	}
}
76031ad4050SMateusz Guzik
/*
 * Slow path for acquiring a lockmgr lock in exclusive mode.
 *
 * Handles recursion (allowed only with LK_CANRECURSE or LO_RECURSABLE;
 * otherwise a non-try recursive acquire panics), adaptive spinning on a
 * running owner, claiming an ownerless-but-contested lock, and sleeping
 * with LK_EXCLUSIVE_WAITERS set.
 *
 * Returns 0 on success, EBUSY for a failed try operation, or the error
 * from an interrupted/timed-out sleep.  The interlock 'ilk' (when
 * LK_INTERLOCK is set) is released before returning.  'lwa', if non-NULL,
 * overrides the per-lock sleep message/priority/timeout.
 */
static __noinline int
lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	struct lock_class *class;
	uintptr_t tid, x, v;
	int error = 0;
	const char *iwmesg;
	int ipri, itimo;

#ifdef KDTRACE_HOOKS
	uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	struct lock_delay_arg lda;

	if (SCHEDULER_STOPPED())
		goto out;

	tid = (uintptr_t)curthread;

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
		    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
		    ilk : NULL);

	/*
	 * If curthread already holds the lock and this one is
	 * allowed to recurse, simply recurse on it.
	 */
	if (lockmgr_xlocked(lk)) {
		if ((flags & LK_CANRECURSE) == 0 &&
		    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
			/*
			 * If the lock is expected to not panic just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk,
				    "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				goto out;
			}
			/* Drop the interlock before panicking. */
			if (flags & LK_INTERLOCK) {
				class = LOCK_CLASS(ilk);
				class->lc_unlock(ilk);
			}
			STACK_PRINT(lk);
			panic("%s: recursing on non recursive lockmgr %p "
			    "@ %s:%d\n", __func__, lk, file, line);
		}
		atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
		lk->lk_recurse++;
		LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
		LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
		    lk->lk_recurse, file, line);
		WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
		    LK_TRYWIT(flags), file, line);
		TD_LOCKS_INC(curthread);
		goto out;
	}

	x = LK_UNLOCKED;
	lock_delay_arg_init(&lda, &lockmgr_delay);
	if (!lk_adaptive)
		flags &= ~LK_ADAPTIVE;
	for (;;) {
		if (x == LK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
				break;
			continue;
		}

		lock_profile_obtain_lock_failed(&lk->lock_object, false,
		    &contested, &waittime);

		/*
		 * Spin while the owner looks likely to release soon;
		 * not applicable while holding an interlock.
		 */
		if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
			if (lockmgr_xlock_adaptive(&lda, lk, &x))
				continue;
		}
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif

		/*
		 * If the lock is expected to not sleep just give up
		 * and return.
		 */
		if (LK_TRYOP(flags)) {
			LOCK_LOG2(lk, "%s: %p fails the try operation",
			    __func__, lk);
			error = EBUSY;
			break;
		}

		/*
		 * Acquire the sleepqueue chain lock because we
		 * probably will need to manipulate waiters flags.
		 */
		sleepq_lock(&lk->lock_object);
		x = lockmgr_read_value(lk);
retry_sleepq:

		/*
		 * if the lock has been released while we spun on
		 * the sleepqueue chain lock just try again.
		 */
		if (x == LK_UNLOCKED) {
			sleepq_release(&lk->lock_object);
			continue;
		}

		/*
		 * The lock can be in the state where there is a
		 * pending queue of waiters, but still no owner.
		 * This happens when the lock is contested and an
		 * owner is going to claim the lock.
		 * If curthread is the one successfully acquiring it
		 * claim lock ownership and return, preserving waiters
		 * flags.
		 */
		v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		if ((x & ~v) == LK_UNLOCKED) {
			v &= ~LK_EXCLUSIVE_SPINNERS;
			if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
			    tid | v)) {
				sleepq_release(&lk->lock_object);
				LOCK_LOG2(lk,
				    "%s: %p claimed by a new writer",
				    __func__, lk);
				break;
			}
			goto retry_sleepq;
		}

		/*
		 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
		 * fail, loop back and retry.
		 */
		if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
			if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
			    x | LK_EXCLUSIVE_WAITERS)) {
				goto retry_sleepq;
			}
			LOCK_LOG2(lk, "%s: %p set excl waiters flag",
			    __func__, lk);
		}

		/* Use per-lock sleep parameters unless the caller supplied some. */
		if (lwa == NULL) {
			iwmesg = lk->lock_object.lo_name;
			ipri = lk->lk_pri;
			itimo = lk->lk_timo;
		} else {
			iwmesg = lwa->iwmesg;
			ipri = lwa->ipri;
			itimo = lwa->itimo;
		}

		/*
		 * As far as we have been unable to acquire the
		 * exclusive lock and the exclusive waiters flag
		 * is set, we will sleep.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
		error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
		    SQ_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
		/* NOTE(review): the interlock appears consumed by sleeplk() — confirm. */
		flags &= ~LK_INTERLOCK;
		if (error) {
			LOCK_LOG3(lk,
			    "%s: interrupted sleep for %p with %d",
			    __func__, lk, error);
			break;
		}
		LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
		    __func__, lk);
		x = lockmgr_read_value(lk);
	}
	if (error == 0) {
#ifdef KDTRACE_HOOKS
		if (sleep_time != 0)
			LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
			    LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
			    (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
		lockmgr_note_exclusive_acquire(lk, contested, waittime,
		    file, line, flags);
#else
		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
		    flags);
#endif
	}

out:
	lockmgr_exit(flags, ilk);
	return (error);
}
9671c6987ebSMateusz Guzik
/*
 * Upgrade a shared lock to exclusive (LK_UPGRADE / LK_TRYUPGRADE).
 *
 * Caller must hold the lock shared (asserted).  If other shared holders
 * are present, LK_TRYUPGRADE fails with EBUSY, while LK_UPGRADE drops our
 * shared count and falls back to a full exclusive acquire — i.e. the
 * upgrade is not atomic in that case.  With exactly one shared holder the
 * word is swapped to exclusive ownership, preserving waiter flags.
 * Returns 0 on success or an error from the exclusive-lock slow path.
 */
static __noinline int
lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
	uintptr_t tid, v, setv;
	int error = 0;
	int op;

	if (SCHEDULER_STOPPED())
		goto out;

	tid = (uintptr_t)curthread;

	_lockmgr_assert(lk, KA_SLOCKED, file, line);

	op = flags & LK_TYPE_MASK;
	v = lockmgr_read_value(lk);
	for (;;) {
		if (LK_SHARERS(v) > 1) {
			if (op == LK_TRYUPGRADE) {
				LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
				    __func__, lk);
				error = EBUSY;
				goto out;
			}
			/* Release our shared count, then contend as a writer. */
			if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
			    v - LK_ONE_SHARER)) {
				lockmgr_note_shared_release(lk, file, line);
				goto out_xlock;
			}
			continue;
		}
		MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));

		setv = tid;
		setv |= (v & LK_ALL_WAITERS);

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
			TD_SLOCKS_DEC(curthread);
			goto out;
		}
	}

out_xlock:
	error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
	flags &= ~LK_INTERLOCK;
out:
	lockmgr_exit(flags, ilk);
	return (error);
}
10271c6987ebSMateusz Guzik
/*
 * Full-featured lock entry point.
 *
 * Dispatches on the requested operation (LK_SHARED, LK_EXCLUSIVE,
 * LK_UPGRADE/LK_TRYUPGRADE), taking the uncontended fast path inline and
 * falling back to the hard paths — or to the generic __lockmgr_args() for
 * anything else — on contention or unusual requests.  When LK_INTERLOCK
 * is set, 'ilk' is released before returning.  Returns 0 on success or an
 * error from the slow paths.
 */
int
lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
	struct lock_class *class;
	uintptr_t x, tid;
	u_int op;
	bool locked;

	if (SCHEDULER_STOPPED())
		return (0);

	op = flags & LK_TYPE_MASK;
	locked = false;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
		/*
		 * Shared requests on a LK_NOSHARE lock are not handled
		 * here; fall through to __lockmgr_args() below.
		 */
		if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
			break;
		x = lockmgr_read_value(lk);
		if (lockmgr_slock_try(lk, &x, flags, true)) {
			lockmgr_note_shared_acquire(lk, 0, 0,
			    file, line, flags);
			locked = true;
		} else {
			return (lockmgr_slock_hard(lk, flags, ilk, file, line,
			    NULL));
		}
		break;
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);
		tid = (uintptr_t)curthread;
		if (lockmgr_read_value(lk) == LK_UNLOCKED &&
		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
			    flags);
			locked = true;
		} else {
			return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
			    NULL));
		}
		break;
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
	default:
		break;
	}
	if (__predict_true(locked)) {
		if (__predict_false(flags & LK_INTERLOCK)) {
			class = LOCK_CLASS(ilk);
			class->lc_unlock(ilk);
		}
		return (0);
	} else {
		return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
		    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
	}
}
10921c6987ebSMateusz Guzik
10931c6987ebSMateusz Guzik static __noinline int
lockmgr_sunlock_hard(struct lock * lk,uintptr_t x,u_int flags,struct lock_object * ilk,const char * file,int line)10941c6987ebSMateusz Guzik lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
10951c6987ebSMateusz Guzik const char *file, int line)
10961c6987ebSMateusz Guzik {
109701518f5eSMark Johnston if (!SCHEDULER_STOPPED())
109801518f5eSMark Johnston wakeupshlk(lk, file, line);
109901518f5eSMark Johnston lockmgr_exit(flags, ilk);
11001c6987ebSMateusz Guzik return (0);
11011c6987ebSMateusz Guzik }
11021c6987ebSMateusz Guzik
/*
 * Slow path for releasing an exclusively-held lockmgr lock.
 *
 * Handles disowned locks (owner LK_KERNPROC), unwinds one recursion level
 * if recursed, and otherwise releases the lock, choosing which sleepqueue
 * (exclusive first, to avoid writer starvation) to wake.  Drops the
 * interlock (per flags) before returning.  Always returns 0.
 */
static __noinline int
lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
	uintptr_t tid, v;
	u_int realexslp;
	int queue;

	if (SCHEDULER_STOPPED())
		goto out;

	tid = (uintptr_t)curthread;

	/*
	 * As first option, treat the lock as if it has not
	 * any waiter.
	 * Fix-up the tid var if the lock has been disowned.
	 */
	if (lockmgr_disowned_v(x))
		tid = LK_KERNPROC;

	/*
	 * The lock is held in exclusive mode.
	 * If the lock is recursed also, then unrecurse it.
	 */
	if (lockmgr_recursed_v(x)) {
		LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
		lk->lk_recurse--;
		if (lk->lk_recurse == 0)
			atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
		goto out;
	}
	if (tid != LK_KERNPROC)
		LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
		    LOCKSTAT_WRITER);

	/* Fast path: no waiter flags set, plain release. */
	if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
		goto out;

	sleepq_lock(&lk->lock_object);
	x = lockmgr_read_value(lk);
	v = LK_UNLOCKED;

	/*
	 * If the lock has exclusive waiters, give them
	 * preference in order to avoid deadlock with
	 * shared runners up.
	 * If interruptible sleeps left the exclusive queue
	 * empty avoid a starvation for the threads sleeping
	 * on the shared queue by giving them precedence
	 * and cleaning up the exclusive waiters bit anyway.
	 * Please note that lk_exslpfail count may be lying
	 * about the real number of waiters with the
	 * LK_SLEEPFAIL flag on because they may be used in
	 * conjunction with interruptible sleeps so
	 * lk_exslpfail might be considered an 'upper limit'
	 * bound, including the edge cases.
	 */
	MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
	realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
	if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
		if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
			lk->lk_exslpfail = 0;
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			lk->lk_exslpfail = 0;
			LOCK_LOG2(lk,
			    "%s: %p has only LK_SLEEPFAIL sleepers",
			    __func__, lk);
			LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
			    __func__, lk);
			sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
			    SQ_EXCLUSIVE_QUEUE);
			queue = SQ_SHARED_QUEUE;
		}
	} else {
		/*
		 * Exclusive waiters sleeping with LK_SLEEPFAIL
		 * on and using interruptible sleeps/timeout
		 * may have left spurious lk_exslpfail counts
		 * on, so clean it up anyway.
		 */
		lk->lk_exslpfail = 0;
		queue = SQ_SHARED_QUEUE;
	}

	LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
	    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
	    "exclusive");
	atomic_store_rel_ptr(&lk->lk_lock, v);
	sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
	sleepq_release(&lk->lock_object);

out:
	lockmgr_exit(flags, ilk);
	return (0);
}
12021c6987ebSMateusz Guzik
1203c8b29d12SMateusz Guzik /*
1204c8b29d12SMateusz Guzik * Lightweight entry points for common operations.
1205c8b29d12SMateusz Guzik *
1206c8b29d12SMateusz Guzik * Functionality is similar to sx locks, in that none of the additional lockmgr
1207c8b29d12SMateusz Guzik * features are supported. To be clear, these are NOT supported:
1208c8b29d12SMateusz Guzik * 1. shared locking disablement
1209c8b29d12SMateusz Guzik * 2. returning with an error after sleep
1210c8b29d12SMateusz Guzik * 3. unlocking the interlock
1211c8b29d12SMateusz Guzik *
1212c1b57fa7SMateusz Guzik * If in doubt, use lockmgr_lock_flags.
1213c8b29d12SMateusz Guzik */
/*
 * Lightweight entry point to acquire a shared lock.
 *
 * Only plain LK_SHARED acquisitions are supported: no interlock, and the
 * lock must permit sharing (all asserted).  On contention, falls back to
 * the hard path with adaptive spinning enabled.  Returns 0 on success or
 * an error from lockmgr_slock_hard().
 */
int
lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
{
	uintptr_t x;

	MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
	MPASS((flags & LK_INTERLOCK) == 0);
	MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
		    file, line, NULL);
	x = lockmgr_read_value(lk);
	if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
		lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
		return (0);
	}

	return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
}
1234c8b29d12SMateusz Guzik
/*
 * Lightweight entry point to acquire an exclusive lock.
 *
 * Only plain LK_EXCLUSIVE acquisitions are supported: no interlock
 * (asserted).  Tries a single uncontended compare-and-set, then falls
 * back to the hard path with adaptive spinning enabled.  Returns 0 on
 * success or an error from lockmgr_xlock_hard().
 */
int
lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
{
	uintptr_t tid;

	MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
	MPASS((flags & LK_INTERLOCK) == 0);

	if (LK_CAN_WITNESS(flags))
		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
		    LOP_EXCLUSIVE, file, line, NULL);
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
		lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
		    flags);
		return (0);
	}

	return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
}
1255c8b29d12SMateusz Guzik
1256c8b29d12SMateusz Guzik int
lockmgr_unlock(struct lock * lk)1257c8b29d12SMateusz Guzik lockmgr_unlock(struct lock *lk)
1258c8b29d12SMateusz Guzik {
1259c8b29d12SMateusz Guzik uintptr_t x, tid;
1260c8b29d12SMateusz Guzik const char *file;
1261c8b29d12SMateusz Guzik int line;
1262c8b29d12SMateusz Guzik
1263c8b29d12SMateusz Guzik file = __FILE__;
1264c8b29d12SMateusz Guzik line = __LINE__;
1265c8b29d12SMateusz Guzik
1266c8b29d12SMateusz Guzik _lockmgr_assert(lk, KA_LOCKED, file, line);
1267bdb6d824SMateusz Guzik x = lockmgr_read_value(lk);
1268c8b29d12SMateusz Guzik if (__predict_true(x & LK_SHARE) != 0) {
1269c8b29d12SMateusz Guzik lockmgr_note_shared_release(lk, file, line);
1270c00115f1SMateusz Guzik if (lockmgr_sunlock_try(lk, &x)) {
1271c00115f1SMateusz Guzik LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1272c8b29d12SMateusz Guzik } else {
1273c8b29d12SMateusz Guzik return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1274c8b29d12SMateusz Guzik }
1275c8b29d12SMateusz Guzik } else {
1276c8b29d12SMateusz Guzik tid = (uintptr_t)curthread;
1277c00115f1SMateusz Guzik lockmgr_note_exclusive_release(lk, file, line);
12784aff9f5dSMateusz Guzik if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1279c00115f1SMateusz Guzik LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,LOCKSTAT_WRITER);
1280c8b29d12SMateusz Guzik } else {
1281c8b29d12SMateusz Guzik return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1282c8b29d12SMateusz Guzik }
1283c8b29d12SMateusz Guzik }
1284c8b29d12SMateusz Guzik return (0);
1285c8b29d12SMateusz Guzik }
1286c8b29d12SMateusz Guzik
/*
 * Full-featured lockmgr entry point.  Dispatches on the operation in
 * 'flags' (LK_SHARED, LK_EXCLUSIVE, LK_UPGRADE, LK_TRYUPGRADE,
 * LK_DOWNGRADE, LK_RELEASE, LK_DRAIN), optionally dropping the caller's
 * interlock 'ilk' and honoring per-call sleep parameters (wmesg/pri/timo,
 * with LK_*_DEFAULT meaning "use the values recorded at lockinit()").
 * Returns 0 on success or an errno-style value (e.g. EBUSY for a failed
 * try operation).
 */
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lockmgr_wait lwa;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	/* Resolve LK_*_DEFAULT against the values stored in the lock. */
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	lwa.iwmesg = iwmesg;
	lwa.ipri = ipri;
	lwa.itimo = itimo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	/* Exactly one operation bit may be set. */
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %p @ %s:%d", __func__, curthread,
	    lk, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;

	/*
	 * On a LK_NOSHARE lock, shared requests are served as exclusive
	 * ones and up/downgrades degenerate to no-ops: the caller must
	 * already hold the lock exclusively and non-recursed.
	 */
	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_TRYUPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			return (0);
		}
	}

	switch (op) {
	case LK_SHARED:
		return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
		break;
	case LK_UPGRADE:
	case LK_TRYUPGRADE:
		return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
		break;
	case LK_EXCLUSIVE:
		return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		/*
		 * Panic if the lock is recursed.
		 */
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lockmgr_read_value(lk);
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lockmgr_read_value(lk);

		/*
		 * NOTE(review): __predict_true() here wraps only the bitmask,
		 * not the comparison, so the branch hint is misplaced (the
		 * truth value is unchanged).  The intended form is
		 * __predict_true((x & LK_SHARE) != 0).
		 */
		if (__predict_true(x & LK_SHARE) != 0) {
			lockmgr_note_shared_release(lk, file, line);
			return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
		} else {
			lockmgr_note_exclusive_release(lk, file, line);
			return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
		}
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		/*
		 * Drain loop: keep retrying until the lock word can be
		 * CASed from LK_UNLOCKED to the caller's thread id,
		 * sleeping on the exclusive queue when the lock is busy.
		 */
		for (;;) {
			if (lk->lk_lock == LK_UNLOCKED &&
			    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
				break;

#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object, false,
			    &contested, &waittime);

			/*
			 * If the lock is expected to not sleep just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lockmgr_read_value(lk);

			/*
			 * if the lock has been released while we spun on
			 * the sleepqueue chain lock just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			/* Only waiter/spinner bits set: the lock itself is free. */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty avoid a starvation for the
				 * threads sleeping on the shared queue by
				 * giving them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Please note that lk_exslpfail count may be
				 * lying about the real number of waiters with
				 * the LK_SLEEPFAIL flag on because they may
				 * be used in conjunction with interruptible
				 * sleeps so lk_exslpfail might be considered
				 * an 'upper limit' bound, including the edge
				 * cases.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {
					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
							    "%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
							    "%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				    "%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
				    queue);

				/*
				 * If shared waiters have been woken up we need
				 * to wait for one of them to acquire the lock
				 * before to set the exclusive waiters in
				 * order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * As far as we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    false, contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);

	return (error);
}
1593047dd67eSAttilio Rao
/*
 * Pass exclusive ownership of the lock from the current thread to the
 * fictitious LK_KERNPROC owner, so the lock can later be released by a
 * different thread.  The caller must hold the lock exclusively and
 * non-recursed.
 */
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	/*
	 * Panic if the lock is recursed.
	 */
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object, false);
	LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lockmgr_read_value(lk);
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}
1637047dd67eSAttilio Rao
1638047dd67eSAttilio Rao void
lockmgr_printinfo(const struct lock * lk)1639d576deedSPawel Jakub Dawidek lockmgr_printinfo(const struct lock *lk)
1640d7a7e179SAttilio Rao {
1641d7a7e179SAttilio Rao struct thread *td;
1642047dd67eSAttilio Rao uintptr_t x;
1643d7a7e179SAttilio Rao
1644047dd67eSAttilio Rao if (lk->lk_lock == LK_UNLOCKED)
1645047dd67eSAttilio Rao printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1646047dd67eSAttilio Rao else if (lk->lk_lock & LK_SHARE)
1647047dd67eSAttilio Rao printf("lock type %s: SHARED (count %ju)\n",
1648047dd67eSAttilio Rao lk->lock_object.lo_name,
1649047dd67eSAttilio Rao (uintmax_t)LK_SHARERS(lk->lk_lock));
1650047dd67eSAttilio Rao else {
1651047dd67eSAttilio Rao td = lockmgr_xholder(lk);
1652e64b4fa8SKonstantin Belousov if (td == (struct thread *)LK_KERNPROC)
1653e64b4fa8SKonstantin Belousov printf("lock type %s: EXCL by KERNPROC\n",
1654e64b4fa8SKonstantin Belousov lk->lock_object.lo_name);
1655e64b4fa8SKonstantin Belousov else
16562573ea5fSIvan Voras printf("lock type %s: EXCL by thread %p "
1657e64b4fa8SKonstantin Belousov "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1658e64b4fa8SKonstantin Belousov td, td->td_proc->p_pid, td->td_proc->p_comm,
1659e64b4fa8SKonstantin Belousov td->td_tid);
1660d7a7e179SAttilio Rao }
1661d7a7e179SAttilio Rao
1662047dd67eSAttilio Rao x = lk->lk_lock;
1663047dd67eSAttilio Rao if (x & LK_EXCLUSIVE_WAITERS)
1664047dd67eSAttilio Rao printf(" with exclusive waiters pending\n");
1665047dd67eSAttilio Rao if (x & LK_SHARED_WAITERS)
1666047dd67eSAttilio Rao printf(" with shared waiters pending\n");
1667651175c9SAttilio Rao if (x & LK_EXCLUSIVE_SPINNERS)
1668651175c9SAttilio Rao printf(" with exclusive spinners pending\n");
1669047dd67eSAttilio Rao
1670047dd67eSAttilio Rao STACK_PRINT(lk);
1671047dd67eSAttilio Rao }
1672047dd67eSAttilio Rao
167399448ed1SJohn Dyson int
lockstatus(const struct lock * lk)1674d576deedSPawel Jakub Dawidek lockstatus(const struct lock *lk)
167599448ed1SJohn Dyson {
1676047dd67eSAttilio Rao uintptr_t v, x;
1677047dd67eSAttilio Rao int ret;
167899448ed1SJohn Dyson
1679047dd67eSAttilio Rao ret = LK_SHARED;
1680bdb6d824SMateusz Guzik x = lockmgr_read_value(lk);
1681047dd67eSAttilio Rao v = LK_HOLDER(x);
16820e9eb108SAttilio Rao
1683047dd67eSAttilio Rao if ((x & LK_SHARE) == 0) {
1684047dd67eSAttilio Rao if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1685047dd67eSAttilio Rao ret = LK_EXCLUSIVE;
16866bdfe06aSEivind Eklund else
1687047dd67eSAttilio Rao ret = LK_EXCLOTHER;
1688047dd67eSAttilio Rao } else if (x == LK_UNLOCKED)
1689047dd67eSAttilio Rao ret = 0;
169099448ed1SJohn Dyson
1691047dd67eSAttilio Rao return (ret);
169253bf4bb2SPeter Wemm }
1693be6847d7SJohn Baldwin
#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#ifndef INVARIANTS
#undef _lockmgr_assert
#endif

/*
 * Assert that the lock is in the state described by 'what' (a KA_* value,
 * possibly OR'ed with KA_RECURSED/KA_NOTRECURSED), panicking with the
 * caller's file/line on mismatch.  No-op once the scheduler is stopped
 * (e.g. during panic).
 */
void
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (SCHEDULER_STOPPED())
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
		/* FALLTHROUGH: a shared-locked assertion implies "locked". */
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Workaround this skipping the check if the lock is held in
		 * exclusive mode even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		/* Recursion checks only make sense for exclusive holds. */
		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif
177484887fa3SAttilio Rao
1775be6847d7SJohn Baldwin #ifdef DDB
/*
 * DDB sleepchain helper: if td's wait channel is a lockmgr lock, print a
 * short description and return the owning thread via *ownerp.  Returns
 * non-zero iff td is blocked on a lockmgr lock.
 */
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	const struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}
1795462a7addSJohn Baldwin
/*
 * DDB "show lock" handler for lockmgr locks: dump the lock state
 * (unlocked / shared count / exclusive holder and recursion), pending
 * waiters, and spinners.
 */
static void
db_show_lockmgr(const struct lock_object *lock)
{
	struct thread *td;
	const struct lock *lk;

	lk = (const struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
	else
		db_printf("none\n");
}
1840be6847d7SJohn Baldwin #endif
1841