1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice(s), this list of conditions and the following disclaimer as
12 * the first lines of this file unmodified other than the possible
13 * addition of one or more copyright notices.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice(s), this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28 * DAMAGE.
29 */
30
31 #include "opt_ddb.h"
32 #include "opt_hwpmc_hooks.h"
33
34 #include <sys/param.h>
35 #include <sys/kdb.h>
36 #include <sys/ktr.h>
37 #include <sys/limits.h>
38 #include <sys/lock.h>
39 #include <sys/lock_profile.h>
40 #include <sys/lockmgr.h>
41 #include <sys/lockstat.h>
42 #include <sys/mutex.h>
43 #include <sys/proc.h>
44 #include <sys/sleepqueue.h>
45 #ifdef DEBUG_LOCKS
46 #include <sys/stack.h>
47 #endif
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50
51 #include <machine/cpu.h>
52
53 #ifdef DDB
54 #include <ddb/ddb.h>
55 #endif
56
57 #ifdef HWPMC_HOOKS
58 #include <sys/pmckern.h>
59 PMC_SOFT_DECLARE( , , lock, failed);
60 #endif
61
62 /*
63 * Hack. There should be prio_t or similar so that this is not necessary.
64 */
65 _Static_assert((PRILASTFLAG * 2) - 1 <= USHRT_MAX,
66 "prio flags wont fit in u_short pri in struct lock");
67
68 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
69 ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
70
71 #define SQ_EXCLUSIVE_QUEUE 0
72 #define SQ_SHARED_QUEUE 1
73
74 #ifndef INVARIANTS
75 #define _lockmgr_assert(lk, what, file, line)
76 #endif
77
78 #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
79 #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
80
81 #ifndef DEBUG_LOCKS
82 #define STACK_PRINT(lk)
83 #define STACK_SAVE(lk)
84 #define STACK_ZERO(lk)
85 #else
86 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
87 #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
88 #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
89 #endif
90
91 #define LOCK_LOG2(lk, string, arg1, arg2) \
92 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
93 CTR2(KTR_LOCK, (string), (arg1), (arg2))
94 #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
95 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
96 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
97
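/*
* Giant may be held (possibly recursively) when a lockmgr lock needs to
* sleep.  GIANT_SAVE() records the recursion depth in _i and fully drops
* Giant, and GIANT_RESTORE() reacquires it the same number of times once
* the sleep is over, preserving the WITNESS state across the handoff.
*/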
98 #define GIANT_DECLARE \
99 int _i = 0; \
100 WITNESS_SAVE_DECL(Giant)
101 #define GIANT_RESTORE() do { \
102 if (__predict_false(_i > 0)) { \
103 while (_i--) \
104 mtx_lock(&Giant); \
105 WITNESS_RESTORE(&Giant.lock_object, Giant); \
106 } \
107 } while (0)
108 #define GIANT_SAVE() do { \
109 if (__predict_false(mtx_owned(&Giant))) { \
110 WITNESS_SAVE(&Giant.lock_object, Giant); \
111 while (mtx_owned(&Giant)) { \
112 _i++; \
113 mtx_unlock(&Giant); \
114 } \
115 } \
116 } while (0)
117
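/*
* LK_CAN_SHARE() decides whether a new shared acquisition may proceed.
* The common case is a share-locked lock with no exclusive waiters or
* spinners.  On the slow path (fp == false) a thread that already holds
* shared lockmgr locks, or that has deadlock treatment enabled, may also
* jump ahead of exclusive waiters on a share-locked lock to avoid
* deadlocking against itself.
*/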
118 static __always_inline bool
119 LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
120 {
121
122 if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
123 LK_SHARE)
124 return (true);
125 if (fp || (!(x & LK_SHARE)))
126 return (false);
127 if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
128 (curthread->td_pflags & TDP_DEADLKTREAT))
129 return (true);
130 return (false);
131 }
132
133 #define LK_TRYOP(x) \
134 ((x) & LK_NOWAIT)
135
136 #define LK_CAN_WITNESS(x) \
137 (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
138 #define LK_TRYWIT(x) \
139 (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
140
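/*
* lockmgr_xlocked_v() is true when the lock word names curthread as the
* exclusive owner: every flag bit except LK_SHARE is masked off, so the
* comparison also fails whenever the lock is held in shared mode.
*/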
141 #define lockmgr_xlocked_v(v) \
142 (((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
143
144 #define lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
145
146 static void assert_lockmgr(const struct lock_object *lock, int how);
147 #ifdef DDB
148 static void db_show_lockmgr(const struct lock_object *lock);
149 #endif
150 static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
151 #ifdef KDTRACE_HOOKS
152 static int owner_lockmgr(const struct lock_object *lock,
153 struct thread **owner);
154 #endif
155 static uintptr_t unlock_lockmgr(struct lock_object *lock);
156
157 struct lock_class lock_class_lockmgr = {
158 .lc_name = "lockmgr",
159 .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
160 .lc_assert = assert_lockmgr,
161 #ifdef DDB
162 .lc_ddb_show = db_show_lockmgr,
163 #endif
164 .lc_lock = lock_lockmgr,
165 .lc_unlock = unlock_lockmgr,
166 #ifdef KDTRACE_HOOKS
167 .lc_owner = owner_lockmgr,
168 #endif
169 };
170
171 static __read_mostly bool lk_adaptive = true;
172 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
173 SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
174 0, "");
175 #define lockmgr_delay locks_delay
176
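/*
* Per-call sleep parameters (wait message, priority and timeout) handed
* down from __lockmgr_args() to the hard lock paths.
*/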
177 struct lockmgr_wait {
178 const char *iwmesg;
179 int ipri;
180 int itimo;
181 };
182
183 static __always_inline bool lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
184 int flags, bool fp);
185 static __always_inline bool lockmgr_sunlock_try(struct lock *lk,
186 uintptr_t *xp);
187
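/*
* Drop the interlock on the way out of an operation when the caller
* passed LK_INTERLOCK and it has not been released yet.
*/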
188 static void
189 lockmgr_exit(u_int flags, struct lock_object *ilk)
190 {
191 struct lock_class *class;
192
193 if (flags & LK_INTERLOCK) {
194 class = LOCK_CLASS(ilk);
195 class->lc_unlock(ilk);
196 }
197 }
198
199 static void
200 lockmgr_note_shared_acquire(struct lock *lk, int contested,
201 uint64_t waittime, const char *file, int line, int flags)
202 {
203
204 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
205 waittime, file, line, LOCKSTAT_READER);
206 LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
207 WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
208 TD_LOCKS_INC(curthread);
209 TD_SLOCKS_INC(curthread);
210 STACK_SAVE(lk);
211 }
212
213 static void
214 lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
215 {
216
217 WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
218 LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
219 TD_LOCKS_DEC(curthread);
220 TD_SLOCKS_DEC(curthread);
221 }
222
223 static void
224 lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
225 uint64_t waittime, const char *file, int line, int flags)
226 {
227
228 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
229 waittime, file, line, LOCKSTAT_WRITER);
230 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
231 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
232 line);
233 TD_LOCKS_INC(curthread);
234 STACK_SAVE(lk);
235 }
236
237 static void
238 lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
239 {
240
241 if (!lockmgr_disowned(lk)) {
242 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
243 TD_LOCKS_DEC(curthread);
244 }
245 LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
246 line);
247 }
248
249 static __inline struct thread *
250 lockmgr_xholder(const struct lock *lk)
251 {
252 uintptr_t x;
253
254 x = lockmgr_read_value(lk);
255 return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
256 }
257
258 /*
259 * Assumes the sleepqueue lock is held and returns with it released.
260 * It also assumes the generic interlock is sane and was previously checked.
261 * If LK_INTERLOCK is specified the interlock is not reacquired after the
262 * sleep.
263 */
264 static __inline int
265 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
266 const char *wmesg, int pri, int timo, int queue)
267 {
268 GIANT_DECLARE;
269 struct lock_class *class;
270 int catch, error;
271
272 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
273 catch = pri & PCATCH;
274 pri &= PRIMASK;
275 error = 0;
276
277 LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
278 (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
279
280 if (flags & LK_INTERLOCK)
281 class->lc_unlock(ilk);
282 if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) {
283 if (lk->lk_exslpfail < USHRT_MAX)
284 lk->lk_exslpfail++;
285 }
286 GIANT_SAVE();
287 sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
288 SLEEPQ_INTERRUPTIBLE : 0), queue);
289 if ((flags & LK_TIMELOCK) && timo)
290 sleepq_set_timeout(&lk->lock_object, timo);
291
292 /*
293 * Choose the appropriate flavor of sleep.
294 */
295 if ((flags & LK_TIMELOCK) && timo && catch)
296 error = sleepq_timedwait_sig(&lk->lock_object, pri);
297 else if ((flags & LK_TIMELOCK) && timo)
298 error = sleepq_timedwait(&lk->lock_object, pri);
299 else if (catch)
300 error = sleepq_wait_sig(&lk->lock_object, pri);
301 else
302 sleepq_wait(&lk->lock_object, pri);
303 GIANT_RESTORE();
304 if ((flags & LK_SLEEPFAIL) && error == 0)
305 error = ENOLCK;
306
307 return (error);
308 }
309
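/*
* Release a shared hold of the lock.  If this drops the last sharer while
* waiters are queued, take the slow path and wake up the appropriate
* sleepqueue, preferring exclusive waiters unless only LK_SLEEPFAIL
* sleepers are left on the exclusive queue.
*/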
310 static __inline void
311 wakeupshlk(struct lock *lk, const char *file, int line)
312 {
313 uintptr_t v, x, orig_x;
314 u_int realexslp;
315 int queue;
316
317 for (;;) {
318 x = lockmgr_read_value(lk);
319 if (lockmgr_sunlock_try(lk, &x))
320 break;
321
322 /*
323 * We should have a sharer with waiters, so enter the hard
324 * path in order to handle wakeups correctly.
325 */
326 sleepq_lock(&lk->lock_object);
327 orig_x = lockmgr_read_value(lk);
328 retry_sleepq:
329 x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
330 v = LK_UNLOCKED;
331
332 /*
333 * If the lock has exclusive waiters, give them preference in
334 * order to avoid deadlock with threads racing for the shared
335 * lock.
336 * If interruptible sleeps left the exclusive queue empty,
337 * avoid starving the threads sleeping on the shared queue by
338 * giving them precedence and clearing the exclusive waiters
339 * bit anyway.
340 * Note that the lk_exslpfail count may overstate the real
341 * number of waiters with the LK_SLEEPFAIL flag set, because
342 * such waiters may also use interruptible sleeps; treat
343 * lk_exslpfail as an upper bound.
344 */
345 realexslp = sleepq_sleepcnt(&lk->lock_object,
346 SQ_EXCLUSIVE_QUEUE);
347 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
348 if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
349 lk->lk_exslpfail = 0;
350 queue = SQ_EXCLUSIVE_QUEUE;
351 v |= (x & LK_SHARED_WAITERS);
352 } else {
353 lk->lk_exslpfail = 0;
354 LOCK_LOG2(lk,
355 "%s: %p has only LK_SLEEPFAIL sleepers",
356 __func__, lk);
357 LOCK_LOG2(lk,
358 "%s: %p waking up threads on the exclusive queue",
359 __func__, lk);
360 sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
361 SQ_EXCLUSIVE_QUEUE);
362 queue = SQ_SHARED_QUEUE;
363 }
364 } else {
365 /*
366 * Exclusive waiters sleeping with LK_SLEEPFAIL set
367 * and using interruptible or timed sleeps may have
368 * left spurious lk_exslpfail counts behind, so
369 * clean them up anyway.
370 */
371 lk->lk_exslpfail = 0;
372 queue = SQ_SHARED_QUEUE;
373 }
374
375 if (lockmgr_sunlock_try(lk, &orig_x)) {
376 sleepq_release(&lk->lock_object);
377 break;
378 }
379
380 x |= LK_SHARERS_LOCK(1);
381 if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
382 orig_x = x;
383 goto retry_sleepq;
384 }
385 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
386 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
387 "exclusive");
388 sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
389 sleepq_release(&lk->lock_object);
390 break;
391 }
392
393 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
394 }
395
396 static void
397 assert_lockmgr(const struct lock_object *lock, int what)
398 {
399
400 panic("lockmgr locks do not support assertions");
401 }
402
403 static void
404 lock_lockmgr(struct lock_object *lock, uintptr_t how)
405 {
406
407 panic("lockmgr locks do not support sleep interlocking");
408 }
409
410 static uintptr_t
411 unlock_lockmgr(struct lock_object *lock)
412 {
413
414 panic("lockmgr locks do not support sleep interlocking");
415 }
416
417 #ifdef KDTRACE_HOOKS
418 static int
419 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
420 {
421
422 panic("lockmgr locks do not support owner inquiring");
423 }
424 #endif
425
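/*
* Initialize a lockmgr lock.  The LK_* initialization flags are translated
* into lock_object LO_* flags, and the supplied priority and timeout become
* the defaults used when sleeping for the lock.
*/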
426 void
427 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
428 {
429 int iflags;
430
431 MPASS((flags & ~LK_INIT_MASK) == 0);
432 ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
433 ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
434 &lk->lk_lock));
435
436 iflags = LO_SLEEPABLE | LO_UPGRADABLE;
437 if (flags & LK_CANRECURSE)
438 iflags |= LO_RECURSABLE;
439 if ((flags & LK_NODUP) == 0)
440 iflags |= LO_DUPOK;
441 if (flags & LK_NOPROFILE)
442 iflags |= LO_NOPROFILE;
443 if ((flags & LK_NOWITNESS) == 0)
444 iflags |= LO_WITNESS;
445 if (flags & LK_QUIET)
446 iflags |= LO_QUIET;
447 if (flags & LK_IS_VNODE)
448 iflags |= LO_IS_VNODE;
449 if (flags & LK_NEW)
450 iflags |= LO_NEW;
451 iflags |= flags & LK_NOSHARE;
452
453 lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
454 lk->lk_lock = LK_UNLOCKED;
455 lk->lk_recurse = 0;
456 lk->lk_exslpfail = 0;
457 lk->lk_timo = timo;
458 lk->lk_pri = pri;
459 STACK_ZERO(lk);
460 }
461
462 /*
463 * XXX: Gross hacks to manipulate external lock flags after
464 * initialization. Used for certain vnode and buf locks.
465 */
466 void
467 lockallowshare(struct lock *lk)
468 {
469
470 lockmgr_assert(lk, KA_XLOCKED);
471 lk->lock_object.lo_flags &= ~LK_NOSHARE;
472 }
473
474 void
475 lockdisableshare(struct lock *lk)
476 {
477
478 lockmgr_assert(lk, KA_XLOCKED);
479 lk->lock_object.lo_flags |= LK_NOSHARE;
480 }
481
482 void
483 lockallowrecurse(struct lock *lk)
484 {
485
486 lockmgr_assert(lk, KA_XLOCKED);
487 lk->lock_object.lo_flags |= LO_RECURSABLE;
488 }
489
490 void
491 lockdisablerecurse(struct lock *lk)
492 {
493
494 lockmgr_assert(lk, KA_XLOCKED);
495 lk->lock_object.lo_flags &= ~LO_RECURSABLE;
496 }
497
498 void
499 lockdestroy(struct lock *lk)
500 {
501
502 KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
503 KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
504 KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
505 lock_destroy(&lk->lock_object);
506 }
507
508 static __always_inline bool
509 lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
510 {
511
512 /*
513 * If no other thread has an exclusive lock, or
514 * no exclusive waiter is present, bump the count of
515 * sharers. Since we have to preserve the state of
516 * waiters, if we fail to acquire the shared lock
517 * loop back and retry.
518 */
519 while (LK_CAN_SHARE(*xp, flags, fp)) {
520 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
521 *xp + LK_ONE_SHARER)) {
522 return (true);
523 }
524 }
525 return (false);
526 }
527
528 static __always_inline bool
529 lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
530 {
531
532 for (;;) {
533 if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
534 if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
535 *xp - LK_ONE_SHARER))
536 return (true);
537 continue;
538 }
539 break;
540 }
541 return (false);
542 }
543
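/*
* Adaptive spinning for the shared path: keep spinning while an exclusive
* owner is running on another CPU and no waiters are queued, and return
* true as soon as the lock is observed shareable again.  Otherwise fall
* back to the sleepqueue path.
*/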
544 static bool
545 lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
546 int flags)
547 {
548 struct thread *owner;
549 uintptr_t x;
550
551 x = *xp;
552 MPASS(x != LK_UNLOCKED);
553 owner = (struct thread *)LK_HOLDER(x);
554 for (;;) {
555 MPASS(owner != curthread);
556 if (owner == (struct thread *)LK_KERNPROC)
557 return (false);
558 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
559 return (false);
560 if (owner == NULL)
561 return (false);
562 if (!TD_IS_RUNNING(owner))
563 return (false);
564 if ((x & LK_ALL_WAITERS) != 0)
565 return (false);
566 lock_delay(lda);
567 x = lockmgr_read_value(lk);
568 if (LK_CAN_SHARE(x, flags, false)) {
569 *xp = x;
570 return (true);
571 }
572 owner = (struct thread *)LK_HOLDER(x);
573 }
574 }
575
576 static __noinline int
577 lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
578 const char *file, int line, struct lockmgr_wait *lwa)
579 {
580 uintptr_t tid, x;
581 int error = 0;
582 const char *iwmesg;
583 int ipri, itimo;
584
585 #ifdef KDTRACE_HOOKS
586 uint64_t sleep_time = 0;
587 #endif
588 #ifdef LOCK_PROFILING
589 uint64_t waittime = 0;
590 int contested = 0;
591 #endif
592 struct lock_delay_arg lda;
593
594 if (SCHEDULER_STOPPED())
595 goto out;
596
597 tid = (uintptr_t)curthread;
598
599 if (LK_CAN_WITNESS(flags))
600 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
601 file, line, flags & LK_INTERLOCK ? ilk : NULL);
602 x = lockmgr_read_value(lk);
603 lock_delay_arg_init(&lda, &lockmgr_delay);
604 if (!lk_adaptive)
605 flags &= ~LK_ADAPTIVE;
606 /*
607 * The lock may already be locked exclusive by curthread,
608 * avoid deadlock.
609 */
610 if (LK_HOLDER(x) == tid) {
611 LOCK_LOG2(lk,
612 "%s: %p already held in exclusive mode",
613 __func__, lk);
614 error = EDEADLK;
615 goto out;
616 }
617
618 for (;;) {
619 if (lockmgr_slock_try(lk, &x, flags, false))
620 break;
621
622 lock_profile_obtain_lock_failed(&lk->lock_object, false,
623 &contested, &waittime);
624
625 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
626 if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
627 continue;
628 }
629
630 #ifdef HWPMC_HOOKS
631 PMC_SOFT_CALL( , , lock, failed);
632 #endif
633
634 /*
635 * If the lock is expected to not sleep just give up
636 * and return.
637 */
638 if (LK_TRYOP(flags)) {
639 LOCK_LOG2(lk, "%s: %p fails the try operation",
640 __func__, lk);
641 error = EBUSY;
642 break;
643 }
644
645 /*
646 * Acquire the sleepqueue chain lock because we
647 * probably will need to manipulate the waiters flags.
648 */
649 sleepq_lock(&lk->lock_object);
650 x = lockmgr_read_value(lk);
651 retry_sleepq:
652
653 /*
654 * if the lock can be acquired in shared mode, try
655 * again.
656 */
657 if (LK_CAN_SHARE(x, flags, false)) {
658 sleepq_release(&lk->lock_object);
659 continue;
660 }
661
662 /*
663 * Try to set the LK_SHARED_WAITERS flag. If we fail,
664 * loop back and retry.
665 */
666 if ((x & LK_SHARED_WAITERS) == 0) {
667 if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
668 x | LK_SHARED_WAITERS)) {
669 goto retry_sleepq;
670 }
671 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
672 __func__, lk);
673 }
674
675 if (lwa == NULL) {
676 iwmesg = lk->lock_object.lo_name;
677 ipri = lk->lk_pri;
678 itimo = lk->lk_timo;
679 } else {
680 iwmesg = lwa->iwmesg;
681 ipri = lwa->ipri;
682 itimo = lwa->itimo;
683 }
684
685 /*
686 * Since we have been unable to acquire the
687 * shared lock and the shared waiters flag is set,
688 * we will sleep.
689 */
690 #ifdef KDTRACE_HOOKS
691 sleep_time -= lockstat_nsecs(&lk->lock_object);
692 #endif
693 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
694 SQ_SHARED_QUEUE);
695 #ifdef KDTRACE_HOOKS
696 sleep_time += lockstat_nsecs(&lk->lock_object);
697 #endif
698 flags &= ~LK_INTERLOCK;
699 if (error) {
700 LOCK_LOG3(lk,
701 "%s: interrupted sleep for %p with %d",
702 __func__, lk, error);
703 break;
704 }
705 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
706 __func__, lk);
707 x = lockmgr_read_value(lk);
708 }
709 if (error == 0) {
710 #ifdef KDTRACE_HOOKS
711 if (sleep_time != 0)
712 LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
713 LOCKSTAT_READER, (x & LK_SHARE) == 0,
714 (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
715 #endif
716 #ifdef LOCK_PROFILING
717 lockmgr_note_shared_acquire(lk, contested, waittime,
718 file, line, flags);
719 #else
720 lockmgr_note_shared_acquire(lk, 0, 0, file, line,
721 flags);
722 #endif
723 }
724
725 out:
726 lockmgr_exit(flags, ilk);
727 return (error);
728 }
729
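/*
* Adaptive spinning for the exclusive path: spin while the current owner
* is running on another CPU and no waiters are queued, and return true
* once the lock is observed unlocked.  Otherwise fall back to the
* sleepqueue path.
*/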
730 static bool
731 lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
732 {
733 struct thread *owner;
734 uintptr_t x;
735
736 x = *xp;
737 MPASS(x != LK_UNLOCKED);
738 owner = (struct thread *)LK_HOLDER(x);
739 for (;;) {
740 MPASS(owner != curthread);
741 if (owner == NULL)
742 return (false);
743 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
744 return (false);
745 if (owner == (struct thread *)LK_KERNPROC)
746 return (false);
747 if (!TD_IS_RUNNING(owner))
748 return (false);
749 if ((x & LK_ALL_WAITERS) != 0)
750 return (false);
751 lock_delay(lda);
752 x = lockmgr_read_value(lk);
753 if (x == LK_UNLOCKED) {
754 *xp = x;
755 return (true);
756 }
757 owner = (struct thread *)LK_HOLDER(x);
758 }
759 }
760
761 static __noinline int
762 lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
763 const char *file, int line, struct lockmgr_wait *lwa)
764 {
765 struct lock_class *class;
766 uintptr_t tid, x, v;
767 int error = 0;
768 const char *iwmesg;
769 int ipri, itimo;
770
771 #ifdef KDTRACE_HOOKS
772 uint64_t sleep_time = 0;
773 #endif
774 #ifdef LOCK_PROFILING
775 uint64_t waittime = 0;
776 int contested = 0;
777 #endif
778 struct lock_delay_arg lda;
779
780 if (SCHEDULER_STOPPED())
781 goto out;
782
783 tid = (uintptr_t)curthread;
784
785 if (LK_CAN_WITNESS(flags))
786 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
787 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
788 ilk : NULL);
789
790 /*
791 * If curthread already holds the lock and this one is
792 * allowed to recurse, simply recurse on it.
793 */
794 if (lockmgr_xlocked(lk)) {
795 if ((flags & LK_CANRECURSE) == 0 &&
796 (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
797 /*
798 * For a try operation just give up and return
799 * instead of panicking.
800 */
801 if (LK_TRYOP(flags)) {
802 LOCK_LOG2(lk,
803 "%s: %p fails the try operation",
804 __func__, lk);
805 error = EBUSY;
806 goto out;
807 }
808 if (flags & LK_INTERLOCK) {
809 class = LOCK_CLASS(ilk);
810 class->lc_unlock(ilk);
811 }
812 STACK_PRINT(lk);
813 panic("%s: recursing on non recursive lockmgr %p "
814 "@ %s:%d\n", __func__, lk, file, line);
815 }
816 atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
817 lk->lk_recurse++;
818 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
819 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
820 lk->lk_recurse, file, line);
821 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
822 LK_TRYWIT(flags), file, line);
823 TD_LOCKS_INC(curthread);
824 goto out;
825 }
826
827 x = LK_UNLOCKED;
828 lock_delay_arg_init(&lda, &lockmgr_delay);
829 if (!lk_adaptive)
830 flags &= ~LK_ADAPTIVE;
831 for (;;) {
832 if (x == LK_UNLOCKED) {
833 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
834 break;
835 continue;
836 }
837
838 lock_profile_obtain_lock_failed(&lk->lock_object, false,
839 &contested, &waittime);
840
841 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
842 if (lockmgr_xlock_adaptive(&lda, lk, &x))
843 continue;
844 }
845 #ifdef HWPMC_HOOKS
846 PMC_SOFT_CALL( , , lock, failed);
847 #endif
848
849 /*
850 * If the lock is expected to not sleep just give up
851 * and return.
852 */
853 if (LK_TRYOP(flags)) {
854 LOCK_LOG2(lk, "%s: %p fails the try operation",
855 __func__, lk);
856 error = EBUSY;
857 break;
858 }
859
860 /*
861 * Acquire the sleepqueue chain lock because we
862 * probably will need to manipulate the waiters flags.
863 */
864 sleepq_lock(&lk->lock_object);
865 x = lockmgr_read_value(lk);
866 retry_sleepq:
867
868 /*
869 * if the lock has been released while we spun on
870 * the sleepqueue chain lock just try again.
871 */
872 if (x == LK_UNLOCKED) {
873 sleepq_release(&lk->lock_object);
874 continue;
875 }
876
877 /*
878 * The lock can be in the state where there is a
879 * pending queue of waiters, but still no owner.
880 * This happens when the lock is contested and an
881 * owner is going to claim the lock.
882 * If curthread is the one successfully acquiring it
883 * claim lock ownership and return, preserving waiters
884 * flags.
885 */
886 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
887 if ((x & ~v) == LK_UNLOCKED) {
888 v &= ~LK_EXCLUSIVE_SPINNERS;
889 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
890 tid | v)) {
891 sleepq_release(&lk->lock_object);
892 LOCK_LOG2(lk,
893 "%s: %p claimed by a new writer",
894 __func__, lk);
895 break;
896 }
897 goto retry_sleepq;
898 }
899
900 /*
901 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
902 * fail, loop back and retry.
903 */
904 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
905 if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
906 x | LK_EXCLUSIVE_WAITERS)) {
907 goto retry_sleepq;
908 }
909 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
910 __func__, lk);
911 }
912
913 if (lwa == NULL) {
914 iwmesg = lk->lock_object.lo_name;
915 ipri = lk->lk_pri;
916 itimo = lk->lk_timo;
917 } else {
918 iwmesg = lwa->iwmesg;
919 ipri = lwa->ipri;
920 itimo = lwa->itimo;
921 }
922
923 /*
924 * Since we have been unable to acquire the
925 * exclusive lock and the exclusive waiters flag
926 * is set, we will sleep.
927 */
928 #ifdef KDTRACE_HOOKS
929 sleep_time -= lockstat_nsecs(&lk->lock_object);
930 #endif
931 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
932 SQ_EXCLUSIVE_QUEUE);
933 #ifdef KDTRACE_HOOKS
934 sleep_time += lockstat_nsecs(&lk->lock_object);
935 #endif
936 flags &= ~LK_INTERLOCK;
937 if (error) {
938 LOCK_LOG3(lk,
939 "%s: interrupted sleep for %p with %d",
940 __func__, lk, error);
941 break;
942 }
943 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
944 __func__, lk);
945 x = lockmgr_read_value(lk);
946 }
947 if (error == 0) {
948 #ifdef KDTRACE_HOOKS
949 if (sleep_time != 0)
950 LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
951 LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
952 (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
953 #endif
954 #ifdef LOCK_PROFILING
955 lockmgr_note_exclusive_acquire(lk, contested, waittime,
956 file, line, flags);
957 #else
958 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
959 flags);
960 #endif
961 }
962
963 out:
964 lockmgr_exit(flags, ilk);
965 return (error);
966 }
967
968 static __noinline int
969 lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
970 const char *file, int line, struct lockmgr_wait *lwa)
971 {
972 uintptr_t tid, v, setv;
973 int error = 0;
974 int op;
975
976 if (SCHEDULER_STOPPED())
977 goto out;
978
979 tid = (uintptr_t)curthread;
980
981 _lockmgr_assert(lk, KA_SLOCKED, file, line);
982
983 op = flags & LK_TYPE_MASK;
984 v = lockmgr_read_value(lk);
985 for (;;) {
986 if (LK_SHARERS(v) > 1) {
987 if (op == LK_TRYUPGRADE) {
988 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
989 __func__, lk);
990 error = EBUSY;
991 goto out;
992 }
993 if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
994 v - LK_ONE_SHARER)) {
995 lockmgr_note_shared_release(lk, file, line);
996 goto out_xlock;
997 }
998 continue;
999 }
1000 MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));
1001
1002 setv = tid;
1003 setv |= (v & LK_ALL_WAITERS);
1004
1005 /*
1006 * Try to switch from one shared lock to an exclusive one.
1007 * We need to preserve waiters flags during the operation.
1008 */
1009 if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
1010 LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
1011 line);
1012 WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
1013 LK_TRYWIT(flags), file, line);
1014 LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
1015 TD_SLOCKS_DEC(curthread);
1016 goto out;
1017 }
1018 }
1019
1020 out_xlock:
1021 error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
1022 flags &= ~LK_INTERLOCK;
1023 out:
1024 lockmgr_exit(flags, ilk);
1025 return (error);
1026 }
1027
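/*
* Fast-path lock entry point: attempt the uncontended shared or exclusive
* acquisition inline and fall back to the hard paths, or to
* __lockmgr_args() for operations not handled here.
*/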
1028 int
1029 lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
1030 const char *file, int line)
1031 {
1032 struct lock_class *class;
1033 uintptr_t x, tid;
1034 u_int op;
1035 bool locked;
1036
1037 if (SCHEDULER_STOPPED())
1038 return (0);
1039
1040 op = flags & LK_TYPE_MASK;
1041 locked = false;
1042 switch (op) {
1043 case LK_SHARED:
1044 if (LK_CAN_WITNESS(flags))
1045 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1046 file, line, flags & LK_INTERLOCK ? ilk : NULL);
1047 if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
1048 break;
1049 x = lockmgr_read_value(lk);
1050 if (lockmgr_slock_try(lk, &x, flags, true)) {
1051 lockmgr_note_shared_acquire(lk, 0, 0,
1052 file, line, flags);
1053 locked = true;
1054 } else {
1055 return (lockmgr_slock_hard(lk, flags, ilk, file, line,
1056 NULL));
1057 }
1058 break;
1059 case LK_EXCLUSIVE:
1060 if (LK_CAN_WITNESS(flags))
1061 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1062 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1063 ilk : NULL);
1064 tid = (uintptr_t)curthread;
1065 if (lockmgr_read_value(lk) == LK_UNLOCKED &&
1066 atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1067 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1068 flags);
1069 locked = true;
1070 } else {
1071 return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
1072 NULL));
1073 }
1074 break;
1075 case LK_UPGRADE:
1076 case LK_TRYUPGRADE:
1077 return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
1078 default:
1079 break;
1080 }
1081 if (__predict_true(locked)) {
1082 if (__predict_false(flags & LK_INTERLOCK)) {
1083 class = LOCK_CLASS(ilk);
1084 class->lc_unlock(ilk);
1085 }
1086 return (0);
1087 } else {
1088 return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
1089 LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
1090 }
1091 }
1092
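/*
* Slow path for releasing a shared lock: wakeupshlk() performs the release
* and any required wakeups, after which the interlock is dropped if
* requested.
*/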
1093 static __noinline int
1094 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1095 const char *file, int line)
1096 {
1097 if (!SCHEDULER_STOPPED())
1098 wakeupshlk(lk, file, line);
1099 lockmgr_exit(flags, ilk);
1100 return (0);
1101 }
1102
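/*
* Slow path for releasing an exclusive lock: handles disowned
* (LK_KERNPROC) and recursed holds, then wakes up queued waiters,
* preferring the exclusive queue over the shared one.
*/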
1103 static __noinline int
1104 lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1105 const char *file, int line)
1106 {
1107 uintptr_t tid, v;
1108 u_int realexslp;
1109 int queue;
1110
1111 if (SCHEDULER_STOPPED())
1112 goto out;
1113
1114 tid = (uintptr_t)curthread;
1115
1116 /*
1117 * As a first option, treat the lock as if it has no
1118 * waiters.
1119 * Fix up the tid variable if the lock has been disowned.
1120 */
1121 if (lockmgr_disowned_v(x))
1122 tid = LK_KERNPROC;
1123
1124 /*
1125 * The lock is held in exclusive mode.
1126 * If the lock is recursed also, then unrecurse it.
1127 */
1128 if (lockmgr_recursed_v(x)) {
1129 LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1130 lk->lk_recurse--;
1131 if (lk->lk_recurse == 0)
1132 atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
1133 goto out;
1134 }
1135 if (tid != LK_KERNPROC)
1136 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
1137 LOCKSTAT_WRITER);
1138
1139 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1140 goto out;
1141
1142 sleepq_lock(&lk->lock_object);
1143 x = lockmgr_read_value(lk);
1144 v = LK_UNLOCKED;
1145
1146 /*
1147 * If the lock has exclusive waiters, give them
1148 * preference in order to avoid deadlock with
1149 * threads racing for the shared lock.
1150 * If interruptible sleeps left the exclusive
1151 * queue empty, avoid starving the threads
1152 * sleeping on the shared queue by giving them
1153 * precedence and clearing the exclusive
1154 * waiters bit anyway.
1155 * Note that the lk_exslpfail count may
1156 * overstate the real number of waiters with
1157 * the LK_SLEEPFAIL flag set, because such
1158 * waiters may also use interruptible sleeps;
1159 * treat lk_exslpfail as an upper bound.
1160 */
1161 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1162 realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1163 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1164 if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
1165 lk->lk_exslpfail = 0;
1166 queue = SQ_EXCLUSIVE_QUEUE;
1167 v |= (x & LK_SHARED_WAITERS);
1168 } else {
1169 lk->lk_exslpfail = 0;
1170 LOCK_LOG2(lk,
1171 "%s: %p has only LK_SLEEPFAIL sleepers",
1172 __func__, lk);
1173 LOCK_LOG2(lk,
1174 "%s: %p waking up threads on the exclusive queue",
1175 __func__, lk);
1176 sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
1177 SQ_EXCLUSIVE_QUEUE);
1178 queue = SQ_SHARED_QUEUE;
1179 }
1180 } else {
1181 /*
1182 * Exclusive waiters sleeping with LK_SLEEPFAIL
1183 * set and using interruptible or timed sleeps
1184 * may have left spurious lk_exslpfail counts
1185 * behind, so clean them up anyway.
1186 */
1187 lk->lk_exslpfail = 0;
1188 queue = SQ_SHARED_QUEUE;
1189 }
1190
1191 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1192 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1193 "exclusive");
1194 atomic_store_rel_ptr(&lk->lk_lock, v);
1195 sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1196 sleepq_release(&lk->lock_object);
1197
1198 out:
1199 lockmgr_exit(flags, ilk);
1200 return (0);
1201 }
1202
1203 /*
1204 * Lightweight entry points for common operations.
1205 *
1206 * Functionality is similar to sx locks, in that none of the additional lockmgr
1207 * features are supported. To be clear, these are NOT supported:
1208 * 1. shared locking disablement
1209 * 2. returning with an error after sleep
1210 * 3. unlocking the interlock
1211 *
1212 * If in doubt, use lockmgr_lock_flags.
1213 */
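/*
* A minimal usage sketch for these entry points (hypothetical caller, not
* taken from this file):
*
*	lockmgr_xlock(&obj->lk, LK_EXCLUSIVE, __FILE__, __LINE__);
*	... modify the object ...
*	lockmgr_unlock(&obj->lk);
*/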
1214 int
1215 lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1216 {
1217 uintptr_t x;
1218
1219 MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1220 MPASS((flags & LK_INTERLOCK) == 0);
1221 MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1222
1223 if (LK_CAN_WITNESS(flags))
1224 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1225 file, line, NULL);
1226 x = lockmgr_read_value(lk);
1227 if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1228 lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1229 return (0);
1230 }
1231
1232 return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1233 }
1234
1235 int
1236 lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1237 {
1238 uintptr_t tid;
1239
1240 MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1241 MPASS((flags & LK_INTERLOCK) == 0);
1242
1243 if (LK_CAN_WITNESS(flags))
1244 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1245 LOP_EXCLUSIVE, file, line, NULL);
1246 tid = (uintptr_t)curthread;
1247 if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1248 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1249 flags);
1250 return (0);
1251 }
1252
1253 return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1254 }
1255
1256 int
1257 lockmgr_unlock(struct lock *lk)
1258 {
1259 uintptr_t x, tid;
1260 const char *file;
1261 int line;
1262
1263 file = __FILE__;
1264 line = __LINE__;
1265
1266 _lockmgr_assert(lk, KA_LOCKED, file, line);
1267 x = lockmgr_read_value(lk);
1268 if (__predict_true(x & LK_SHARE) != 0) {
1269 lockmgr_note_shared_release(lk, file, line);
1270 if (lockmgr_sunlock_try(lk, &x)) {
1271 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1272 } else {
1273 return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1274 }
1275 } else {
1276 tid = (uintptr_t)curthread;
1277 lockmgr_note_exclusive_release(lk, file, line);
1278 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1279 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,LOCKSTAT_WRITER);
1280 } else {
1281 return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1282 }
1283 }
1284 return (0);
1285 }
1286
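/*
* Full-featured lockmgr entry point.  Decodes the requested operation,
* honors LK_NOSHARE by promoting shared requests to exclusive ones, and
* implements LK_DOWNGRADE, LK_RELEASE and LK_DRAIN inline while delegating
* the remaining operations to the dedicated helpers.
*/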
1287 int
1288 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
1289 const char *wmesg, int pri, int timo, const char *file, int line)
1290 {
1291 GIANT_DECLARE;
1292 struct lockmgr_wait lwa;
1293 struct lock_class *class;
1294 const char *iwmesg;
1295 uintptr_t tid, v, x;
1296 u_int op, realexslp;
1297 int error, ipri, itimo, queue;
1298 #ifdef LOCK_PROFILING
1299 uint64_t waittime = 0;
1300 int contested = 0;
1301 #endif
1302
1303 if (SCHEDULER_STOPPED())
1304 return (0);
1305
1306 error = 0;
1307 tid = (uintptr_t)curthread;
1308 op = (flags & LK_TYPE_MASK);
1309 iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
1310 ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
1311 itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
1312
1313 lwa.iwmesg = iwmesg;
1314 lwa.ipri = ipri;
1315 lwa.itimo = itimo;
1316
1317 MPASS((flags & ~LK_TOTAL_MASK) == 0);
1318 KASSERT((op & (op - 1)) == 0,
1319 ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
1320 KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
1321 (op != LK_DOWNGRADE && op != LK_RELEASE),
1322 ("%s: Invalid flags in regard of the operation desired @ %s:%d",
1323 __func__, file, line));
1324 KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
1325 ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
1326 __func__, file, line));
1327 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
1328 ("%s: idle thread %p on lockmgr %p @ %s:%d", __func__, curthread,
1329 lk, file, line));
1330
1331 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
1332
1333 if (lk->lock_object.lo_flags & LK_NOSHARE) {
1334 switch (op) {
1335 case LK_SHARED:
1336 op = LK_EXCLUSIVE;
1337 break;
1338 case LK_UPGRADE:
1339 case LK_TRYUPGRADE:
1340 case LK_DOWNGRADE:
1341 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
1342 file, line);
1343 if (flags & LK_INTERLOCK)
1344 class->lc_unlock(ilk);
1345 return (0);
1346 }
1347 }
1348
1349 switch (op) {
1350 case LK_SHARED:
1351 return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
1352 break;
1353 case LK_UPGRADE:
1354 case LK_TRYUPGRADE:
1355 return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
1356 break;
1357 case LK_EXCLUSIVE:
1358 return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1359 break;
1360 case LK_DOWNGRADE:
1361 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1362 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
1363
1364 /*
1365 * Panic if the lock is recursed.
1366 */
1367 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1368 if (flags & LK_INTERLOCK)
1369 class->lc_unlock(ilk);
1370 panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
1371 __func__, iwmesg, file, line);
1372 }
1373 TD_SLOCKS_INC(curthread);
1374
1375 /*
1376 * In order to preserve waiters flags, just spin.
1377 */
1378 for (;;) {
1379 x = lockmgr_read_value(lk);
1380 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1381 x &= LK_ALL_WAITERS;
1382 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1383 LK_SHARERS_LOCK(1) | x))
1384 break;
1385 cpu_spinwait();
1386 }
1387 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1388 LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1389 break;
1390 case LK_RELEASE:
1391 _lockmgr_assert(lk, KA_LOCKED, file, line);
1392 x = lockmgr_read_value(lk);
1393
1394 if (__predict_true(x & LK_SHARE) != 0) {
1395 lockmgr_note_shared_release(lk, file, line);
1396 return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1397 } else {
1398 lockmgr_note_exclusive_release(lk, file, line);
1399 return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1400 }
1401 break;
1402 case LK_DRAIN:
1403 if (LK_CAN_WITNESS(flags))
1404 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1405 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1406 ilk : NULL);
1407
1408 /*
1409 * Trying to drain a lock we already own will result in a
1410 * deadlock.
1411 */
1412 if (lockmgr_xlocked(lk)) {
1413 if (flags & LK_INTERLOCK)
1414 class->lc_unlock(ilk);
1415 panic("%s: draining %s with the lock held @ %s:%d\n",
1416 __func__, iwmesg, file, line);
1417 }
1418
1419 for (;;) {
1420 if (lk->lk_lock == LK_UNLOCKED &&
1421 atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1422 break;
1423
1424 #ifdef HWPMC_HOOKS
1425 PMC_SOFT_CALL( , , lock, failed);
1426 #endif
1427 lock_profile_obtain_lock_failed(&lk->lock_object, false,
1428 &contested, &waittime);
1429
1430 /*
1431 * If the lock is expected to not sleep just give up
1432 * and return.
1433 */
1434 if (LK_TRYOP(flags)) {
1435 LOCK_LOG2(lk, "%s: %p fails the try operation",
1436 __func__, lk);
1437 error = EBUSY;
1438 break;
1439 }
1440
1441 /*
1442 * Acquire the sleepqueue chain lock because we
1443 * probably will need to manipulate the waiters flags.
1444 */
1445 sleepq_lock(&lk->lock_object);
1446 x = lockmgr_read_value(lk);
1447
1448 /*
1449 * if the lock has been released while we spun on
1450 * the sleepqueue chain lock just try again.
1451 */
1452 if (x == LK_UNLOCKED) {
1453 sleepq_release(&lk->lock_object);
1454 continue;
1455 }
1456
1457 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1458 if ((x & ~v) == LK_UNLOCKED) {
1459 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1460
1461 /*
1462 * If interruptible sleeps left the exclusive
1463 * queue empty avoid a starvation for the
1464 * threads sleeping on the shared queue by
1465 * giving them precedence and cleaning up the
1466 * exclusive waiters bit anyway.
1467 * Note that the lk_exslpfail count may
1468 * overstate the real number of waiters
1469 * with the LK_SLEEPFAIL flag set, because
1470 * such waiters may also be using
1471 * interruptible sleeps; lk_exslpfail
1472 * should therefore be treated only as an
1473 * upper bound.
1474 */
1475 if (v & LK_EXCLUSIVE_WAITERS) {
1476 queue = SQ_EXCLUSIVE_QUEUE;
1477 v &= ~LK_EXCLUSIVE_WAITERS;
1478 } else {
1479 /*
1480 * Exclusive waiters sleeping with
1481 * LK_SLEEPFAIL set and using
1482 * interruptible or timed sleeps may
1483 * have left spurious lk_exslpfail
1484 * counts behind, so clean them up anyway.
1485 */
1486 MPASS(v & LK_SHARED_WAITERS);
1487 lk->lk_exslpfail = 0;
1488 queue = SQ_SHARED_QUEUE;
1489 v &= ~LK_SHARED_WAITERS;
1490 }
1491 if (queue == SQ_EXCLUSIVE_QUEUE) {
1492 realexslp =
1493 sleepq_sleepcnt(&lk->lock_object,
1494 SQ_EXCLUSIVE_QUEUE);
1495 if (lk->lk_exslpfail >= realexslp) {
1496 lk->lk_exslpfail = 0;
1497 queue = SQ_SHARED_QUEUE;
1498 v &= ~LK_SHARED_WAITERS;
1499 if (realexslp != 0) {
1500 LOCK_LOG2(lk,
1501 "%s: %p has only LK_SLEEPFAIL sleepers",
1502 __func__, lk);
1503 LOCK_LOG2(lk,
1504 "%s: %p waking up threads on the exclusive queue",
1505 __func__, lk);
1506 sleepq_broadcast(
1507 &lk->lock_object,
1508 SLEEPQ_LK, 0,
1509 SQ_EXCLUSIVE_QUEUE);
1510 }
1511 } else
1512 lk->lk_exslpfail = 0;
1513 }
1514 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1515 sleepq_release(&lk->lock_object);
1516 continue;
1517 }
1518 LOCK_LOG3(lk,
1519 "%s: %p waking up all threads on the %s queue",
1520 __func__, lk, queue == SQ_SHARED_QUEUE ?
1521 "shared" : "exclusive");
1522 sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
1523 queue);
1524
1525 /*
1526 * If shared waiters have been woken up we need
1527 * to wait for one of them to acquire the lock
1528 * before setting the exclusive waiters flag, in
1529 * order to avoid a deadlock.
1530 */
1531 if (queue == SQ_SHARED_QUEUE) {
1532 for (v = lk->lk_lock;
1533 (v & LK_SHARE) && !LK_SHARERS(v);
1534 v = lk->lk_lock)
1535 cpu_spinwait();
1536 }
1537 }
1538
1539 /*
1540 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
1541 * fail, loop back and retry.
1542 */
1543 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1544 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1545 x | LK_EXCLUSIVE_WAITERS)) {
1546 sleepq_release(&lk->lock_object);
1547 continue;
1548 }
1549 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1550 __func__, lk);
1551 }
1552
1553 /*
1554 * Since we have been unable to acquire the
1555 * exclusive lock and the exclusive waiters flag
1556 * is set, we will sleep.
1557 */
1558 if (flags & LK_INTERLOCK) {
1559 class->lc_unlock(ilk);
1560 flags &= ~LK_INTERLOCK;
1561 }
1562 GIANT_SAVE();
1563 sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1564 SQ_EXCLUSIVE_QUEUE);
1565 sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1566 GIANT_RESTORE();
1567 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1568 __func__, lk);
1569 }
1570
1571 if (error == 0) {
1572 lock_profile_obtain_lock_success(&lk->lock_object,
1573 false, contested, waittime, file, line);
1574 LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1575 lk->lk_recurse, file, line);
1576 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1577 LK_TRYWIT(flags), file, line);
1578 TD_LOCKS_INC(curthread);
1579 STACK_SAVE(lk);
1580 }
1581 break;
1582 default:
1583 if (flags & LK_INTERLOCK)
1584 class->lc_unlock(ilk);
1585 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1586 }
1587
1588 if (flags & LK_INTERLOCK)
1589 class->lc_unlock(ilk);
1590
1591 return (error);
1592 }
1593
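/*
* Transfer exclusive ownership of the lock to LK_KERNPROC so that it can
* later be released by a thread other than the current owner.  WITNESS and
* per-thread lock accounting stop tracking the lock once it is disowned.
*/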
1594 void
1595 _lockmgr_disown(struct lock *lk, const char *file, int line)
1596 {
1597 uintptr_t tid, x;
1598
1599 if (SCHEDULER_STOPPED())
1600 return;
1601
1602 tid = (uintptr_t)curthread;
1603 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1604
1605 /*
1606 * Panic if the lock is recursed.
1607 */
1608 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1609 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1610 __func__, file, line);
1611
1612 /*
1613 * If the owner is already LK_KERNPROC just skip the whole operation.
1614 */
1615 if (LK_HOLDER(lk->lk_lock) != tid)
1616 return;
1617 lock_profile_release_lock(&lk->lock_object, false);
1618 LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1619 LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1620 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1621 TD_LOCKS_DEC(curthread);
1622 STACK_SAVE(lk);
1623
1624 /*
1625 * In order to preserve waiters flags, just spin.
1626 */
1627 for (;;) {
1628 x = lockmgr_read_value(lk);
1629 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1630 x &= LK_ALL_WAITERS;
1631 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1632 LK_KERNPROC | x))
1633 return;
1634 cpu_spinwait();
1635 }
1636 }
1637
1638 void
1639 lockmgr_printinfo(const struct lock *lk)
1640 {
1641 struct thread *td;
1642 uintptr_t x;
1643
1644 if (lk->lk_lock == LK_UNLOCKED)
1645 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1646 else if (lk->lk_lock & LK_SHARE)
1647 printf("lock type %s: SHARED (count %ju)\n",
1648 lk->lock_object.lo_name,
1649 (uintmax_t)LK_SHARERS(lk->lk_lock));
1650 else {
1651 td = lockmgr_xholder(lk);
1652 if (td == (struct thread *)LK_KERNPROC)
1653 printf("lock type %s: EXCL by KERNPROC\n",
1654 lk->lock_object.lo_name);
1655 else
1656 printf("lock type %s: EXCL by thread %p "
1657 "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1658 td, td->td_proc->p_pid, td->td_proc->p_comm,
1659 td->td_tid);
1660 }
1661
1662 x = lk->lk_lock;
1663 if (x & LK_EXCLUSIVE_WAITERS)
1664 printf(" with exclusive waiters pending\n");
1665 if (x & LK_SHARED_WAITERS)
1666 printf(" with shared waiters pending\n");
1667 if (x & LK_EXCLUSIVE_SPINNERS)
1668 printf(" with exclusive spinners pending\n");
1669
1670 STACK_PRINT(lk);
1671 }
1672
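/*
* Report the state of the lock relative to curthread: LK_EXCLUSIVE if
* curthread (or LK_KERNPROC) holds it exclusively, LK_EXCLOTHER if another
* thread does, LK_SHARED if it is share-locked and 0 if it is unlocked.
*/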
1673 int
1674 lockstatus(const struct lock *lk)
1675 {
1676 uintptr_t v, x;
1677 int ret;
1678
1679 ret = LK_SHARED;
1680 x = lockmgr_read_value(lk);
1681 v = LK_HOLDER(x);
1682
1683 if ((x & LK_SHARE) == 0) {
1684 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1685 ret = LK_EXCLUSIVE;
1686 else
1687 ret = LK_EXCLOTHER;
1688 } else if (x == LK_UNLOCKED)
1689 ret = 0;
1690
1691 return (ret);
1692 }
1693
1694 #ifdef INVARIANT_SUPPORT
1695
1696 FEATURE(invariant_support,
1697 "Support for modules compiled with INVARIANTS option");
1698
1699 #ifndef INVARIANTS
1700 #undef _lockmgr_assert
1701 #endif
1702
1703 void
1704 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1705 {
1706 int slocked = 0;
1707
1708 if (SCHEDULER_STOPPED())
1709 return;
1710 switch (what) {
1711 case KA_SLOCKED:
1712 case KA_SLOCKED | KA_NOTRECURSED:
1713 case KA_SLOCKED | KA_RECURSED:
1714 slocked = 1;
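/* FALLTHROUGH */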
1715 case KA_LOCKED:
1716 case KA_LOCKED | KA_NOTRECURSED:
1717 case KA_LOCKED | KA_RECURSED:
1718 #ifdef WITNESS
1719
1720 /*
1721 * We cannot trust WITNESS if the lock is held in exclusive
1722 * mode and a call to lockmgr_disown() happened.
1723 * Workaround this skipping the check if the lock is held in
1724 * exclusive mode even for the KA_LOCKED case.
1725 */
1726 if (slocked || (lk->lk_lock & LK_SHARE)) {
1727 witness_assert(&lk->lock_object, what, file, line);
1728 break;
1729 }
1730 #endif
1731 if (lk->lk_lock == LK_UNLOCKED ||
1732 ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1733 (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1734 panic("Lock %s not %slocked @ %s:%d\n",
1735 lk->lock_object.lo_name, slocked ? "share" : "",
1736 file, line);
1737
1738 if ((lk->lk_lock & LK_SHARE) == 0) {
1739 if (lockmgr_recursed(lk)) {
1740 if (what & KA_NOTRECURSED)
1741 panic("Lock %s recursed @ %s:%d\n",
1742 lk->lock_object.lo_name, file,
1743 line);
1744 } else if (what & KA_RECURSED)
1745 panic("Lock %s not recursed @ %s:%d\n",
1746 lk->lock_object.lo_name, file, line);
1747 }
1748 break;
1749 case KA_XLOCKED:
1750 case KA_XLOCKED | KA_NOTRECURSED:
1751 case KA_XLOCKED | KA_RECURSED:
1752 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1753 panic("Lock %s not exclusively locked @ %s:%d\n",
1754 lk->lock_object.lo_name, file, line);
1755 if (lockmgr_recursed(lk)) {
1756 if (what & KA_NOTRECURSED)
1757 panic("Lock %s recursed @ %s:%d\n",
1758 lk->lock_object.lo_name, file, line);
1759 } else if (what & KA_RECURSED)
1760 panic("Lock %s not recursed @ %s:%d\n",
1761 lk->lock_object.lo_name, file, line);
1762 break;
1763 case KA_UNLOCKED:
1764 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1765 panic("Lock %s exclusively locked @ %s:%d\n",
1766 lk->lock_object.lo_name, file, line);
1767 break;
1768 default:
1769 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1770 line);
1771 }
1772 }
1773 #endif
1774
1775 #ifdef DDB
1776 int
1777 lockmgr_chain(struct thread *td, struct thread **ownerp)
1778 {
1779 const struct lock *lk;
1780
1781 lk = td->td_wchan;
1782
1783 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1784 return (0);
1785 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1786 if (lk->lk_lock & LK_SHARE)
1787 db_printf("SHARED (count %ju)\n",
1788 (uintmax_t)LK_SHARERS(lk->lk_lock));
1789 else
1790 db_printf("EXCL\n");
1791 *ownerp = lockmgr_xholder(lk);
1792
1793 return (1);
1794 }
1795
1796 static void
1797 db_show_lockmgr(const struct lock_object *lock)
1798 {
1799 struct thread *td;
1800 const struct lock *lk;
1801
1802 lk = (const struct lock *)lock;
1803
1804 db_printf(" state: ");
1805 if (lk->lk_lock == LK_UNLOCKED)
1806 db_printf("UNLOCKED\n");
1807 else if (lk->lk_lock & LK_SHARE)
1808 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1809 else {
1810 td = lockmgr_xholder(lk);
1811 if (td == (struct thread *)LK_KERNPROC)
1812 db_printf("XLOCK: LK_KERNPROC\n");
1813 else
1814 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1815 td->td_tid, td->td_proc->p_pid,
1816 td->td_proc->p_comm);
1817 if (lockmgr_recursed(lk))
1818 db_printf(" recursed: %d\n", lk->lk_recurse);
1819 }
1820 db_printf(" waiters: ");
1821 switch (lk->lk_lock & LK_ALL_WAITERS) {
1822 case LK_SHARED_WAITERS:
1823 db_printf("shared\n");
1824 break;
1825 case LK_EXCLUSIVE_WAITERS:
1826 db_printf("exclusive\n");
1827 break;
1828 case LK_ALL_WAITERS:
1829 db_printf("shared and exclusive\n");
1830 break;
1831 default:
1832 db_printf("none\n");
1833 }
1834 db_printf(" spinners: ");
1835 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1836 db_printf("exclusive\n");
1837 else
1838 db_printf("none\n");
1839 }
1840 #endif
1841