1 /*- 2 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice(s), this list of conditions and the following disclaimer as 10 * the first lines of this file unmodified other than the possible 11 * addition of one or more copyright notices. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice(s), this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY 17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY 20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 26 * DAMAGE. 27 */ 28 29 #include "opt_adaptive_lockmgrs.h" 30 #include "opt_ddb.h" 31 #include "opt_hwpmc_hooks.h" 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <sys/param.h> 37 #include <sys/kdb.h> 38 #include <sys/ktr.h> 39 #include <sys/lock.h> 40 #include <sys/lock_profile.h> 41 #include <sys/lockmgr.h> 42 #include <sys/mutex.h> 43 #include <sys/proc.h> 44 #include <sys/sleepqueue.h> 45 #ifdef DEBUG_LOCKS 46 #include <sys/stack.h> 47 #endif 48 #include <sys/sysctl.h> 49 #include <sys/systm.h> 50 51 #include <machine/cpu.h> 52 53 #ifdef DDB 54 #include <ddb/ddb.h> 55 #endif 56 57 #ifdef HWPMC_HOOKS 58 #include <sys/pmckern.h> 59 PMC_SOFT_DECLARE( , , lock, failed); 60 #endif 61 62 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) == 63 (LK_ADAPTIVE | LK_NOSHARE)); 64 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED & 65 ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS))); 66 67 #define SQ_EXCLUSIVE_QUEUE 0 68 #define SQ_SHARED_QUEUE 1 69 70 #ifndef INVARIANTS 71 #define _lockmgr_assert(lk, what, file, line) 72 #define TD_LOCKS_INC(td) 73 #define TD_LOCKS_DEC(td) 74 #else 75 #define TD_LOCKS_INC(td) ((td)->td_locks++) 76 #define TD_LOCKS_DEC(td) ((td)->td_locks--) 77 #endif 78 #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++) 79 #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--) 80 81 #ifndef DEBUG_LOCKS 82 #define STACK_PRINT(lk) 83 #define STACK_SAVE(lk) 84 #define STACK_ZERO(lk) 85 #else 86 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack) 87 #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack) 88 #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack) 89 #endif 90 91 #define LOCK_LOG2(lk, string, arg1, arg2) \ 92 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \ 93 CTR2(KTR_LOCK, (string), (arg1), (arg2)) 94 #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \ 95 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \ 96 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3)) 97 98 #define GIANT_DECLARE \ 99 int _i = 0; \ 100 WITNESS_SAVE_DECL(Giant) 101 #define GIANT_RESTORE() do { \ 102 if (_i > 0) { \ 103 while (_i--) \ 104 mtx_lock(&Giant); \ 105 WITNESS_RESTORE(&Giant.lock_object, Giant); \ 106 } \ 107 } while (0) 108 #define GIANT_SAVE() do { \ 109 if (mtx_owned(&Giant)) { \ 110 WITNESS_SAVE(&Giant.lock_object, Giant); \ 111 while (mtx_owned(&Giant)) { \ 112 _i++; \ 113 mtx_unlock(&Giant); \ 114 } \ 115 } \ 116 } while (0) 117 118 #define LK_CAN_SHARE(x) \ 119 (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 || \ 120 ((x) & LK_EXCLUSIVE_SPINNERS) == 0 || \ 121 curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT))) 122 #define LK_TRYOP(x) \ 123 ((x) & LK_NOWAIT) 124 125 #define LK_CAN_WITNESS(x) \ 126 (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x)) 127 #define LK_TRYWIT(x) \ 128 (LK_TRYOP(x) ? LOP_TRYLOCK : 0) 129 130 #define LK_CAN_ADAPT(lk, f) \ 131 (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 && \ 132 ((f) & LK_SLEEPFAIL) == 0) 133 134 #define lockmgr_disowned(lk) \ 135 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC) 136 137 #define lockmgr_xlocked(lk) \ 138 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread) 139 140 static void assert_lockmgr(const struct lock_object *lock, int how); 141 #ifdef DDB 142 static void db_show_lockmgr(const struct lock_object *lock); 143 #endif 144 static void lock_lockmgr(struct lock_object *lock, uintptr_t how); 145 #ifdef KDTRACE_HOOKS 146 static int owner_lockmgr(const struct lock_object *lock, 147 struct thread **owner); 148 #endif 149 static uintptr_t unlock_lockmgr(struct lock_object *lock); 150 151 struct lock_class lock_class_lockmgr = { 152 .lc_name = "lockmgr", 153 .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE, 154 .lc_assert = assert_lockmgr, 155 #ifdef DDB 156 .lc_ddb_show = db_show_lockmgr, 157 #endif 158 .lc_lock = lock_lockmgr, 159 .lc_unlock = unlock_lockmgr, 160 #ifdef KDTRACE_HOOKS 161 .lc_owner = owner_lockmgr, 162 #endif 163 }; 164 165 #ifdef ADAPTIVE_LOCKMGRS 166 static u_int alk_retries = 10; 167 static u_int alk_loops = 10000; 168 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, 169 "lockmgr debugging"); 170 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, ""); 171 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, ""); 172 #endif 173 174 static __inline struct thread * 175 lockmgr_xholder(const struct lock *lk) 176 { 177 uintptr_t x; 178 179 x = lk->lk_lock; 180 return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x)); 181 } 182 183 /* 184 * It assumes sleepq_lock held and returns with this one unheld. 185 * It also assumes the generic interlock is sane and previously checked. 186 * If LK_INTERLOCK is specified the interlock is not reacquired after the 187 * sleep. 188 */ 189 static __inline int 190 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk, 191 const char *wmesg, int pri, int timo, int queue) 192 { 193 GIANT_DECLARE; 194 struct lock_class *class; 195 int catch, error; 196 197 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL; 198 catch = pri & PCATCH; 199 pri &= PRIMASK; 200 error = 0; 201 202 LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk, 203 (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared"); 204 205 if (flags & LK_INTERLOCK) 206 class->lc_unlock(ilk); 207 if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) 208 lk->lk_exslpfail++; 209 GIANT_SAVE(); 210 sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ? 211 SLEEPQ_INTERRUPTIBLE : 0), queue); 212 if ((flags & LK_TIMELOCK) && timo) 213 sleepq_set_timeout(&lk->lock_object, timo); 214 215 /* 216 * Decisional switch for real sleeping. 217 */ 218 if ((flags & LK_TIMELOCK) && timo && catch) 219 error = sleepq_timedwait_sig(&lk->lock_object, pri); 220 else if ((flags & LK_TIMELOCK) && timo) 221 error = sleepq_timedwait(&lk->lock_object, pri); 222 else if (catch) 223 error = sleepq_wait_sig(&lk->lock_object, pri); 224 else 225 sleepq_wait(&lk->lock_object, pri); 226 GIANT_RESTORE(); 227 if ((flags & LK_SLEEPFAIL) && error == 0) 228 error = ENOLCK; 229 230 return (error); 231 } 232 233 static __inline int 234 wakeupshlk(struct lock *lk, const char *file, int line) 235 { 236 uintptr_t v, x; 237 u_int realexslp; 238 int queue, wakeup_swapper; 239 240 WITNESS_UNLOCK(&lk->lock_object, 0, file, line); 241 LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line); 242 243 wakeup_swapper = 0; 244 for (;;) { 245 x = lk->lk_lock; 246 247 /* 248 * If there is more than one shared lock held, just drop one 249 * and return. 250 */ 251 if (LK_SHARERS(x) > 1) { 252 if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, 253 x - LK_ONE_SHARER)) 254 break; 255 continue; 256 } 257 258 /* 259 * If there are not waiters on the exclusive queue, drop the 260 * lock quickly. 261 */ 262 if ((x & LK_ALL_WAITERS) == 0) { 263 MPASS((x & ~LK_EXCLUSIVE_SPINNERS) == 264 LK_SHARERS_LOCK(1)); 265 if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED)) 266 break; 267 continue; 268 } 269 270 /* 271 * We should have a sharer with waiters, so enter the hard 272 * path in order to handle wakeups correctly. 273 */ 274 sleepq_lock(&lk->lock_object); 275 x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS); 276 v = LK_UNLOCKED; 277 278 /* 279 * If the lock has exclusive waiters, give them preference in 280 * order to avoid deadlock with shared runners up. 281 * If interruptible sleeps left the exclusive queue empty 282 * avoid a starvation for the threads sleeping on the shared 283 * queue by giving them precedence and cleaning up the 284 * exclusive waiters bit anyway. 285 * Please note that lk_exslpfail count may be lying about 286 * the real number of waiters with the LK_SLEEPFAIL flag on 287 * because they may be used in conjuction with interruptible 288 * sleeps so lk_exslpfail might be considered an 'upper limit' 289 * bound, including the edge cases. 290 */ 291 realexslp = sleepq_sleepcnt(&lk->lock_object, 292 SQ_EXCLUSIVE_QUEUE); 293 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) { 294 if (lk->lk_exslpfail < realexslp) { 295 lk->lk_exslpfail = 0; 296 queue = SQ_EXCLUSIVE_QUEUE; 297 v |= (x & LK_SHARED_WAITERS); 298 } else { 299 lk->lk_exslpfail = 0; 300 LOCK_LOG2(lk, 301 "%s: %p has only LK_SLEEPFAIL sleepers", 302 __func__, lk); 303 LOCK_LOG2(lk, 304 "%s: %p waking up threads on the exclusive queue", 305 __func__, lk); 306 wakeup_swapper = 307 sleepq_broadcast(&lk->lock_object, 308 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE); 309 queue = SQ_SHARED_QUEUE; 310 } 311 312 } else { 313 314 /* 315 * Exclusive waiters sleeping with LK_SLEEPFAIL on 316 * and using interruptible sleeps/timeout may have 317 * left spourious lk_exslpfail counts on, so clean 318 * it up anyway. 319 */ 320 lk->lk_exslpfail = 0; 321 queue = SQ_SHARED_QUEUE; 322 } 323 324 if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x, 325 v)) { 326 sleepq_release(&lk->lock_object); 327 continue; 328 } 329 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue", 330 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" : 331 "exclusive"); 332 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 333 0, queue); 334 sleepq_release(&lk->lock_object); 335 break; 336 } 337 338 lock_profile_release_lock(&lk->lock_object); 339 TD_LOCKS_DEC(curthread); 340 TD_SLOCKS_DEC(curthread); 341 return (wakeup_swapper); 342 } 343 344 static void 345 assert_lockmgr(const struct lock_object *lock, int what) 346 { 347 348 panic("lockmgr locks do not support assertions"); 349 } 350 351 static void 352 lock_lockmgr(struct lock_object *lock, uintptr_t how) 353 { 354 355 panic("lockmgr locks do not support sleep interlocking"); 356 } 357 358 static uintptr_t 359 unlock_lockmgr(struct lock_object *lock) 360 { 361 362 panic("lockmgr locks do not support sleep interlocking"); 363 } 364 365 #ifdef KDTRACE_HOOKS 366 static int 367 owner_lockmgr(const struct lock_object *lock, struct thread **owner) 368 { 369 370 panic("lockmgr locks do not support owner inquiring"); 371 } 372 #endif 373 374 void 375 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags) 376 { 377 int iflags; 378 379 MPASS((flags & ~LK_INIT_MASK) == 0); 380 ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock, 381 ("%s: lockmgr not aligned for %s: %p", __func__, wmesg, 382 &lk->lk_lock)); 383 384 iflags = LO_SLEEPABLE | LO_UPGRADABLE; 385 if (flags & LK_CANRECURSE) 386 iflags |= LO_RECURSABLE; 387 if ((flags & LK_NODUP) == 0) 388 iflags |= LO_DUPOK; 389 if (flags & LK_NOPROFILE) 390 iflags |= LO_NOPROFILE; 391 if ((flags & LK_NOWITNESS) == 0) 392 iflags |= LO_WITNESS; 393 if (flags & LK_QUIET) 394 iflags |= LO_QUIET; 395 if (flags & LK_IS_VNODE) 396 iflags |= LO_IS_VNODE; 397 iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE); 398 399 lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags); 400 lk->lk_lock = LK_UNLOCKED; 401 lk->lk_recurse = 0; 402 lk->lk_exslpfail = 0; 403 lk->lk_timo = timo; 404 lk->lk_pri = pri; 405 STACK_ZERO(lk); 406 } 407 408 /* 409 * XXX: Gross hacks to manipulate external lock flags after 410 * initialization. Used for certain vnode and buf locks. 411 */ 412 void 413 lockallowshare(struct lock *lk) 414 { 415 416 lockmgr_assert(lk, KA_XLOCKED); 417 lk->lock_object.lo_flags &= ~LK_NOSHARE; 418 } 419 420 void 421 lockdisableshare(struct lock *lk) 422 { 423 424 lockmgr_assert(lk, KA_XLOCKED); 425 lk->lock_object.lo_flags |= LK_NOSHARE; 426 } 427 428 void 429 lockallowrecurse(struct lock *lk) 430 { 431 432 lockmgr_assert(lk, KA_XLOCKED); 433 lk->lock_object.lo_flags |= LO_RECURSABLE; 434 } 435 436 void 437 lockdisablerecurse(struct lock *lk) 438 { 439 440 lockmgr_assert(lk, KA_XLOCKED); 441 lk->lock_object.lo_flags &= ~LO_RECURSABLE; 442 } 443 444 void 445 lockdestroy(struct lock *lk) 446 { 447 448 KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held")); 449 KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed")); 450 KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters")); 451 lock_destroy(&lk->lock_object); 452 } 453 454 int 455 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk, 456 const char *wmesg, int pri, int timo, const char *file, int line) 457 { 458 GIANT_DECLARE; 459 struct lock_class *class; 460 const char *iwmesg; 461 uintptr_t tid, v, x; 462 u_int op, realexslp; 463 int error, ipri, itimo, queue, wakeup_swapper; 464 #ifdef LOCK_PROFILING 465 uint64_t waittime = 0; 466 int contested = 0; 467 #endif 468 #ifdef ADAPTIVE_LOCKMGRS 469 volatile struct thread *owner; 470 u_int i, spintries = 0; 471 #endif 472 473 error = 0; 474 tid = (uintptr_t)curthread; 475 op = (flags & LK_TYPE_MASK); 476 iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg; 477 ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri; 478 itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo; 479 480 MPASS((flags & ~LK_TOTAL_MASK) == 0); 481 KASSERT((op & (op - 1)) == 0, 482 ("%s: Invalid requested operation @ %s:%d", __func__, file, line)); 483 KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 || 484 (op != LK_DOWNGRADE && op != LK_RELEASE), 485 ("%s: Invalid flags in regard of the operation desired @ %s:%d", 486 __func__, file, line)); 487 KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL, 488 ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d", 489 __func__, file, line)); 490 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 491 ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread, 492 lk->lock_object.lo_name, file, line)); 493 494 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL; 495 if (panicstr != NULL) { 496 if (flags & LK_INTERLOCK) 497 class->lc_unlock(ilk); 498 return (0); 499 } 500 501 if (lk->lock_object.lo_flags & LK_NOSHARE) { 502 switch (op) { 503 case LK_SHARED: 504 op = LK_EXCLUSIVE; 505 break; 506 case LK_UPGRADE: 507 case LK_TRYUPGRADE: 508 case LK_DOWNGRADE: 509 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, 510 file, line); 511 if (flags & LK_INTERLOCK) 512 class->lc_unlock(ilk); 513 return (0); 514 } 515 } 516 517 wakeup_swapper = 0; 518 switch (op) { 519 case LK_SHARED: 520 if (LK_CAN_WITNESS(flags)) 521 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER, 522 file, line, flags & LK_INTERLOCK ? ilk : NULL); 523 for (;;) { 524 x = lk->lk_lock; 525 526 /* 527 * If no other thread has an exclusive lock, or 528 * no exclusive waiter is present, bump the count of 529 * sharers. Since we have to preserve the state of 530 * waiters, if we fail to acquire the shared lock 531 * loop back and retry. 532 */ 533 if (LK_CAN_SHARE(x)) { 534 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x, 535 x + LK_ONE_SHARER)) 536 break; 537 continue; 538 } 539 #ifdef HWPMC_HOOKS 540 PMC_SOFT_CALL( , , lock, failed); 541 #endif 542 lock_profile_obtain_lock_failed(&lk->lock_object, 543 &contested, &waittime); 544 545 /* 546 * If the lock is already held by curthread in 547 * exclusive way avoid a deadlock. 548 */ 549 if (LK_HOLDER(x) == tid) { 550 LOCK_LOG2(lk, 551 "%s: %p already held in exclusive mode", 552 __func__, lk); 553 error = EDEADLK; 554 break; 555 } 556 557 /* 558 * If the lock is expected to not sleep just give up 559 * and return. 560 */ 561 if (LK_TRYOP(flags)) { 562 LOCK_LOG2(lk, "%s: %p fails the try operation", 563 __func__, lk); 564 error = EBUSY; 565 break; 566 } 567 568 #ifdef ADAPTIVE_LOCKMGRS 569 /* 570 * If the owner is running on another CPU, spin until 571 * the owner stops running or the state of the lock 572 * changes. We need a double-state handle here 573 * because for a failed acquisition the lock can be 574 * either held in exclusive mode or shared mode 575 * (for the writer starvation avoidance technique). 576 */ 577 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 && 578 LK_HOLDER(x) != LK_KERNPROC) { 579 owner = (struct thread *)LK_HOLDER(x); 580 if (LOCK_LOG_TEST(&lk->lock_object, 0)) 581 CTR3(KTR_LOCK, 582 "%s: spinning on %p held by %p", 583 __func__, lk, owner); 584 585 /* 586 * If we are holding also an interlock drop it 587 * in order to avoid a deadlock if the lockmgr 588 * owner is adaptively spinning on the 589 * interlock itself. 590 */ 591 if (flags & LK_INTERLOCK) { 592 class->lc_unlock(ilk); 593 flags &= ~LK_INTERLOCK; 594 } 595 GIANT_SAVE(); 596 while (LK_HOLDER(lk->lk_lock) == 597 (uintptr_t)owner && TD_IS_RUNNING(owner)) 598 cpu_spinwait(); 599 GIANT_RESTORE(); 600 continue; 601 } else if (LK_CAN_ADAPT(lk, flags) && 602 (x & LK_SHARE) != 0 && LK_SHARERS(x) && 603 spintries < alk_retries) { 604 if (flags & LK_INTERLOCK) { 605 class->lc_unlock(ilk); 606 flags &= ~LK_INTERLOCK; 607 } 608 GIANT_SAVE(); 609 spintries++; 610 for (i = 0; i < alk_loops; i++) { 611 if (LOCK_LOG_TEST(&lk->lock_object, 0)) 612 CTR4(KTR_LOCK, 613 "%s: shared spinning on %p with %u and %u", 614 __func__, lk, spintries, i); 615 x = lk->lk_lock; 616 if ((x & LK_SHARE) == 0 || 617 LK_CAN_SHARE(x) != 0) 618 break; 619 cpu_spinwait(); 620 } 621 GIANT_RESTORE(); 622 if (i != alk_loops) 623 continue; 624 } 625 #endif 626 627 /* 628 * Acquire the sleepqueue chain lock because we 629 * probabilly will need to manipulate waiters flags. 630 */ 631 sleepq_lock(&lk->lock_object); 632 x = lk->lk_lock; 633 634 /* 635 * if the lock can be acquired in shared mode, try 636 * again. 637 */ 638 if (LK_CAN_SHARE(x)) { 639 sleepq_release(&lk->lock_object); 640 continue; 641 } 642 643 #ifdef ADAPTIVE_LOCKMGRS 644 /* 645 * The current lock owner might have started executing 646 * on another CPU (or the lock could have changed 647 * owner) while we were waiting on the turnstile 648 * chain lock. If so, drop the turnstile lock and try 649 * again. 650 */ 651 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 && 652 LK_HOLDER(x) != LK_KERNPROC) { 653 owner = (struct thread *)LK_HOLDER(x); 654 if (TD_IS_RUNNING(owner)) { 655 sleepq_release(&lk->lock_object); 656 continue; 657 } 658 } 659 #endif 660 661 /* 662 * Try to set the LK_SHARED_WAITERS flag. If we fail, 663 * loop back and retry. 664 */ 665 if ((x & LK_SHARED_WAITERS) == 0) { 666 if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x, 667 x | LK_SHARED_WAITERS)) { 668 sleepq_release(&lk->lock_object); 669 continue; 670 } 671 LOCK_LOG2(lk, "%s: %p set shared waiters flag", 672 __func__, lk); 673 } 674 675 /* 676 * As far as we have been unable to acquire the 677 * shared lock and the shared waiters flag is set, 678 * we will sleep. 679 */ 680 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, 681 SQ_SHARED_QUEUE); 682 flags &= ~LK_INTERLOCK; 683 if (error) { 684 LOCK_LOG3(lk, 685 "%s: interrupted sleep for %p with %d", 686 __func__, lk, error); 687 break; 688 } 689 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 690 __func__, lk); 691 } 692 if (error == 0) { 693 lock_profile_obtain_lock_success(&lk->lock_object, 694 contested, waittime, file, line); 695 LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, 696 line); 697 WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, 698 line); 699 TD_LOCKS_INC(curthread); 700 TD_SLOCKS_INC(curthread); 701 STACK_SAVE(lk); 702 } 703 break; 704 case LK_UPGRADE: 705 case LK_TRYUPGRADE: 706 _lockmgr_assert(lk, KA_SLOCKED, file, line); 707 v = lk->lk_lock; 708 x = v & LK_ALL_WAITERS; 709 v &= LK_EXCLUSIVE_SPINNERS; 710 711 /* 712 * Try to switch from one shared lock to an exclusive one. 713 * We need to preserve waiters flags during the operation. 714 */ 715 if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v, 716 tid | x)) { 717 LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file, 718 line); 719 WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE | 720 LK_TRYWIT(flags), file, line); 721 TD_SLOCKS_DEC(curthread); 722 break; 723 } 724 725 /* 726 * In LK_TRYUPGRADE mode, do not drop the lock, 727 * returning EBUSY instead. 728 */ 729 if (op == LK_TRYUPGRADE) { 730 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade", 731 __func__, lk); 732 error = EBUSY; 733 break; 734 } 735 736 /* 737 * We have been unable to succeed in upgrading, so just 738 * give up the shared lock. 739 */ 740 wakeup_swapper |= wakeupshlk(lk, file, line); 741 742 /* FALLTHROUGH */ 743 case LK_EXCLUSIVE: 744 if (LK_CAN_WITNESS(flags)) 745 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER | 746 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ? 747 ilk : NULL); 748 749 /* 750 * If curthread already holds the lock and this one is 751 * allowed to recurse, simply recurse on it. 752 */ 753 if (lockmgr_xlocked(lk)) { 754 if ((flags & LK_CANRECURSE) == 0 && 755 (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) { 756 757 /* 758 * If the lock is expected to not panic just 759 * give up and return. 760 */ 761 if (LK_TRYOP(flags)) { 762 LOCK_LOG2(lk, 763 "%s: %p fails the try operation", 764 __func__, lk); 765 error = EBUSY; 766 break; 767 } 768 if (flags & LK_INTERLOCK) 769 class->lc_unlock(ilk); 770 panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n", 771 __func__, iwmesg, file, line); 772 } 773 lk->lk_recurse++; 774 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk); 775 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, 776 lk->lk_recurse, file, line); 777 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 778 LK_TRYWIT(flags), file, line); 779 TD_LOCKS_INC(curthread); 780 break; 781 } 782 783 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, 784 tid)) { 785 #ifdef HWPMC_HOOKS 786 PMC_SOFT_CALL( , , lock, failed); 787 #endif 788 lock_profile_obtain_lock_failed(&lk->lock_object, 789 &contested, &waittime); 790 791 /* 792 * If the lock is expected to not sleep just give up 793 * and return. 794 */ 795 if (LK_TRYOP(flags)) { 796 LOCK_LOG2(lk, "%s: %p fails the try operation", 797 __func__, lk); 798 error = EBUSY; 799 break; 800 } 801 802 #ifdef ADAPTIVE_LOCKMGRS 803 /* 804 * If the owner is running on another CPU, spin until 805 * the owner stops running or the state of the lock 806 * changes. 807 */ 808 x = lk->lk_lock; 809 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 && 810 LK_HOLDER(x) != LK_KERNPROC) { 811 owner = (struct thread *)LK_HOLDER(x); 812 if (LOCK_LOG_TEST(&lk->lock_object, 0)) 813 CTR3(KTR_LOCK, 814 "%s: spinning on %p held by %p", 815 __func__, lk, owner); 816 817 /* 818 * If we are holding also an interlock drop it 819 * in order to avoid a deadlock if the lockmgr 820 * owner is adaptively spinning on the 821 * interlock itself. 822 */ 823 if (flags & LK_INTERLOCK) { 824 class->lc_unlock(ilk); 825 flags &= ~LK_INTERLOCK; 826 } 827 GIANT_SAVE(); 828 while (LK_HOLDER(lk->lk_lock) == 829 (uintptr_t)owner && TD_IS_RUNNING(owner)) 830 cpu_spinwait(); 831 GIANT_RESTORE(); 832 continue; 833 } else if (LK_CAN_ADAPT(lk, flags) && 834 (x & LK_SHARE) != 0 && LK_SHARERS(x) && 835 spintries < alk_retries) { 836 if ((x & LK_EXCLUSIVE_SPINNERS) == 0 && 837 !atomic_cmpset_ptr(&lk->lk_lock, x, 838 x | LK_EXCLUSIVE_SPINNERS)) 839 continue; 840 if (flags & LK_INTERLOCK) { 841 class->lc_unlock(ilk); 842 flags &= ~LK_INTERLOCK; 843 } 844 GIANT_SAVE(); 845 spintries++; 846 for (i = 0; i < alk_loops; i++) { 847 if (LOCK_LOG_TEST(&lk->lock_object, 0)) 848 CTR4(KTR_LOCK, 849 "%s: shared spinning on %p with %u and %u", 850 __func__, lk, spintries, i); 851 if ((lk->lk_lock & 852 LK_EXCLUSIVE_SPINNERS) == 0) 853 break; 854 cpu_spinwait(); 855 } 856 GIANT_RESTORE(); 857 if (i != alk_loops) 858 continue; 859 } 860 #endif 861 862 /* 863 * Acquire the sleepqueue chain lock because we 864 * probabilly will need to manipulate waiters flags. 865 */ 866 sleepq_lock(&lk->lock_object); 867 x = lk->lk_lock; 868 869 /* 870 * if the lock has been released while we spun on 871 * the sleepqueue chain lock just try again. 872 */ 873 if (x == LK_UNLOCKED) { 874 sleepq_release(&lk->lock_object); 875 continue; 876 } 877 878 #ifdef ADAPTIVE_LOCKMGRS 879 /* 880 * The current lock owner might have started executing 881 * on another CPU (or the lock could have changed 882 * owner) while we were waiting on the turnstile 883 * chain lock. If so, drop the turnstile lock and try 884 * again. 885 */ 886 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 && 887 LK_HOLDER(x) != LK_KERNPROC) { 888 owner = (struct thread *)LK_HOLDER(x); 889 if (TD_IS_RUNNING(owner)) { 890 sleepq_release(&lk->lock_object); 891 continue; 892 } 893 } 894 #endif 895 896 /* 897 * The lock can be in the state where there is a 898 * pending queue of waiters, but still no owner. 899 * This happens when the lock is contested and an 900 * owner is going to claim the lock. 901 * If curthread is the one successfully acquiring it 902 * claim lock ownership and return, preserving waiters 903 * flags. 904 */ 905 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS); 906 if ((x & ~v) == LK_UNLOCKED) { 907 v &= ~LK_EXCLUSIVE_SPINNERS; 908 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x, 909 tid | v)) { 910 sleepq_release(&lk->lock_object); 911 LOCK_LOG2(lk, 912 "%s: %p claimed by a new writer", 913 __func__, lk); 914 break; 915 } 916 sleepq_release(&lk->lock_object); 917 continue; 918 } 919 920 /* 921 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we 922 * fail, loop back and retry. 923 */ 924 if ((x & LK_EXCLUSIVE_WAITERS) == 0) { 925 if (!atomic_cmpset_ptr(&lk->lk_lock, x, 926 x | LK_EXCLUSIVE_WAITERS)) { 927 sleepq_release(&lk->lock_object); 928 continue; 929 } 930 LOCK_LOG2(lk, "%s: %p set excl waiters flag", 931 __func__, lk); 932 } 933 934 /* 935 * As far as we have been unable to acquire the 936 * exclusive lock and the exclusive waiters flag 937 * is set, we will sleep. 938 */ 939 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, 940 SQ_EXCLUSIVE_QUEUE); 941 flags &= ~LK_INTERLOCK; 942 if (error) { 943 LOCK_LOG3(lk, 944 "%s: interrupted sleep for %p with %d", 945 __func__, lk, error); 946 break; 947 } 948 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 949 __func__, lk); 950 } 951 if (error == 0) { 952 lock_profile_obtain_lock_success(&lk->lock_object, 953 contested, waittime, file, line); 954 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, 955 lk->lk_recurse, file, line); 956 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 957 LK_TRYWIT(flags), file, line); 958 TD_LOCKS_INC(curthread); 959 STACK_SAVE(lk); 960 } 961 break; 962 case LK_DOWNGRADE: 963 _lockmgr_assert(lk, KA_XLOCKED, file, line); 964 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line); 965 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line); 966 967 /* 968 * Panic if the lock is recursed. 969 */ 970 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) { 971 if (flags & LK_INTERLOCK) 972 class->lc_unlock(ilk); 973 panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n", 974 __func__, iwmesg, file, line); 975 } 976 TD_SLOCKS_INC(curthread); 977 978 /* 979 * In order to preserve waiters flags, just spin. 980 */ 981 for (;;) { 982 x = lk->lk_lock; 983 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0); 984 x &= LK_ALL_WAITERS; 985 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x, 986 LK_SHARERS_LOCK(1) | x)) 987 break; 988 cpu_spinwait(); 989 } 990 break; 991 case LK_RELEASE: 992 _lockmgr_assert(lk, KA_LOCKED, file, line); 993 x = lk->lk_lock; 994 995 if ((x & LK_SHARE) == 0) { 996 997 /* 998 * As first option, treact the lock as if it has not 999 * any waiter. 1000 * Fix-up the tid var if the lock has been disowned. 1001 */ 1002 if (LK_HOLDER(x) == LK_KERNPROC) 1003 tid = LK_KERNPROC; 1004 else { 1005 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, 1006 file, line); 1007 TD_LOCKS_DEC(curthread); 1008 } 1009 LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, 1010 lk->lk_recurse, file, line); 1011 1012 /* 1013 * The lock is held in exclusive mode. 1014 * If the lock is recursed also, then unrecurse it. 1015 */ 1016 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) { 1017 LOCK_LOG2(lk, "%s: %p unrecursing", __func__, 1018 lk); 1019 lk->lk_recurse--; 1020 break; 1021 } 1022 if (tid != LK_KERNPROC) 1023 lock_profile_release_lock(&lk->lock_object); 1024 1025 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid, 1026 LK_UNLOCKED)) 1027 break; 1028 1029 sleepq_lock(&lk->lock_object); 1030 x = lk->lk_lock; 1031 v = LK_UNLOCKED; 1032 1033 /* 1034 * If the lock has exclusive waiters, give them 1035 * preference in order to avoid deadlock with 1036 * shared runners up. 1037 * If interruptible sleeps left the exclusive queue 1038 * empty avoid a starvation for the threads sleeping 1039 * on the shared queue by giving them precedence 1040 * and cleaning up the exclusive waiters bit anyway. 1041 * Please note that lk_exslpfail count may be lying 1042 * about the real number of waiters with the 1043 * LK_SLEEPFAIL flag on because they may be used in 1044 * conjuction with interruptible sleeps so 1045 * lk_exslpfail might be considered an 'upper limit' 1046 * bound, including the edge cases. 1047 */ 1048 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0); 1049 realexslp = sleepq_sleepcnt(&lk->lock_object, 1050 SQ_EXCLUSIVE_QUEUE); 1051 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) { 1052 if (lk->lk_exslpfail < realexslp) { 1053 lk->lk_exslpfail = 0; 1054 queue = SQ_EXCLUSIVE_QUEUE; 1055 v |= (x & LK_SHARED_WAITERS); 1056 } else { 1057 lk->lk_exslpfail = 0; 1058 LOCK_LOG2(lk, 1059 "%s: %p has only LK_SLEEPFAIL sleepers", 1060 __func__, lk); 1061 LOCK_LOG2(lk, 1062 "%s: %p waking up threads on the exclusive queue", 1063 __func__, lk); 1064 wakeup_swapper = 1065 sleepq_broadcast(&lk->lock_object, 1066 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE); 1067 queue = SQ_SHARED_QUEUE; 1068 } 1069 } else { 1070 1071 /* 1072 * Exclusive waiters sleeping with LK_SLEEPFAIL 1073 * on and using interruptible sleeps/timeout 1074 * may have left spourious lk_exslpfail counts 1075 * on, so clean it up anyway. 1076 */ 1077 lk->lk_exslpfail = 0; 1078 queue = SQ_SHARED_QUEUE; 1079 } 1080 1081 LOCK_LOG3(lk, 1082 "%s: %p waking up threads on the %s queue", 1083 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" : 1084 "exclusive"); 1085 atomic_store_rel_ptr(&lk->lk_lock, v); 1086 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, 1087 SLEEPQ_LK, 0, queue); 1088 sleepq_release(&lk->lock_object); 1089 break; 1090 } else 1091 wakeup_swapper = wakeupshlk(lk, file, line); 1092 break; 1093 case LK_DRAIN: 1094 if (LK_CAN_WITNESS(flags)) 1095 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER | 1096 LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ? 1097 ilk : NULL); 1098 1099 /* 1100 * Trying to drain a lock we already own will result in a 1101 * deadlock. 1102 */ 1103 if (lockmgr_xlocked(lk)) { 1104 if (flags & LK_INTERLOCK) 1105 class->lc_unlock(ilk); 1106 panic("%s: draining %s with the lock held @ %s:%d\n", 1107 __func__, iwmesg, file, line); 1108 } 1109 1110 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) { 1111 #ifdef HWPMC_HOOKS 1112 PMC_SOFT_CALL( , , lock, failed); 1113 #endif 1114 lock_profile_obtain_lock_failed(&lk->lock_object, 1115 &contested, &waittime); 1116 1117 /* 1118 * If the lock is expected to not sleep just give up 1119 * and return. 1120 */ 1121 if (LK_TRYOP(flags)) { 1122 LOCK_LOG2(lk, "%s: %p fails the try operation", 1123 __func__, lk); 1124 error = EBUSY; 1125 break; 1126 } 1127 1128 /* 1129 * Acquire the sleepqueue chain lock because we 1130 * probabilly will need to manipulate waiters flags. 1131 */ 1132 sleepq_lock(&lk->lock_object); 1133 x = lk->lk_lock; 1134 1135 /* 1136 * if the lock has been released while we spun on 1137 * the sleepqueue chain lock just try again. 1138 */ 1139 if (x == LK_UNLOCKED) { 1140 sleepq_release(&lk->lock_object); 1141 continue; 1142 } 1143 1144 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS); 1145 if ((x & ~v) == LK_UNLOCKED) { 1146 v = (x & ~LK_EXCLUSIVE_SPINNERS); 1147 1148 /* 1149 * If interruptible sleeps left the exclusive 1150 * queue empty avoid a starvation for the 1151 * threads sleeping on the shared queue by 1152 * giving them precedence and cleaning up the 1153 * exclusive waiters bit anyway. 1154 * Please note that lk_exslpfail count may be 1155 * lying about the real number of waiters with 1156 * the LK_SLEEPFAIL flag on because they may 1157 * be used in conjuction with interruptible 1158 * sleeps so lk_exslpfail might be considered 1159 * an 'upper limit' bound, including the edge 1160 * cases. 1161 */ 1162 if (v & LK_EXCLUSIVE_WAITERS) { 1163 queue = SQ_EXCLUSIVE_QUEUE; 1164 v &= ~LK_EXCLUSIVE_WAITERS; 1165 } else { 1166 1167 /* 1168 * Exclusive waiters sleeping with 1169 * LK_SLEEPFAIL on and using 1170 * interruptible sleeps/timeout may 1171 * have left spourious lk_exslpfail 1172 * counts on, so clean it up anyway. 1173 */ 1174 MPASS(v & LK_SHARED_WAITERS); 1175 lk->lk_exslpfail = 0; 1176 queue = SQ_SHARED_QUEUE; 1177 v &= ~LK_SHARED_WAITERS; 1178 } 1179 if (queue == SQ_EXCLUSIVE_QUEUE) { 1180 realexslp = 1181 sleepq_sleepcnt(&lk->lock_object, 1182 SQ_EXCLUSIVE_QUEUE); 1183 if (lk->lk_exslpfail >= realexslp) { 1184 lk->lk_exslpfail = 0; 1185 queue = SQ_SHARED_QUEUE; 1186 v &= ~LK_SHARED_WAITERS; 1187 if (realexslp != 0) { 1188 LOCK_LOG2(lk, 1189 "%s: %p has only LK_SLEEPFAIL sleepers", 1190 __func__, lk); 1191 LOCK_LOG2(lk, 1192 "%s: %p waking up threads on the exclusive queue", 1193 __func__, lk); 1194 wakeup_swapper = 1195 sleepq_broadcast( 1196 &lk->lock_object, 1197 SLEEPQ_LK, 0, 1198 SQ_EXCLUSIVE_QUEUE); 1199 } 1200 } else 1201 lk->lk_exslpfail = 0; 1202 } 1203 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) { 1204 sleepq_release(&lk->lock_object); 1205 continue; 1206 } 1207 LOCK_LOG3(lk, 1208 "%s: %p waking up all threads on the %s queue", 1209 __func__, lk, queue == SQ_SHARED_QUEUE ? 1210 "shared" : "exclusive"); 1211 wakeup_swapper |= sleepq_broadcast( 1212 &lk->lock_object, SLEEPQ_LK, 0, queue); 1213 1214 /* 1215 * If shared waiters have been woken up we need 1216 * to wait for one of them to acquire the lock 1217 * before to set the exclusive waiters in 1218 * order to avoid a deadlock. 1219 */ 1220 if (queue == SQ_SHARED_QUEUE) { 1221 for (v = lk->lk_lock; 1222 (v & LK_SHARE) && !LK_SHARERS(v); 1223 v = lk->lk_lock) 1224 cpu_spinwait(); 1225 } 1226 } 1227 1228 /* 1229 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we 1230 * fail, loop back and retry. 1231 */ 1232 if ((x & LK_EXCLUSIVE_WAITERS) == 0) { 1233 if (!atomic_cmpset_ptr(&lk->lk_lock, x, 1234 x | LK_EXCLUSIVE_WAITERS)) { 1235 sleepq_release(&lk->lock_object); 1236 continue; 1237 } 1238 LOCK_LOG2(lk, "%s: %p set drain waiters flag", 1239 __func__, lk); 1240 } 1241 1242 /* 1243 * As far as we have been unable to acquire the 1244 * exclusive lock and the exclusive waiters flag 1245 * is set, we will sleep. 1246 */ 1247 if (flags & LK_INTERLOCK) { 1248 class->lc_unlock(ilk); 1249 flags &= ~LK_INTERLOCK; 1250 } 1251 GIANT_SAVE(); 1252 sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK, 1253 SQ_EXCLUSIVE_QUEUE); 1254 sleepq_wait(&lk->lock_object, ipri & PRIMASK); 1255 GIANT_RESTORE(); 1256 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 1257 __func__, lk); 1258 } 1259 1260 if (error == 0) { 1261 lock_profile_obtain_lock_success(&lk->lock_object, 1262 contested, waittime, file, line); 1263 LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0, 1264 lk->lk_recurse, file, line); 1265 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 1266 LK_TRYWIT(flags), file, line); 1267 TD_LOCKS_INC(curthread); 1268 STACK_SAVE(lk); 1269 } 1270 break; 1271 default: 1272 if (flags & LK_INTERLOCK) 1273 class->lc_unlock(ilk); 1274 panic("%s: unknown lockmgr request 0x%x\n", __func__, op); 1275 } 1276 1277 if (flags & LK_INTERLOCK) 1278 class->lc_unlock(ilk); 1279 if (wakeup_swapper) 1280 kick_proc0(); 1281 1282 return (error); 1283 } 1284 1285 void 1286 _lockmgr_disown(struct lock *lk, const char *file, int line) 1287 { 1288 uintptr_t tid, x; 1289 1290 if (SCHEDULER_STOPPED()) 1291 return; 1292 1293 tid = (uintptr_t)curthread; 1294 _lockmgr_assert(lk, KA_XLOCKED, file, line); 1295 1296 /* 1297 * Panic if the lock is recursed. 1298 */ 1299 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) 1300 panic("%s: disown a recursed lockmgr @ %s:%d\n", 1301 __func__, file, line); 1302 1303 /* 1304 * If the owner is already LK_KERNPROC just skip the whole operation. 1305 */ 1306 if (LK_HOLDER(lk->lk_lock) != tid) 1307 return; 1308 lock_profile_release_lock(&lk->lock_object); 1309 LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line); 1310 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line); 1311 TD_LOCKS_DEC(curthread); 1312 STACK_SAVE(lk); 1313 1314 /* 1315 * In order to preserve waiters flags, just spin. 1316 */ 1317 for (;;) { 1318 x = lk->lk_lock; 1319 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0); 1320 x &= LK_ALL_WAITERS; 1321 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x, 1322 LK_KERNPROC | x)) 1323 return; 1324 cpu_spinwait(); 1325 } 1326 } 1327 1328 void 1329 lockmgr_printinfo(const struct lock *lk) 1330 { 1331 struct thread *td; 1332 uintptr_t x; 1333 1334 if (lk->lk_lock == LK_UNLOCKED) 1335 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name); 1336 else if (lk->lk_lock & LK_SHARE) 1337 printf("lock type %s: SHARED (count %ju)\n", 1338 lk->lock_object.lo_name, 1339 (uintmax_t)LK_SHARERS(lk->lk_lock)); 1340 else { 1341 td = lockmgr_xholder(lk); 1342 printf("lock type %s: EXCL by thread %p " 1343 "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td, 1344 td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid); 1345 } 1346 1347 x = lk->lk_lock; 1348 if (x & LK_EXCLUSIVE_WAITERS) 1349 printf(" with exclusive waiters pending\n"); 1350 if (x & LK_SHARED_WAITERS) 1351 printf(" with shared waiters pending\n"); 1352 if (x & LK_EXCLUSIVE_SPINNERS) 1353 printf(" with exclusive spinners pending\n"); 1354 1355 STACK_PRINT(lk); 1356 } 1357 1358 int 1359 lockstatus(const struct lock *lk) 1360 { 1361 uintptr_t v, x; 1362 int ret; 1363 1364 ret = LK_SHARED; 1365 x = lk->lk_lock; 1366 v = LK_HOLDER(x); 1367 1368 if ((x & LK_SHARE) == 0) { 1369 if (v == (uintptr_t)curthread || v == LK_KERNPROC) 1370 ret = LK_EXCLUSIVE; 1371 else 1372 ret = LK_EXCLOTHER; 1373 } else if (x == LK_UNLOCKED) 1374 ret = 0; 1375 1376 return (ret); 1377 } 1378 1379 #ifdef INVARIANT_SUPPORT 1380 1381 FEATURE(invariant_support, 1382 "Support for modules compiled with INVARIANTS option"); 1383 1384 #ifndef INVARIANTS 1385 #undef _lockmgr_assert 1386 #endif 1387 1388 void 1389 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line) 1390 { 1391 int slocked = 0; 1392 1393 if (panicstr != NULL) 1394 return; 1395 switch (what) { 1396 case KA_SLOCKED: 1397 case KA_SLOCKED | KA_NOTRECURSED: 1398 case KA_SLOCKED | KA_RECURSED: 1399 slocked = 1; 1400 case KA_LOCKED: 1401 case KA_LOCKED | KA_NOTRECURSED: 1402 case KA_LOCKED | KA_RECURSED: 1403 #ifdef WITNESS 1404 1405 /* 1406 * We cannot trust WITNESS if the lock is held in exclusive 1407 * mode and a call to lockmgr_disown() happened. 1408 * Workaround this skipping the check if the lock is held in 1409 * exclusive mode even for the KA_LOCKED case. 1410 */ 1411 if (slocked || (lk->lk_lock & LK_SHARE)) { 1412 witness_assert(&lk->lock_object, what, file, line); 1413 break; 1414 } 1415 #endif 1416 if (lk->lk_lock == LK_UNLOCKED || 1417 ((lk->lk_lock & LK_SHARE) == 0 && (slocked || 1418 (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))))) 1419 panic("Lock %s not %slocked @ %s:%d\n", 1420 lk->lock_object.lo_name, slocked ? "share" : "", 1421 file, line); 1422 1423 if ((lk->lk_lock & LK_SHARE) == 0) { 1424 if (lockmgr_recursed(lk)) { 1425 if (what & KA_NOTRECURSED) 1426 panic("Lock %s recursed @ %s:%d\n", 1427 lk->lock_object.lo_name, file, 1428 line); 1429 } else if (what & KA_RECURSED) 1430 panic("Lock %s not recursed @ %s:%d\n", 1431 lk->lock_object.lo_name, file, line); 1432 } 1433 break; 1434 case KA_XLOCKED: 1435 case KA_XLOCKED | KA_NOTRECURSED: 1436 case KA_XLOCKED | KA_RECURSED: 1437 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)) 1438 panic("Lock %s not exclusively locked @ %s:%d\n", 1439 lk->lock_object.lo_name, file, line); 1440 if (lockmgr_recursed(lk)) { 1441 if (what & KA_NOTRECURSED) 1442 panic("Lock %s recursed @ %s:%d\n", 1443 lk->lock_object.lo_name, file, line); 1444 } else if (what & KA_RECURSED) 1445 panic("Lock %s not recursed @ %s:%d\n", 1446 lk->lock_object.lo_name, file, line); 1447 break; 1448 case KA_UNLOCKED: 1449 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk)) 1450 panic("Lock %s exclusively locked @ %s:%d\n", 1451 lk->lock_object.lo_name, file, line); 1452 break; 1453 default: 1454 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file, 1455 line); 1456 } 1457 } 1458 #endif 1459 1460 #ifdef DDB 1461 int 1462 lockmgr_chain(struct thread *td, struct thread **ownerp) 1463 { 1464 struct lock *lk; 1465 1466 lk = td->td_wchan; 1467 1468 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr) 1469 return (0); 1470 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name); 1471 if (lk->lk_lock & LK_SHARE) 1472 db_printf("SHARED (count %ju)\n", 1473 (uintmax_t)LK_SHARERS(lk->lk_lock)); 1474 else 1475 db_printf("EXCL\n"); 1476 *ownerp = lockmgr_xholder(lk); 1477 1478 return (1); 1479 } 1480 1481 static void 1482 db_show_lockmgr(const struct lock_object *lock) 1483 { 1484 struct thread *td; 1485 const struct lock *lk; 1486 1487 lk = (const struct lock *)lock; 1488 1489 db_printf(" state: "); 1490 if (lk->lk_lock == LK_UNLOCKED) 1491 db_printf("UNLOCKED\n"); 1492 else if (lk->lk_lock & LK_SHARE) 1493 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock)); 1494 else { 1495 td = lockmgr_xholder(lk); 1496 if (td == (struct thread *)LK_KERNPROC) 1497 db_printf("XLOCK: LK_KERNPROC\n"); 1498 else 1499 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1500 td->td_tid, td->td_proc->p_pid, 1501 td->td_proc->p_comm); 1502 if (lockmgr_recursed(lk)) 1503 db_printf(" recursed: %d\n", lk->lk_recurse); 1504 } 1505 db_printf(" waiters: "); 1506 switch (lk->lk_lock & LK_ALL_WAITERS) { 1507 case LK_SHARED_WAITERS: 1508 db_printf("shared\n"); 1509 break; 1510 case LK_EXCLUSIVE_WAITERS: 1511 db_printf("exclusive\n"); 1512 break; 1513 case LK_ALL_WAITERS: 1514 db_printf("shared and exclusive\n"); 1515 break; 1516 default: 1517 db_printf("none\n"); 1518 } 1519 db_printf(" spinners: "); 1520 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS) 1521 db_printf("exclusive\n"); 1522 else 1523 db_printf("none\n"); 1524 } 1525 #endif 1526