/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)
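
/*
 * Illustrative sketch (not part of this file; the function name is
 * hypothetical and the sleepqueue setup is omitted): GIANT_SAVE() and
 * GIANT_RESTORE() are meant to bracket a sleep so that a possibly
 * recursed Giant is fully dropped before blocking and reacquired to the
 * same depth afterwards, as sleeplk() below does for real:
 *
 *	static void
 *	giant_safe_sleep_sketch(struct lock *lk, int pri)
 *	{
 *		GIANT_DECLARE;
 *
 *		GIANT_SAVE();
 *		sleepq_wait(&lk->lock_object, pri);
 *		GIANT_RESTORE();
 *	}
 */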

#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}
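
/*
 * Reading aid (an illustrative sketch, not additional API): the lk_lock
 * word packs the whole lock state.  When LK_SHARE is set, the upper bits
 * carry the count of shared holders; otherwise they carry the owning
 * thread pointer, or LK_KERNPROC for a disowned lock:
 *
 *	x = lk->lk_lock;
 *	if (x & LK_SHARE)
 *		nsharers = LK_SHARERS(x);	(0 would mean unlocked)
 *	else
 *		owner = (struct thread *)LK_HOLDER(x);
 */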

/*
 * Internal sleep helper.  It assumes the sleepq_lock is held on entry and
 * returns with it unheld.  It also assumes the generic interlock is sane
 * and has been previously checked.  If LK_INTERLOCK is specified, the
 * interlock is not reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decisional switch for real sleeping.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that the lk_exslpfail count may overstate the
		 * real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps, so lk_exslpfail should be considered an upper
		 * bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}
		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}
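
/*
 * Worked example of the wakeup policy above (illustrative pseudocode):
 * when the last sharer releases while both queues are populated, the
 * decision reduces to:
 *
 *	realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
 *	if ((x & LK_EXCLUSIVE_WAITERS) && realexslp != 0 &&
 *	    lk->lk_exslpfail < realexslp)
 *		wake the exclusive queue only, keeping LK_SHARED_WAITERS set
 *	else
 *		wake both queues: every exclusive sleeper would fail with
 *		ENOLCK anyway (LK_SLEEPFAIL), so the shared sleepers must
 *		not be left starving behind them
 */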

static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}

/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}
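
/*
 * Usage sketch (hypothetical consumer; the "foo" names are invented for
 * illustration): a lock's life cycle pairs lockinit() with lockdestroy(),
 * with lockmgr() operations in between:
 *
 *	struct foo {
 *		struct lock	f_lock;
 *	};
 *
 *	lockinit(&foo->f_lock, PVFS, "foolk", 0, LK_CANRECURSE);
 *	...
 *	lockmgr(&foo->f_lock, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&foo->f_lock, LK_RELEASE, NULL);
 *	...
 *	lockdestroy(&foo->f_lock);
 */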

int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(!TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, ilk);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the operation must not sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need a double-state handle here
			 * because for a failed acquisition the lock can be
			 * either held in exclusive mode or shared mode
			 * (for the writer starvation avoidance technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
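
	/*
	 * Caller-side sketch for the LK_SHARED path above (hypothetical
	 * names): a try operation fails fast with EBUSY, while a blocking
	 * request may also return EDEADLK if curthread already owns the
	 * lock exclusively:
	 *
	 *	error = lockmgr(&foo->f_lock, LK_SHARED | LK_NOWAIT, NULL);
	 *	if (error != 0)
	 *		return (error);		(EBUSY or EDEADLK)
	 */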
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to succeed in upgrading, so just
		 * give up the shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If the operation is a try, give up and
				 * return rather than panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation must not sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
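
	/*
	 * Caller-side note for the two cases above (illustrative,
	 * hypothetical names): when the fast-path CAS of LK_UPGRADE fails,
	 * the shared lock is dropped before falling into LK_EXCLUSIVE, so
	 * other threads may run in between and callers must revalidate any
	 * state the shared hold was protecting:
	 *
	 *	if (lockmgr(&foo->f_lock, LK_UPGRADE | LK_NOWAIT, NULL)) {
	 *		lockmgr(&foo->f_lock, LK_EXCLUSIVE, NULL);
	 *		... revalidate cached state ...
	 *	}
	 */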
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		/*
		 * Panic if the lock is recursed.
		 */
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
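
	/*
	 * Illustrative LK_DOWNGRADE use (hypothetical names): convert an
	 * exclusive hold into a shared one without an unlocked window, e.g.
	 * after a modification phase that readers may now observe:
	 *
	 *	lockmgr(&foo->f_lock, LK_EXCLUSIVE, NULL);
	 *	... modify ...
	 *	lockmgr(&foo->f_lock, LK_DOWNGRADE, NULL);
	 *	... read-only phase, new sharers admitted ...
	 *	lockmgr(&foo->f_lock, LK_RELEASE, NULL);
	 */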
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it had no
			 * waiters.
			 * Fix up the tid var if the lock has been disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starvation of the threads sleeping
			 * on the shared queue by giving them precedence
			 * and cleaning up the exclusive waiters bit anyway.
			 * Please note that the lk_exslpfail count may
			 * overstate the real number of waiters with the
			 * LK_SLEEPFAIL flag on because they may be used in
			 * conjunction with interruptible sleeps, so
			 * lk_exslpfail should be considered an upper
			 * bound, including the edge cases.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
				} else {
					lk->lk_exslpfail = 0;
					LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
					    __func__, lk);
					LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
					    __func__, lk);
					wakeup_swapper =
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;
				}
			} else {

				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * on, so clean it up anyway.
				 */
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation must not sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starvation of the
				 * threads sleeping on the shared queue by
				 * giving them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Please note that the lk_exslpfail count may
				 * overstate the real number of waiters with
				 * the LK_SLEEPFAIL flag on because they may
				 * be used in conjunction with interruptible
				 * sleeps, so lk_exslpfail should be
				 * considered an upper bound, including the
				 * edge cases.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {

					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up we
				 * need to wait for one of them to acquire
				 * the lock before setting the exclusive
				 * waiters flag in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}
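
/*
 * Usage sketch for the interlock convention above (hypothetical names):
 * with LK_INTERLOCK the caller passes "ilk" locked, and lockmgr takes
 * over releasing it whether or not the acquisition succeeds:
 *
 *	mtx_lock(&foo->f_interlock);
 *	... inspect state protected by f_interlock ...
 *	error = lockmgr(&foo->f_lock, LK_EXCLUSIVE | LK_INTERLOCK,
 *	    &foo->f_interlock);
 *	(f_interlock is unlocked here regardless of error)
 */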

void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	/*
	 * Panic if the lock is recursed.
	 */
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}
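
/*
 * Illustrative disown pattern (hypothetical scenario and names): hand an
 * exclusively held lock over to LK_KERNPROC so that a different context,
 * e.g. the thread completing asynchronous I/O, may legally release it:
 *
 *	lockmgr(&foo->f_lock, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&foo->f_lock);
 *	... later, possibly from another thread:
 *	lockmgr(&foo->f_lock, LK_RELEASE, NULL);
 */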

void
lockmgr_printinfo(const struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p "
		    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}

int
lockstatus(const struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}
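
/*
 * Illustrative lockstatus() dispatch (hypothetical caller):
 *
 *	switch (lockstatus(&foo->f_lock)) {
 *	case LK_EXCLUSIVE:	held exclusively by curthread (or disowned)
 *	case LK_EXCLOTHER:	held exclusively by another thread
 *	case LK_SHARED:		held in shared mode
 *	case 0:			unlocked
 *	}
 */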

#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
		/* FALLTHROUGH */
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is
		 * held in exclusive mode even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif
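
/*
 * Illustrative assertion usage (hypothetical callers; _lockmgr_assert
 * compiles away entirely unless INVARIANTS is enabled):
 *
 *	lockmgr_assert(&foo->f_lock, KA_XLOCKED);
 *	lockmgr_assert(&foo->f_lock, KA_LOCKED | KA_NOTRECURSED);
 */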
"share" : "", 1394 file, line); 1395 1396 if ((lk->lk_lock & LK_SHARE) == 0) { 1397 if (lockmgr_recursed(lk)) { 1398 if (what & KA_NOTRECURSED) 1399 panic("Lock %s recursed @ %s:%d\n", 1400 lk->lock_object.lo_name, file, 1401 line); 1402 } else if (what & KA_RECURSED) 1403 panic("Lock %s not recursed @ %s:%d\n", 1404 lk->lock_object.lo_name, file, line); 1405 } 1406 break; 1407 case KA_XLOCKED: 1408 case KA_XLOCKED | KA_NOTRECURSED: 1409 case KA_XLOCKED | KA_RECURSED: 1410 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)) 1411 panic("Lock %s not exclusively locked @ %s:%d\n", 1412 lk->lock_object.lo_name, file, line); 1413 if (lockmgr_recursed(lk)) { 1414 if (what & KA_NOTRECURSED) 1415 panic("Lock %s recursed @ %s:%d\n", 1416 lk->lock_object.lo_name, file, line); 1417 } else if (what & KA_RECURSED) 1418 panic("Lock %s not recursed @ %s:%d\n", 1419 lk->lock_object.lo_name, file, line); 1420 break; 1421 case KA_UNLOCKED: 1422 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk)) 1423 panic("Lock %s exclusively locked @ %s:%d\n", 1424 lk->lock_object.lo_name, file, line); 1425 break; 1426 default: 1427 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file, 1428 line); 1429 } 1430 } 1431 #endif 1432 1433 #ifdef DDB 1434 int 1435 lockmgr_chain(struct thread *td, struct thread **ownerp) 1436 { 1437 struct lock *lk; 1438 1439 lk = td->td_wchan; 1440 1441 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr) 1442 return (0); 1443 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name); 1444 if (lk->lk_lock & LK_SHARE) 1445 db_printf("SHARED (count %ju)\n", 1446 (uintmax_t)LK_SHARERS(lk->lk_lock)); 1447 else 1448 db_printf("EXCL\n"); 1449 *ownerp = lockmgr_xholder(lk); 1450 1451 return (1); 1452 } 1453 1454 static void 1455 db_show_lockmgr(const struct lock_object *lock) 1456 { 1457 struct thread *td; 1458 const struct lock *lk; 1459 1460 lk = (const struct lock *)lock; 1461 1462 db_printf(" state: "); 1463 if (lk->lk_lock == LK_UNLOCKED) 1464 db_printf("UNLOCKED\n"); 1465 else if (lk->lk_lock & LK_SHARE) 1466 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock)); 1467 else { 1468 td = lockmgr_xholder(lk); 1469 if (td == (struct thread *)LK_KERNPROC) 1470 db_printf("XLOCK: LK_KERNPROC\n"); 1471 else 1472 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1473 td->td_tid, td->td_proc->p_pid, 1474 td->td_proc->p_comm); 1475 if (lockmgr_recursed(lk)) 1476 db_printf(" recursed: %d\n", lk->lk_recurse); 1477 } 1478 db_printf(" waiters: "); 1479 switch (lk->lk_lock & LK_ALL_WAITERS) { 1480 case LK_SHARED_WAITERS: 1481 db_printf("shared\n"); 1482 break; 1483 case LK_EXCLUSIVE_WAITERS: 1484 db_printf("exclusive\n"); 1485 break; 1486 case LK_ALL_WAITERS: 1487 db_printf("shared and exclusive\n"); 1488 break; 1489 default: 1490 db_printf("none\n"); 1491 } 1492 db_printf(" spinners: "); 1493 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS) 1494 db_printf("exclusive\n"); 1495 else 1496 db_printf("none\n"); 1497 } 1498 #endif 1499