/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define	ADAPTIVE_SX
#endif

CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/* Handy macros for sleep queues. */
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
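 *
 * An illustrative sketch of the resulting pattern in the hard-case
 * routines below ("do_block()" is a hypothetical stand-in for sleeping
 * on a sleep queue or adaptively spinning):
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();		(fully drop Giant, remembering its depth)
 *	do_block();
 *	GIANT_RESTORE();	(relock Giant to the remembered depth)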
 */
#define	GIANT_DECLARE							\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant)					\

#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_giantcnt++;					\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	GIANT_RESTORE() do {						\
	if (_giantcnt > 0) {						\
		mtx_assert(&Giant, MA_NOTOWNED);			\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)

/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define	sx_recursed(sx)		((sx)->sx_recurse != 0)

static void	assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_sx(const struct lock_object *lock);
#endif
static void	lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_sx,
#ifdef DDB
	.lc_ddb_show = db_show_sx,
#endif
	.lc_lock = lock_sx,
	.lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
static u_int asx_retries = 10;
static u_int asx_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");

static struct lock_delay_config sx_delay = {
	.initial	= 1000,
	.step		= 500,
	.min		= 100,
	.max		= 5000,
};

SYSCTL_INT(_debug_sx, OID_AUTO, delay_initial, CTLFLAG_RW, &sx_delay.initial,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_step, CTLFLAG_RW, &sx_delay.step,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_min, CTLFLAG_RW, &sx_delay.min,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
    0, "");

static void
sx_delay_sysinit(void *dummy)
{

	sx_delay.initial = mp_ncpus * 25;
	sx_delay.step = (mp_ncpus * 25) / 2;
	sx_delay.min = mp_ncpus * 5;
	sx_delay.max = mp_ncpus * 25 * 10;
}
LOCK_DELAY_SYSINIT(sx_delay_sysinit);
#endif

void
assert_sx(const struct lock_object *lock, int what)
{

	sx_assert((const struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, uintptr_t how)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	if (how)
		sx_slock(sx);
	else
		sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (0);
	} else {
		sx_sunlock(sx);
		return (1);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
	const struct sx *sx;
	uintptr_t x;

	sx = (const struct sx *)lock;
	x = sx->sx_lock;
	*owner = NULL;
	return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
	    ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
}
#endif
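
/*
 * The lock_class methods above are not normally called directly;
 * lock-class-generic code such as the sleep queue and condition
 * variable implementations uses them to drop and reacquire an sx
 * around a sleep, roughly (illustrative sketch):
 *
 *	how = LOCK_CLASS(lock)->lc_unlock(lock);
 *	... sleep ...
 *	LOCK_CLASS(lock)->lc_lock(lock, how);
 */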
void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
	int flags;

	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
	    SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
	    &sx->sx_lock));

	flags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (opts & SX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & SX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & SX_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & SX_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & SX_QUIET)
		flags |= LO_QUIET;
	if (opts & SX_NEW)
		flags |= LO_NEW;

	flags |= opts & SX_NOADAPTIVE;
	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
	sx->sx_lock = SX_LOCK_UNLOCKED;
	sx->sx_recurse = 0;
}

void
sx_destroy(struct sx *sx)
{

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{
	int error = 0;

	if (SCHEDULER_STOPPED())
		return (0);
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
	error = __sx_slock(sx, opts, file, line);
	if (!error) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
	}

	return (error);
}

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));

	for (;;) {
		x = sx->sx_lock;
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}
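
/*
 * A minimal consumer-side sketch of the try operation above (hypothetical
 * caller; sx_try_slock() is the wrapper macro from sx.h):
 *
 *	if (!sx_try_slock(&obj->lock))
 *		return (EWOULDBLOCK);
 *	... read-only access to obj ...
 *	sx_sunlock(&obj->lock);
 */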
int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
	int error = 0;

	if (SCHEDULER_STOPPED())
		return (0);
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	error = __sx_xlock(sx, curthread, opts, file, line);
	if (!error) {
		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
		    file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_INC(curthread);
	}

	return (error);
}

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	if (sx_xlocked(sx) &&
	    (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
		    (uintptr_t)curthread);
	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!sx_recursed(sx))
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}

	return (rval);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
	__sx_sunlock(sx, file, line);
	TD_LOCKS_DEC(curthread);
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
	    line);
	__sx_xunlock(sx, curthread, file, line);
	TD_LOCKS_DEC(curthread);
}
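
/*
 * A minimal usage sketch of the interruptible variant (hypothetical
 * caller; sx_xlock_sig() is the wrapper macro that passes
 * SX_INTERRUPTIBLE):
 *
 *	error = sx_xlock_sig(&obj->lock);
 *	if (error != 0)
 *		return (error);		(signalled; the lock is not held)
 *	...
 *	sx_xunlock(&obj->lock);
 */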
/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeds, 0 otherwise.
 */
int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	/*
	 * Try to switch from one shared lock to an exclusive lock.  We need
	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
	 * we will wake up the exclusive waiters when we drop the lock.
	 */
	x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
	success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
	    (uintptr_t)curthread | x);
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success) {
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(sx__upgrade, sx);
	}
	return (success);
}

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters.  If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag.  We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
		LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
		return;
	}

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wakeup any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock.  If there are any shared waiters, wake them up.
	 */
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(sx__downgrade, sx);

	if (wakeup_swapper)
		kick_proc0();
}
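
/*
 * Upgrade and downgrade combine into a common lookup pattern (hypothetical
 * caller): take the lock shared, upgrade only when a modification turns
 * out to be needed, and re-validate if the non-blocking upgrade fails:
 *
 *	sx_slock(&obj->lock);
 *	if (obj_needs_update(obj)) {
 *		if (!sx_try_upgrade(&obj->lock)) {
 *			sx_sunlock(&obj->lock);
 *			sx_xlock(&obj->lock);
 *			... re-check state; it may have changed ...
 *		}
 *		obj_update(obj);
 *		sx_downgrade(&obj->lock);
 *	}
 *	...
 *	sx_sunlock(&obj->lock);
 */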
/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
    int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif
	uintptr_t x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

	/* If we already hold an exclusive lock, then recurse. */
	if (sx_xlocked(sx)) {
		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
		    sx->lock_object.lo_name, file, line));
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&sx->lock_object);
	state = sx->sx_lock;
#endif
	for (;;) {
		if (sx->sx_lock == SX_LOCK_UNLOCKED &&
		    atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);
#ifdef ADAPTIVE_SX
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
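		 *
		 * Spinning is only profitable while the owner stays on a
		 * CPU: once it blocks, the lock state can change only via
		 * the sleep queue path, so we stop and queue below instead.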
		 */
		x = sx->sx_lock;
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			if ((x & SX_LOCK_SHARED) == 0) {
				x = SX_OWNER(x);
				owner = (struct thread *)x;
				if (TD_IS_RUNNING(owner)) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
						    __func__, sx, owner);
					KTR_STATE1(KTR_SCHED, "thread",
					    sched_tdname(curthread), "spinning",
					    "lockname:\"%s\"",
					    sx->lock_object.lo_name);
					GIANT_SAVE();
					while (SX_OWNER(sx->sx_lock) == x &&
					    TD_IS_RUNNING(owner))
						lock_delay(&lda);
					KTR_STATE0(KTR_SCHED, "thread",
					    sched_tdname(curthread), "running");
					continue;
				}
			} else if (SX_SHARERS(x) && spintries < asx_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < asx_loops; i++) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, sx, spintries, i);
					x = sx->sx_lock;
					if ((x & SX_LOCK_SHARED) == 0 ||
					    SX_SHARERS(x) == 0)
						break;
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					lda.spin_cnt++;
#endif
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				if (i != asx_loops)
					continue;
			}
		}
#endif

		sleepq_lock(&sx->lock_object);
		x = sx->sx_lock;

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the sleep queue
		 * chain lock.  If so, drop the sleep queue lock and try
		 * again.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * If an exclusive lock was released with both shared
		 * and exclusive waiters and a shared waiter hasn't
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once.  Note
		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
		 * as there are other exclusive waiters still.  If we
		 * fail, restart the loop.
		 */
		if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&sx->sx_lock,
			    SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
			    tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
				    __func__, sx);
				break;
			}
			sleepq_release(&sx->lock_object);
			continue;
		}

		/*
		 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we
		 * fail, then loop back and retry.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
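		 *
		 * If the caller requested an interruptible sleep
		 * (SX_INTERRUPTIBLE), sleepq_wait_sig() may return an
		 * error, in which case we bail out below without
		 * holding the lock.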
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (!error)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_WRITER);
	GIANT_RESTORE();
	return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
	uintptr_t x;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

	/* If the lock is recursed, then unrecurse one level. */
	if (sx_xlocked(sx) && sx_recursed(sx)) {
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}
	MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
	    SX_LOCK_EXCLUSIVE_WAITERS));
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_LOCK_UNLOCKED;

	/*
	 * The wake up algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present.  For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty, avoid
	 * starving the threads sleeping on the exclusive queue by
	 * giving them precedence and clearing the shared waiters bit
	 * anyway.
	 */
	if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
		queue = SQ_SHARED_QUEUE;
		x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
	} else
		queue = SQ_EXCLUSIVE_QUEUE;

	/*
	 * Wake up all the waiters for the specific queue.
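	 * sleepq_broadcast() reports whether proc0 (the swapper) must be
	 * kicked to swap in one of the awakened threads.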
	 */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
	atomic_store_rel_ptr(&sx->sx_lock, x);
	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * This function represents the so-called 'hard case' for sx_slock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t x;
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
#ifdef KDTRACE_HOOKS
	state = sx->sx_lock;
	all_time -= lockstat_nsecs(&sx->lock_object);
#endif

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
		x = sx->sx_lock;

		/*
		 * If no other thread has an exclusive lock then try to bump up
		 * the count of sharers.  Since we have to preserve the state
		 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
		 * shared lock loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			MPASS(!(x & SX_LOCK_SHARED_WAITERS));
			if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
			    x + SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    sx, (void *)x,
					    (void *)(x + SX_ONE_SHARER));
				break;
			}
			continue;
		}
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			x = SX_OWNER(x);
			owner = (struct thread *)x;
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE();
				while (SX_OWNER(sx->sx_lock) == x &&
				    TD_IS_RUNNING(owner))
					lock_delay(&lda);
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = sx->sx_lock;

		/*
		 * The lock could have been released while we spun.
		 * In this case loop back and retry.
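		 * (SX_LOCK_UNLOCKED has the shared bit set, so a full
		 * release is caught by this test as well.)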
		 */
		if (x & SX_LOCK_SHARED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_SHARED_WAITERS)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (error == 0)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_READER);
	GIANT_RESTORE();
	return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_sunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_sunlock_hard(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
		x = sx->sx_lock;

		/*
		 * We should never have shared waiters while at least one
		 * thread holds a shared lock.
		 */
		KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
		    ("%s: waiting sharers", __func__));

		/*
		 * See if there is more than one shared lock held.  If
		 * so, just drop one and return.
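		 * The drop uses release semantics so that accesses made
		 * while the shared lock was held cannot be reordered
		 * past it.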
		 */
		if (SX_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
			    x - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)x,
					    (void *)(x - SX_ONE_SHARER));
				break;
			}
			continue;
		}

		/*
		 * If there aren't any waiters for an exclusive lock,
		 * then try to drop it quickly.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			MPASS(x == SX_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&sx->sx_lock,
			    SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, sx);
				break;
			}
			continue;
		}

		/*
		 * At this point, there should just be one sharer with
		 * exclusive waiters.
		 */
		MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

		sleepq_lock(&sx->lock_object);

		/*
		 * Wake up semantic here is quite simple:
		 * Just wake up all the exclusive waiters.
		 * Note that the state of the lock could have changed,
		 * so if it fails loop back and retry.
		 */
		if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
		    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
		    SX_LOCK_UNLOCKED)) {
			sleepq_release(&sx->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p waking up all threads on "
			    "exclusive queue", __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_EXCLUSIVE_QUEUE);
		sleepq_release(&sx->lock_object);
		if (wakeup_swapper)
			kick_proc0();
		break;
	}
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
	int slocked = 0;
#endif

	if (panicstr != NULL)
		return;
	switch (what) {
	case SA_SLOCKED:
	case SA_SLOCKED | SA_NOTRECURSED:
	case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
		slocked = 1;
		/* FALLTHROUGH */
#endif
	case SA_LOCKED:
	case SA_LOCKED | SA_NOTRECURSED:
	case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If some other thread has an exclusive lock or we
		 * have one and are asserting a shared lock, fail.
		 * Also, if no one has a lock at all, fail.
		 */
		if (sx->sx_lock == SX_LOCK_UNLOCKED ||
		    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
		    sx_xholder(sx) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->lock_object.lo_name, slocked ?
"share " : "", 1175 file, line); 1176 1177 if (!(sx->sx_lock & SX_LOCK_SHARED)) { 1178 if (sx_recursed(sx)) { 1179 if (what & SA_NOTRECURSED) 1180 panic("Lock %s recursed @ %s:%d\n", 1181 sx->lock_object.lo_name, file, 1182 line); 1183 } else if (what & SA_RECURSED) 1184 panic("Lock %s not recursed @ %s:%d\n", 1185 sx->lock_object.lo_name, file, line); 1186 } 1187 #endif 1188 break; 1189 case SA_XLOCKED: 1190 case SA_XLOCKED | SA_NOTRECURSED: 1191 case SA_XLOCKED | SA_RECURSED: 1192 if (sx_xholder(sx) != curthread) 1193 panic("Lock %s not exclusively locked @ %s:%d\n", 1194 sx->lock_object.lo_name, file, line); 1195 if (sx_recursed(sx)) { 1196 if (what & SA_NOTRECURSED) 1197 panic("Lock %s recursed @ %s:%d\n", 1198 sx->lock_object.lo_name, file, line); 1199 } else if (what & SA_RECURSED) 1200 panic("Lock %s not recursed @ %s:%d\n", 1201 sx->lock_object.lo_name, file, line); 1202 break; 1203 case SA_UNLOCKED: 1204 #ifdef WITNESS 1205 witness_assert(&sx->lock_object, what, file, line); 1206 #else 1207 /* 1208 * If we hold an exclusve lock fail. We can't 1209 * reliably check to see if we hold a shared lock or 1210 * not. 1211 */ 1212 if (sx_xholder(sx) == curthread) 1213 panic("Lock %s exclusively locked @ %s:%d\n", 1214 sx->lock_object.lo_name, file, line); 1215 #endif 1216 break; 1217 default: 1218 panic("Unknown sx lock assertion: %d @ %s:%d", what, file, 1219 line); 1220 } 1221 } 1222 #endif /* INVARIANT_SUPPORT */ 1223 1224 #ifdef DDB 1225 static void 1226 db_show_sx(const struct lock_object *lock) 1227 { 1228 struct thread *td; 1229 const struct sx *sx; 1230 1231 sx = (const struct sx *)lock; 1232 1233 db_printf(" state: "); 1234 if (sx->sx_lock == SX_LOCK_UNLOCKED) 1235 db_printf("UNLOCKED\n"); 1236 else if (sx->sx_lock == SX_LOCK_DESTROYED) { 1237 db_printf("DESTROYED\n"); 1238 return; 1239 } else if (sx->sx_lock & SX_LOCK_SHARED) 1240 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock)); 1241 else { 1242 td = sx_xholder(sx); 1243 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1244 td->td_tid, td->td_proc->p_pid, td->td_name); 1245 if (sx_recursed(sx)) 1246 db_printf(" recursed: %d\n", sx->sx_recurse); 1247 } 1248 1249 db_printf(" waiters: "); 1250 switch(sx->sx_lock & 1251 (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) { 1252 case SX_LOCK_SHARED_WAITERS: 1253 db_printf("shared\n"); 1254 break; 1255 case SX_LOCK_EXCLUSIVE_WAITERS: 1256 db_printf("exclusive\n"); 1257 break; 1258 case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS: 1259 db_printf("exclusive and shared\n"); 1260 break; 1261 default: 1262 db_printf("none\n"); 1263 } 1264 } 1265 1266 /* 1267 * Check to see if a thread that is blocked on a sleep queue is actually 1268 * blocked on an sx lock. If so, output some details and return true. 1269 * If the lock has an exclusive owner, return that in *ownerp. 1270 */ 1271 int 1272 sx_chain(struct thread *td, struct thread **ownerp) 1273 { 1274 struct sx *sx; 1275 1276 /* 1277 * Check to see if this thread is blocked on an sx lock. 1278 * First, we check the lock class. If that is ok, then we 1279 * compare the lock name against the wait message. 1280 */ 1281 sx = td->td_wchan; 1282 if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx || 1283 sx->lock_object.lo_name != td->td_wmesg) 1284 return (0); 1285 1286 /* We think we have an sx lock, so output some details. 
	 */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	*ownerp = sx_xholder(sx);
	if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK (count %ju)\n",
		    (uintmax_t)SX_SHARERS(sx->sx_lock));
	else
		db_printf("XLOCK\n");
	return (1);
}
#endif