/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks. This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define	ADAPTIVE_SX
#endif

CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/* Handy macros for sleep queues. */
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1
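
/*
 * Illustrative sketch only (not part of the implementation below): how a
 * typical consumer uses the sx(9) interface that this file provides.  The
 * lock name "foo_lock" and the data it protects are hypothetical.
 *
 *	static struct sx foo_lock;
 *
 *	sx_init(&foo_lock, "foo lock");
 *
 *	sx_slock(&foo_lock);		shared (read-only) access
 *	... read the protected data ...
 *	sx_sunlock(&foo_lock);
 *
 *	sx_xlock(&foo_lock);		exclusive (read-write) access
 *	... modify the protected data ...
 *	sx_xunlock(&foo_lock);
 *
 *	sx_destroy(&foo_lock);
 */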

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file. We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define	GIANT_DECLARE							\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant)					\

#define	GIANT_SAVE(work) do {						\
	if (mtx_owned(&Giant)) {					\
		work++;							\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_giantcnt++;					\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	GIANT_RESTORE() do {						\
	if (_giantcnt > 0) {						\
		mtx_assert(&Giant, MA_NOTOWNED);			\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)

/*
 * Returns true if an exclusive lock is recursed. It assumes
 * curthread currently has an exclusive lock.
 */
#define	sx_recursed(sx)		((sx)->sx_recurse != 0)

static void	assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_sx(const struct lock_object *lock);
#endif
static void	lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_sx,
#ifdef DDB
	.lc_ddb_show = db_show_sx,
#endif
	.lc_lock = lock_sx,
	.lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
static __read_frequently u_int asx_retries = 10;
static __read_frequently u_int asx_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");

static struct lock_delay_config __read_frequently sx_delay;

SYSCTL_INT(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(sx_delay);
#endif

void
assert_sx(const struct lock_object *lock, int what)
{

	sx_assert((const struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, uintptr_t how)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	if (how)
		sx_slock(sx);
	else
		sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (0);
	} else {
		sx_sunlock(sx);
		return (1);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
	const struct sx *sx;
	uintptr_t x;

	sx = (const struct sx *)lock;
	x = sx->sx_lock;
	*owner = NULL;
	return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
	    ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
}
#endif

void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
	int flags;

	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
	    SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
	    &sx->sx_lock));

	flags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (opts & SX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & SX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & SX_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & SX_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & SX_QUIET)
		flags |= LO_QUIET;
	if (opts & SX_NEW)
		flags |= LO_NEW;

	flags |= opts & SX_NOADAPTIVE;
	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
	sx->sx_lock = SX_LOCK_UNLOCKED;
	sx->sx_recurse = 0;
}

void
sx_destroy(struct sx *sx)
{

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}

int
sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));

	x = sx->sx_lock;
	for (;;) {
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{

	return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
}

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
	uintptr_t tid, x;
	int error = 0;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	x = SX_LOCK_UNLOCKED;
	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
		error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    0, 0, file, line, LOCKSTAT_WRITER);
	if (!error) {
		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
		    file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_INC(curthread);
	}

	return (error);
}
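
/*
 * Illustrative sketch only: _sx_xlock() above implements both the blocking
 * and the interruptible (SX_INTERRUPTIBLE) acquisition paths.  Consumers
 * normally reach the interruptible path through the sx_xlock_sig() wrapper
 * from sys/sx.h, which returns an errno instead of 0 when the sleep is
 * interrupted by a signal.  The lock "foo_lock" is hypothetical.
 *
 *	int error;
 *
 *	error = sx_xlock_sig(&foo_lock);
 *	if (error != 0)
 *		return (error);		interrupted; the lock is not held
 *	... critical section ...
 *	sx_xunlock(&foo_lock);
 */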

int
sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, x;
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	rval = 1;
	recursed = false;
	x = SX_LOCK_UNLOCKED;
	for (;;) {
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
			break;
		if (x == SX_LOCK_UNLOCKED)
			continue;
		if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
			sx->sx_recurse++;
			atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
			break;
		}
		rval = 0;
		break;
	}

	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}

	return (rval);
}

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{

	return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
	    line);
#if LOCK_DEBUG > 0
	_sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
#else
	__sx_xunlock(sx, curthread, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeds, 0 otherwise.
 */
int
sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	/*
	 * Try to switch from one shared lock to an exclusive lock. We need
	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
	 * we will wake up the exclusive waiters when we drop the lock.
	 */
	x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
	success = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
	    (uintptr_t)curthread | x);
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success) {
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(sx__upgrade, sx);
	}
	return (success);
}

int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{

	return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
}
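
/*
 * Illustrative sketch only: the usual consumer pattern for the upgrade and
 * downgrade primitives defined here.  An upgrade can fail when other sharers
 * are present, in which case the caller typically drops the shared lock and
 * reacquires it exclusively, revalidating any state afterwards.  The lock
 * "foo_lock" is hypothetical.
 *
 *	sx_slock(&foo_lock);
 *	if (!sx_try_upgrade(&foo_lock)) {
 *		sx_sunlock(&foo_lock);
 *		sx_xlock(&foo_lock);
 *		... re-check state; it may have changed while unlocked ...
 *	}
 *	... modify the protected data ...
 *	sx_downgrade(&foo_lock);	(or simply sx_xunlock())
 *	... continue reading under the shared lock ...
 *	sx_sunlock(&foo_lock);
 */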

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters. If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag. We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS)))
		goto out;

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wakeup any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock. If there are any shared waiters, wake them up.
	 */
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	if (wakeup_swapper)
		kick_proc0();

out:
	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(sx__downgrade, sx);
}

void
sx_downgrade_(struct sx *sx, const char *file, int line)
{

	sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
}

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation. All 'easy case' failures are redirected to this. Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
	uintptr_t tid;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
	u_int i, n, spintries = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state;
#endif
	int extra_work = 0;

	tid = (uintptr_t)curthread;
	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

	if (__predict_false(x == SX_LOCK_UNLOCKED))
		x = SX_READ_VALUE(sx);

	/* If we already hold an exclusive lock, then recurse. */
	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
		    sx->lock_object.lo_name, file, line));
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
	    &waittime);

#ifdef LOCK_PROFILING
	extra_work = 1;
	state = x;
#elif defined(KDTRACE_HOOKS)
	extra_work = lockstat_enabled;
	if (__predict_false(extra_work)) {
		all_time -= lockstat_nsecs(&sx->lock_object);
		state = x;
	}
#endif

	for (;;) {
		if (x == SX_LOCK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_SX
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			if ((x & SX_LOCK_SHARED) == 0) {
				owner = lv_sx_owner(x);
				if (TD_IS_RUNNING(owner)) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR3(KTR_LOCK,
						    "%s: spinning on %p held by %p",
						    __func__, sx, owner);
					KTR_STATE1(KTR_SCHED, "thread",
					    sched_tdname(curthread), "spinning",
					    "lockname:\"%s\"",
					    sx->lock_object.lo_name);
					GIANT_SAVE(extra_work);
					do {
						lock_delay(&lda);
						x = SX_READ_VALUE(sx);
						owner = lv_sx_owner(x);
					} while (owner != NULL &&
					    TD_IS_RUNNING(owner));
					KTR_STATE0(KTR_SCHED, "thread",
					    sched_tdname(curthread), "running");
					continue;
				}
			} else if (SX_SHARERS(x) && spintries < asx_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE(extra_work);
				spintries++;
				for (i = 0; i < asx_loops; i += n) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR4(KTR_LOCK,
						    "%s: shared spinning on %p with %u and %u",
						    __func__, sx, spintries, i);
					n = SX_SHARERS(x);
					lock_delay_spin(n);
					x = SX_READ_VALUE(sx);
					if ((x & SX_LOCK_SHARED) == 0 ||
					    SX_SHARERS(x) == 0)
						break;
				}
#ifdef KDTRACE_HOOKS
				lda.spin_cnt += i;
#endif
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				if (i != asx_loops)
					continue;
			}
		}
#endif

		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the sleep queue
		 * chain lock. If so, drop the sleep queue lock and try
		 * again.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * If an exclusive lock was released with both shared
		 * and exclusive waiters and a shared waiter hasn't
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once. Note
		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
		 * as there are other exclusive waiters still. If we
		 * fail, restart the loop.
		 */
		if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
			    tid | SX_LOCK_EXCLUSIVE_WAITERS))
				goto retry_sleepq;
			sleepq_release(&sx->lock_object);
			CTR2(KTR_LOCK, "%s: %p claimed by new writer",
			    __func__, sx);
			break;
		}

		/*
		 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS. If we fail,
		 * then loop back and retry.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
			    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
				goto retry_sleepq;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE(extra_work);
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
				    "%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!extra_work))
		return (error);
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (!error)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_WRITER);
	GIANT_RESTORE();
	return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation. All 'easy case' failures are redirected to this. Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t tid, setx;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;

	if (__predict_false(x == tid))
		x = SX_READ_VALUE(sx);

	MPASS(!(x & SX_LOCK_SHARED));

	if (__predict_false(x & SX_LOCK_RECURSED)) {
		/* The lock is recursed, unrecurse one level. */
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
	if (x == tid &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		return;

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));

	/*
	 * The wake up algorithm here is quite simple and probably not
	 * ideal. It gives precedence to shared waiters if they are
	 * present. For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty, avoid
	 * starvation of the threads sleeping on the exclusive queue by
	 * giving them precedence and cleaning up the shared waiters bit
	 * anyway.
	 */
	setx = SX_LOCK_UNLOCKED;
	queue = SQ_EXCLUSIVE_QUEUE;
	if ((x & SX_LOCK_SHARED_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
		queue = SQ_SHARED_QUEUE;
		setx |= (x & SX_LOCK_EXCLUSIVE_WAITERS);
	}
	atomic_store_rel_ptr(&sx->sx_lock, setx);

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");

	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}

static bool __always_inline
__sx_slock_try(struct sx *sx, uintptr_t *xp LOCK_FILE_LINE_ARG_DEF)
{

	/*
	 * If no other thread has an exclusive lock then try to bump up
	 * the count of sharers. Since we have to preserve the state
	 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
	 * shared lock loop back and retry.
	 */
	while (*xp & SX_LOCK_SHARED) {
		MPASS(!(*xp & SX_LOCK_SHARED_WAITERS));
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
		    *xp + SX_ONE_SHARER)) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
				    __func__, sx, (void *)*xp,
				    (void *)(*xp + SX_ONE_SHARER));
			return (true);
		}
	}
	return (false);
}

static int __noinline
_sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state;
#endif
	int extra_work = 0;

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
	    &waittime);

#ifdef LOCK_PROFILING
	extra_work = 1;
	state = x;
#elif defined(KDTRACE_HOOKS)
	extra_work = lockstat_enabled;
	if (__predict_false(extra_work)) {
		all_time -= lockstat_nsecs(&sx->lock_object);
		state = x;
	}
#endif

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
		if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = lv_sx_owner(x);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE(extra_work);
				do {
					lock_delay(&lda);
					x = SX_READ_VALUE(sx);
					owner = lv_sx_owner(x);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:
		/*
		 * The lock could have been released while we spun.
		 * In this case loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				x = SX_READ_VALUE(sx);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag. If we
		 * fail to set it drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
			    x | SX_LOCK_SHARED_WAITERS))
				goto retry_sleepq;
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE(extra_work);
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
				    "%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!extra_work))
		return (error);
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (error == 0) {
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_READER);
	}
	GIANT_RESTORE();
	return (error);
}

int
_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	int error;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);

	error = 0;
	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__acquire) ||
	    !__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG)))
		error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
	if (error == 0) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
	}
	return (error);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{

	return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
}

static bool __always_inline
_sx_sunlock_try(struct sx *sx, uintptr_t *xp)
{

	for (;;) {
		/*
		 * We should never have shared waiters while at least one
		 * thread holds a shared lock.
		 */
		KASSERT(!(*xp & SX_LOCK_SHARED_WAITERS),
		    ("%s: waiting sharers", __func__));

		/*
		 * See if there is more than one shared lock held. If
		 * so, just drop one and return.
		 */
		if (SX_SHARERS(*xp) > 1) {
			if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
			    *xp - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)*xp,
					    (void *)(*xp - SX_ONE_SHARER));
				return (true);
			}
			continue;
		}

		/*
		 * If there aren't any waiters for an exclusive lock,
		 * then try to drop it quickly.
		 */
		if (!(*xp & SX_LOCK_EXCLUSIVE_WAITERS)) {
			MPASS(*xp == SX_SHARERS_LOCK(1));
			*xp = SX_SHARERS_LOCK(1);
			if (atomic_fcmpset_rel_ptr(&sx->sx_lock,
			    xp, SX_LOCK_UNLOCKED)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, sx);
				return (true);
			}
			continue;
		}
		break;
	}
	return (false);
}

static void __noinline
_sx_sunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	int wakeup_swapper;
	uintptr_t setx;

	if (SCHEDULER_STOPPED())
		return;

	if (_sx_sunlock_try(sx, &x))
		goto out_lockstat;

	/*
	 * At this point, there should just be one sharer with
	 * exclusive waiters.
	 */
	MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	for (;;) {
		MPASS(x & SX_LOCK_EXCLUSIVE_WAITERS);
		MPASS(!(x & SX_LOCK_SHARED_WAITERS));
		/*
		 * The wakeup semantics here are quite simple:
		 * just wake up all the exclusive waiters.
		 * Note that the state of the lock could have changed,
		 * so if it fails loop back and retry.
		 */
		setx = x - SX_ONE_SHARER;
		setx &= ~SX_LOCK_EXCLUSIVE_WAITERS;
		if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
			continue;
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p waking up all threads on "
			    "exclusive queue", __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_EXCLUSIVE_QUEUE);
		break;
	}
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
out_lockstat:
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
}

void
_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);

	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__release) ||
	    !_sx_sunlock_try(sx, &x)))
		_sx_sunlock_hard(sx, x LOCK_FILE_LINE_ARG);

	TD_LOCKS_DEC(curthread);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	_sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
	int slocked = 0;
#endif

	if (panicstr != NULL)
		return;
	switch (what) {
	case SA_SLOCKED:
	case SA_SLOCKED | SA_NOTRECURSED:
	case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
		slocked = 1;
		/* FALLTHROUGH */
#endif
	case SA_LOCKED:
	case SA_LOCKED | SA_NOTRECURSED:
	case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If some other thread has an exclusive lock or we
		 * have one and are asserting a shared lock, fail.
		 * Also, if no one has a lock at all, fail.
		 */
		if (sx->sx_lock == SX_LOCK_UNLOCKED ||
		    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
		    sx_xholder(sx) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->lock_object.lo_name, slocked ? "share " : "",
			    file, line);

		if (!(sx->sx_lock & SX_LOCK_SHARED)) {
			if (sx_recursed(sx)) {
				if (what & SA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    sx->lock_object.lo_name, file,
					    line);
			} else if (what & SA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		}
#endif
		break;
	case SA_XLOCKED:
	case SA_XLOCKED | SA_NOTRECURSED:
	case SA_XLOCKED | SA_RECURSED:
		if (sx_xholder(sx) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		if (sx_recursed(sx)) {
			if (what & SA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		} else if (what & SA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		break;
	case SA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If we hold an exclusive lock fail. We can't
		 * reliably check to see if we hold a shared lock or
		 * not.
		 */
		if (sx_xholder(sx) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */

#ifdef DDB
static void
db_show_sx(const struct lock_object *lock)
{
	struct thread *td;
	const struct sx *sx;

	sx = (const struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_lock == SX_LOCK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (sx->sx_lock == SX_LOCK_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
	else {
		td = sx_xholder(sx);
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (sx_recursed(sx))
			db_printf(" recursed: %d\n", sx->sx_recurse);
	}

	db_printf(" waiters: ");
	switch (sx->sx_lock &
	    (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
	case SX_LOCK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive and shared\n");
		break;
	default:
		db_printf("none\n");
	}
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock. If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;

	/*
	 * Check to see if this thread is blocked on an sx lock.
	 * First, we check the lock class. If that is ok, then we
	 * compare the lock name against the wait message.
	 */
	sx = td->td_wchan;
	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
	    sx->lock_object.lo_name != td->td_wmesg)
		return (0);

	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	*ownerp = sx_xholder(sx);
	if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK (count %ju)\n",
		    (uintmax_t)SX_SHARERS(sx->sx_lock));
	else
		db_printf("XLOCK\n");
	return (1);
}
#endif