/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
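/*
 * A minimal usage sketch of the sx(9) interface implemented in this file.
 * This is illustrative only; the names "example_lock" and "example_count"
 * are hypothetical and appear nowhere else in this file:
 *
 *	static struct sx example_lock;
 *	static int example_count;
 *
 *	sx_init(&example_lock, "example");
 *
 *	sx_slock(&example_lock);	(readers may run concurrently)
 *	(void)example_count;
 *	sx_sunlock(&example_lock);
 *
 *	sx_xlock(&example_lock);	(a writer excludes everyone else)
 *	example_count++;
 *	sx_xunlock(&example_lock);
 *
 *	sx_destroy(&example_lock);
 */
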
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define	ADAPTIVE_SX
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/* Handy macros for sleep queues. */
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define	GIANT_DECLARE							\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant)					\

#define	GIANT_SAVE(work) do {						\
	if (__predict_false(mtx_owned(&Giant))) {			\
		work++;							\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_giantcnt++;					\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	GIANT_RESTORE() do {						\
	if (_giantcnt > 0) {						\
		mtx_assert(&Giant, MA_NOTOWNED);			\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)

/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define	sx_recursed(sx)		((sx)->sx_recurse != 0)

static void	assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_sx(const struct lock_object *lock);
#endif
static void	lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_sx,
#ifdef DDB
	.lc_ddb_show = db_show_sx,
#endif
	.lc_lock = lock_sx,
	.lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
static __read_frequently u_int asx_retries;
static __read_frequently u_int asx_loops;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");

static struct lock_delay_config __read_frequently sx_delay;

SYSCTL_INT(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
    0, "");

static void
sx_lock_delay_init(void *arg __unused)
{

	lock_delay_default_init(&sx_delay);
	asx_retries = 10;
	asx_loops = max(10000, sx_delay.max);
}
LOCK_DELAY_SYSINIT(sx_lock_delay_init);
#endif

void
assert_sx(const struct lock_object *lock, int what)
{

	sx_assert((const struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, uintptr_t how)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	if (how)
		sx_slock(sx);
	else
		sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (0);
	} else {
		sx_sunlock(sx);
		return (1);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
	const struct sx *sx;
	uintptr_t x;

	sx = (const struct sx *)lock;
	x = sx->sx_lock;
	*owner = NULL;
	return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
	    ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
}
#endif

void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
	int flags;

	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
	    SX_NOPROFILE | SX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
	    &sx->sx_lock));

	flags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (opts & SX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & SX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & SX_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & SX_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & SX_QUIET)
		flags |= LO_QUIET;
	if (opts & SX_NEW)
		flags |= LO_NEW;

	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
	sx->sx_lock = SX_LOCK_UNLOCKED;
	sx->sx_recurse = 0;
}

void
sx_destroy(struct sx *sx)
{

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}

int
sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));

	x = sx->sx_lock;
	for (;;) {
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			curthread->td_sx_slocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{

	return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
}

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
	uintptr_t tid, x;
	int error = 0;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	x = SX_LOCK_UNLOCKED;
	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
		error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    0, 0, file, line, LOCKSTAT_WRITER);
	if (!error) {
		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
		    file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_INC(curthread);
	}

	return (error);
}

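/*
 * Illustrative only: callers that want an interruptible exclusive acquire
 * reach the path above through the sx_xlock_sig() wrapper, which passes
 * SX_INTERRUPTIBLE and may therefore fail ("example_lock" is a hypothetical
 * name):
 *
 *	int error;
 *
 *	error = sx_xlock_sig(&example_lock);
 *	if (error != 0)
 *		return (error);
 */
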
int
sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, x;
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	rval = 1;
	recursed = false;
	x = SX_LOCK_UNLOCKED;
	for (;;) {
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
			break;
		if (x == SX_LOCK_UNLOCKED)
			continue;
		if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
			sx->sx_recurse++;
			atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
			break;
		}
		rval = 0;
		break;
	}

	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}

	return (rval);
}

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{

	return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
	    line);
#if LOCK_DEBUG > 0
	_sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
#else
	__sx_xunlock(sx, curthread, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeded, 0 otherwise.
 */
int
sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	uintptr_t waiters;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	/*
	 * Try to switch from one shared lock to an exclusive lock.  We need
	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
	 * we will wake up the exclusive waiters when we drop the lock.
	 */
	success = 0;
	x = SX_READ_VALUE(sx);
	for (;;) {
		if (SX_SHARERS(x) > 1)
			break;
		waiters = (x & SX_LOCK_WAITERS);
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
		    (uintptr_t)curthread | waiters)) {
			success = 1;
			break;
		}
	}
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_sx_slocks--;
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(sx__upgrade, sx);
	}
	return (success);
}

int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{

	return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
}

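/*
 * Illustrative only: a common caller pattern around sx_try_upgrade().  The
 * upgrade attempt is non-blocking and can fail, so the usual fallback is to
 * drop the shared lock, take the exclusive lock and re-validate any state
 * that was examined under the shared lock ("example_lock" is a hypothetical
 * name):
 *
 *	sx_slock(&example_lock);
 *	...
 *	if (!sx_try_upgrade(&example_lock)) {
 *		sx_sunlock(&example_lock);
 *		sx_xlock(&example_lock);
 *		(re-check the state observed while only shared-locked)
 *	}
 *	...
 *	sx_xunlock(&example_lock);
 */
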
/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters.  If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag.  We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS)))
		goto out;

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wakeup any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock.  If there are any shared waiters, wake them up.
	 */
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	if (wakeup_swapper)
		kick_proc0();

out:
	curthread->td_sx_slocks++;
	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(sx__downgrade, sx);
}

void
sx_downgrade_(struct sx *sx, const char *file, int line)
{

	sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
}

#ifdef ADAPTIVE_SX
static inline void
sx_drop_critical(uintptr_t x, bool *in_critical, int *extra_work)
{

	if (x & SX_LOCK_WRITE_SPINNER)
		return;
	if (*in_critical) {
		critical_exit();
		*in_critical = false;
		(*extra_work)--;
	}
}
#else
#define	sx_drop_critical(x, in_critical, extra_work) do { } while (0)
#endif

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
	uintptr_t tid, setx;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
	u_int i, n, spintries = 0;
	enum { READERS, WRITER } sleep_reason = READERS;
	bool in_critical = false;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state = 0;
	int doing_lockprof = 0;
#endif
	int extra_work = 0;

	tid = (uintptr_t)curthread;

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
		while (x == SX_LOCK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				goto out_lockstat;
		}
		extra_work = 1;
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&sx->lock_object);
		state = x;
	}
#endif
#ifdef LOCK_PROFILING
	extra_work = 1;
	doing_lockprof = 1;
	state = x;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

	if (__predict_false(x == SX_LOCK_UNLOCKED))
		x = SX_READ_VALUE(sx);

	/* If we already hold an exclusive lock, then recurse. */
	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
		    sx->lock_object.lo_name, file, line));
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
	    &waittime);

#ifndef INVARIANTS
	GIANT_SAVE(extra_work);
#endif

	for (;;) {
		if (x == SX_LOCK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				break;
			continue;
		}
#ifdef INVARIANTS
		GIANT_SAVE(extra_work);
#endif
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_SX
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		if ((x & SX_LOCK_SHARED) == 0) {
			sx_drop_critical(x, &in_critical, &extra_work);
			sleep_reason = WRITER;
			owner = lv_sx_owner(x);
			if (!TD_IS_RUNNING(owner))
				goto sleepq;
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, sx, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    sx->lock_object.lo_name);
			do {
				lock_delay(&lda);
				x = SX_READ_VALUE(sx);
				owner = lv_sx_owner(x);
			} while (owner != NULL && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		} else if (SX_SHARERS(x) > 0) {
			sleep_reason = READERS;
			if (spintries == asx_retries)
				goto sleepq;
			if (!(x & SX_LOCK_WRITE_SPINNER)) {
				if (!in_critical) {
					critical_enter();
					in_critical = true;
					extra_work++;
				}
				if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
				    x | SX_LOCK_WRITE_SPINNER)) {
					critical_exit();
					in_critical = false;
					extra_work--;
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    sx->lock_object.lo_name);
			n = SX_SHARERS(x);
			for (i = 0; i < asx_loops; i += n) {
				lock_delay_spin(n);
				x = SX_READ_VALUE(sx);
				if (!(x & SX_LOCK_WRITE_SPINNER))
					break;
				if (!(x & SX_LOCK_SHARED))
					break;
				n = SX_SHARERS(x);
				if (n == 0)
					break;
			}
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i < asx_loops)
				continue;
		}
sleepq:
#endif
		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			sx_drop_critical(x, &in_critical, &extra_work);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the sleep queue
		 * chain lock.  If so, drop the sleep queue lock and try
		 * again.
		 */
		if (!(x & SX_LOCK_SHARED)) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				sx_drop_critical(x, &in_critical,
				    &extra_work);
				continue;
			}
		} else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
			sleepq_release(&sx->lock_object);
			sx_drop_critical(x, &in_critical, &extra_work);
			continue;
		}
#endif
		/*
		 * If an exclusive lock was released with both shared
		 * and exclusive waiters and a shared waiter hasn't
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once.  Note
		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
		 * as there are other exclusive waiters still.  If we
		 * fail, restart the loop.
		 */
		setx = x & (SX_LOCK_WAITERS | SX_LOCK_WRITE_SPINNER);
		if ((x & ~setx) == SX_LOCK_SHARED) {
			setx &= ~SX_LOCK_WRITE_SPINNER;
			if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid | setx))
				goto retry_sleepq;
			sleepq_release(&sx->lock_object);
			CTR2(KTR_LOCK, "%s: %p claimed by new writer",
			    __func__, sx);
			break;
		}

#ifdef ADAPTIVE_SX
		/*
		 * It is possible we set the SX_LOCK_WRITE_SPINNER bit.
		 * It is an invariant that when the bit is set, there is
		 * a writer ready to grab the lock.  Thus clear the bit since
		 * we are going to sleep.
		 */
		if (in_critical) {
			if ((x & SX_LOCK_WRITE_SPINNER) ||
			    !((x & SX_LOCK_EXCLUSIVE_WAITERS))) {
				setx = x & ~SX_LOCK_WRITE_SPINNER;
				setx |= SX_LOCK_EXCLUSIVE_WAITERS;
				if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
				    setx)) {
					goto retry_sleepq;
				}
			}
			critical_exit();
			in_critical = false;
		} else {
#endif
			/*
			 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If
			 * we fail, then loop back and retry.
			 */
			if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
				if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
				    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
					goto retry_sleepq;
				}
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
					    __func__, sx);
			}
#ifdef ADAPTIVE_SX
		}
#endif

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
	if (__predict_true(!extra_work))
		return (error);
#ifdef ADAPTIVE_SX
	if (in_critical)
		critical_exit();
#endif
	GIANT_RESTORE();
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return (error);
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
out_lockstat:
#endif
	if (!error)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_WRITER);
	return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t tid, setx;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;

	if (__predict_false(x == tid))
		x = SX_READ_VALUE(sx);

	MPASS(!(x & SX_LOCK_SHARED));

	if (__predict_false(x & SX_LOCK_RECURSED)) {
		/* The lock is recursed, unrecurse one level. */
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
	if (x == tid &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		return;

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));

	/*
	 * The wake up algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present.  For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty, avoid
	 * starving the threads sleeping on the exclusive queue by giving
	 * them precedence and cleaning up the shared waiters bit anyway.
	 */
	setx = SX_LOCK_UNLOCKED;
	queue = SQ_SHARED_QUEUE;
	if ((x & SX_LOCK_EXCLUSIVE_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_EXCLUSIVE_QUEUE) != 0) {
		queue = SQ_EXCLUSIVE_QUEUE;
		setx |= (x & SX_LOCK_SHARED_WAITERS);
	}
	atomic_store_rel_ptr(&sx->sx_lock, setx);

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");

	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}

static bool __always_inline
__sx_can_read(struct thread *td, uintptr_t x, bool fp)
{

	if ((x & (SX_LOCK_SHARED | SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_WRITE_SPINNER))
	    == SX_LOCK_SHARED)
		return (true);
	if (!fp && td->td_sx_slocks && (x & SX_LOCK_SHARED))
		return (true);
	return (false);
}

static bool __always_inline
__sx_slock_try(struct sx *sx, struct thread *td, uintptr_t *xp, bool fp
    LOCK_FILE_LINE_ARG_DEF)
{

	/*
	 * If no other thread has an exclusive lock then try to bump up
	 * the count of sharers.  Since we have to preserve the state
	 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
	 * shared lock loop back and retry.
	 */
	while (__sx_can_read(td, *xp, fp)) {
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
		    *xp + SX_ONE_SHARER)) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
				    __func__, sx, (void *)*xp,
				    (void *)(*xp + SX_ONE_SHARER));
			td->td_sx_slocks++;
			return (true);
		}
	}
	return (false);
}

static int __noinline
_sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
	struct thread *td;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
	u_int i, n, spintries = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state = 0;
#endif
	int extra_work = 0;

	td = curthread;

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
		if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
			goto out_lockstat;
		extra_work = 1;
		all_time -= lockstat_nsecs(&sx->lock_object);
		state = x;
	}
#endif
#ifdef LOCK_PROFILING
	extra_work = 1;
	state = x;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
	    &waittime);

#ifndef INVARIANTS
	GIANT_SAVE(extra_work);
#endif

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
		if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
			break;
#ifdef INVARIANTS
		GIANT_SAVE(extra_work);
#endif
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((x & SX_LOCK_SHARED) == 0) {
			owner = lv_sx_owner(x);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				do {
					lock_delay(&lda);
					x = SX_READ_VALUE(sx);
					owner = lv_sx_owner(x);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else {
			if ((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) {
				MPASS(!__sx_can_read(td, x, false));
				lock_delay_spin(2);
				x = SX_READ_VALUE(sx);
				continue;
			}
			if (spintries < asx_retries) {
				KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
				    "spinning", "lockname:\"%s\"",
				    sx->lock_object.lo_name);
				n = SX_SHARERS(x);
				for (i = 0; i < asx_loops; i += n) {
					lock_delay_spin(n);
					x = SX_READ_VALUE(sx);
					if (!(x & SX_LOCK_SHARED))
						break;
					n = SX_SHARERS(x);
					if (n == 0)
						break;
					if (__sx_can_read(td, x, false))
						break;
				}
#ifdef KDTRACE_HOOKS
				lda.spin_cnt += i;
#endif
				KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
				    "running");
				if (i < asx_loops)
					continue;
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:
		if (((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) ||
		    __sx_can_read(td, x, false)) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if (!(x & SX_LOCK_SHARED)) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				x = SX_READ_VALUE(sx);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
			    x | SX_LOCK_SHARED_WAITERS))
				goto retry_sleepq;
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!extra_work))
		return (error);
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
out_lockstat:
#endif
	if (error == 0) {
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_READER);
	}
	GIANT_RESTORE();
	return (error);
}

int
_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t x;
	int error;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);

	error = 0;
	td = curthread;
	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
	    !__sx_slock_try(sx, td, &x, true LOCK_FILE_LINE_ARG)))
		error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
	else
		lock_profile_obtain_lock_success(&sx->lock_object, 0, 0,
		    file, line);
	if (error == 0) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
	}
	return (error);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{

	return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
}

static bool __always_inline
_sx_sunlock_try(struct sx *sx, struct thread *td, uintptr_t *xp)
{

	for (;;) {
		if (SX_SHARERS(*xp) > 1 || !(*xp & SX_LOCK_WAITERS)) {
			if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
			    *xp - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)*xp,
					    (void *)(*xp - SX_ONE_SHARER));
				td->td_sx_slocks--;
				return (true);
			}
			continue;
		}
		break;
	}
	return (false);
}

static void __noinline
_sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
    LOCK_FILE_LINE_ARG_DEF)
{
	int wakeup_swapper = 0;
	uintptr_t setx, queue;

	if (SCHEDULER_STOPPED())
		return;

	if (_sx_sunlock_try(sx, td, &x))
		goto out_lockstat;

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	for (;;) {
		if (_sx_sunlock_try(sx, td, &x))
			break;

		/*
		 * The wakeup semantics here are quite simple:
		 * just wake up all the exclusive waiters.
		 * Note that the state of the lock could have changed,
		 * so if it fails loop back and retry.
		 */
		setx = SX_LOCK_UNLOCKED;
		queue = SQ_SHARED_QUEUE;
		if (x & SX_LOCK_EXCLUSIVE_WAITERS) {
			setx |= (x & SX_LOCK_SHARED_WAITERS);
			queue = SQ_EXCLUSIVE_QUEUE;
		}
		setx |= (x & SX_LOCK_WRITE_SPINNER);
		if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
			continue;
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p waking up all threads on "
			    "exclusive queue", __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, queue);
		td->td_sx_slocks--;
		break;
	}
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
out_lockstat:
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
}

void
_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t x;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);

	td = curthread;
	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
	    !_sx_sunlock_try(sx, td, &x)))
		_sx_sunlock_hard(sx, td, x LOCK_FILE_LINE_ARG);
	else
		lock_profile_release_lock(&sx->lock_object);

	TD_LOCKS_DEC(curthread);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	_sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
	int slocked = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;
	switch (what) {
	case SA_SLOCKED:
	case SA_SLOCKED | SA_NOTRECURSED:
	case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
		slocked = 1;
		/* FALLTHROUGH */
#endif
	case SA_LOCKED:
	case SA_LOCKED | SA_NOTRECURSED:
	case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If some other thread has an exclusive lock or we
		 * have one and are asserting a shared lock, fail.
		 * Also, if no one has a lock at all, fail.
		 */
		if (sx->sx_lock == SX_LOCK_UNLOCKED ||
		    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
		    sx_xholder(sx) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->lock_object.lo_name, slocked ? "share " : "",
			    file, line);

		if (!(sx->sx_lock & SX_LOCK_SHARED)) {
			if (sx_recursed(sx)) {
				if (what & SA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    sx->lock_object.lo_name, file,
					    line);
			} else if (what & SA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		}
#endif
		break;
	case SA_XLOCKED:
	case SA_XLOCKED | SA_NOTRECURSED:
	case SA_XLOCKED | SA_RECURSED:
		if (sx_xholder(sx) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		if (sx_recursed(sx)) {
			if (what & SA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		} else if (what & SA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		break;
	case SA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If we hold an exclusive lock fail.  We can't
		 * reliably check to see if we hold a shared lock or
		 * not.
		 */
		if (sx_xholder(sx) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */

#ifdef DDB
static void
db_show_sx(const struct lock_object *lock)
{
	struct thread *td;
	const struct sx *sx;

	sx = (const struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_lock == SX_LOCK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (sx->sx_lock == SX_LOCK_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
	else {
		td = sx_xholder(sx);
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (sx_recursed(sx))
			db_printf(" recursed: %d\n", sx->sx_recurse);
	}

	db_printf(" waiters: ");
	switch(sx->sx_lock &
	    (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
	case SX_LOCK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive and shared\n");
		break;
	default:
		db_printf("none\n");
	}
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;

	/*
	 * Check to see if this thread is blocked on an sx lock.
	 * First, we check the lock class.  If that is ok, then we
	 * compare the lock name against the wait message.
	 */
	sx = td->td_wchan;
	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
	    sx->lock_object.lo_name != td->td_wmesg)
		return (0);

	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	*ownerp = sx_xholder(sx);
	if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK (count %ju)\n",
		    (uintmax_t)SX_SHARERS(sx->sx_lock));
	else
		db_printf("XLOCK\n");
	return (1);
}
#endif
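
/*
 * Illustrative only: the DDB routines above are typically reached through
 * the generic debugger lock commands, for example (the addresses and tid
 * are hypothetical placeholders):
 *
 *	db> show lock <address of a struct sx>
 *	db> show sleepchain <thread blocked on an sx lock>
 */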