/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/*
 * Return the rwlock address when the lock cookie address is provided.
 * This functionality assumes that a struct rwlock has a member named rw_lock.
 */
#define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(const struct lock_object *lock);
#endif
static void	assert_rw(const struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
};

#ifdef ADAPTIVE_RWLOCKS
static int __read_frequently rowner_retries = 10;
static int __read_frequently rowner_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
    "rwlock debugging");
SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");

static struct lock_delay_config __read_frequently rw_delay;

SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_base, CTLFLAG_RW, &rw_delay.base,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(rw_delay);
#endif

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */

#define	lv_rw_wowner(v)						\
	((v) & RW_LOCK_READ ? NULL :				\
	 (struct thread *)RW_OWNER((v)))

#define	rw_wowner(rw)	lv_rw_wowner(RW_READ_VALUE(rw))

/*
 * Returns true if a write owner is recursed.  Write ownership is not assured
 * here and should be previously checked.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	__rw_assert(c, what, file, line)
#endif

void
assert_rw(const struct lock_object *lock, int what)
{

	rw_assert((const struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}

uintptr_t
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (1);
	} else {
		rw_wunlock(rw);
		return (0);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_rw(const struct lock_object *lock, struct thread **owner)
{
	const struct rwlock *rw = (const struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ?
	    (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
	struct rwlock *rw;
	int flags;

	rw = rwlock2rw(c);

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE | RW_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	if (opts & RW_NEW)
		flags |= LO_NEW;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
}

void
_rw_destroy(volatile uintptr_t *c)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args;

	args = arg;
	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
	    args->ra_flags);
}

int
_rw_wowned(const volatile uintptr_t *c)
{

	return (rw_wowner(rwlock2rw(c)) == curthread);
}

void
_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t tid, v;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	v = RW_UNLOCKED;
	if (!_rw_write_lock_fetch(rw, &v, tid))
		_rw_wlock_hard(rw, v, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw,
		    0, 0, file, line, LOCKSTAT_WRITER);

	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}

int
__rw_try_wlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	rval = 1;
	recursed = false;
	v = RW_UNLOCKED;
	for (;;) {
		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
			break;
		if (v == RW_UNLOCKED)
			continue;
		if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) {
			rw->rw_recurse++;
			atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
			/* Mark the acquisition recursed so it is not profiled. */
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}
	return (rval);
}

int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	return (__rw_try_wlock_int(rw LOCK_FILE_LINE_ARG));
}

void
_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);

#ifdef LOCK_PROFILING
	_rw_wunlock_hard(rw, (uintptr_t)curthread, file, line);
#else
	__rw_wunlock(rw, curthread, file, line);
#endif

	TD_LOCKS_DEC(curthread);
}

/*
 * Determines whether a new reader can acquire a lock.  Succeeds if the
 * reader already owns a read lock and the lock is locked for read to
 * prevent deadlock from reader recursion.  Also succeeds if the lock
 * is unlocked and has no writer waiters or spinners.  Otherwise it fails,
 * which gives priority to writers over readers.
 */
static bool __always_inline
__rw_can_read(struct thread *td, uintptr_t v, bool fp)
{

	if ((v & (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER))
	    == RW_LOCK_READ)
		return (true);
	if (!fp && td->td_rw_rlocks && (v & RW_LOCK_READ))
		return (true);
	return (false);
}

static bool __always_inline
__rw_rlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp, bool fp
    LOCK_FILE_LINE_ARG_DEF)
{

	/*
	 * Handle the easy case.  If no other thread has a write
	 * lock, then try to bump up the count of read locks.  Note
	 * that we have to preserve the current state of the
	 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
	 * read lock, then rw_lock must have changed, so restart
	 * the loop.  Note that this handles the case of a
	 * completely unlocked rwlock since such a lock is encoded
	 * as a read lock with no waiters.
	 */
	while (__rw_can_read(td, *vp, fp)) {
		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, vp,
		    *vp + RW_ONE_READER)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR4(KTR_LOCK,
				    "%s: %p succeed %p -> %p", __func__,
				    rw, (void *)*vp,
				    (void *)(*vp + RW_ONE_READER));
			td->td_rw_rlocks++;
			return (true);
		}
	}
	return (false);
}

static void __noinline
__rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
    LOCK_FILE_LINE_ARG_DEF)
{
	struct turnstile *ts;
	struct thread *owner;
#ifdef ADAPTIVE_RWLOCKS
	int spintries = 0;
	int i, n;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state;
	int doing_lockprof;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&rw->lock_object,
	    &contested, &waittime);

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
	state = v;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof)) {
		all_time -= lockstat_nsecs(&rw->lock_object);
		state = v;
	}
#endif

	for (;;) {
		if (__rw_rlock_try(rw, td, &v, false LOCK_FILE_LINE_ARG))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				do {
					lock_delay(&lda);
					v = RW_READ_VALUE(rw);
					owner = lv_rw_wowner(v);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i += n) {
				n = RW_READERS(v);
				lock_delay_spin(n);
				v = RW_READ_VALUE(rw);
				if ((v & RW_LOCK_READ) == 0 ||
				    __rw_can_read(td, v, false))
					break;
			}
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i != rowner_loops)
				continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present,
		 * acquire the turnstile lock so we can begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = RW_READ_VALUE(rw);
retry_ts:
		if (__rw_can_read(td, v, false)) {
			turnstile_cancel(ts);
			continue;
		}

		owner = lv_rw_wowner(v);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (owner != NULL) {
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!__rw_can_read(td, v, false));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
			    v | RW_LOCK_READ_WAITERS))
				goto retry_ts;
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		MPASS(owner == rw_owner(rw));
		turnstile_wait(ts, owner, TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
		v = RW_READ_VALUE(rw);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_READER);
}

void
__rw_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t v;

	td = curthread;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED_TD(td) ||
	    !TD_IS_IDLETHREAD(td),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    td, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != td,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

	v = RW_READ_VALUE(rw);
	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(rw__acquire) ||
	    !__rw_rlock_try(rw, td, &v, true LOCK_FILE_LINE_ARG)))
		__rw_rlock_hard(rw, td, v LOCK_FILE_LINE_ARG);

	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	TD_LOCKS_INC(curthread);
}

void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	__rw_rlock_int(rw LOCK_FILE_LINE_ARG);
}

int
__rw_try_rlock_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	x = rw->rw_lock;
	for (;;) {
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}

int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	return (__rw_try_rlock_int(rw LOCK_FILE_LINE_ARG));
}

static bool __always_inline
__rw_runlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp)
{

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		if (RW_READERS(*vp) > 1) {
			if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp,
			    *vp - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)*vp,
					    (void *)(*vp - RW_ONE_READER));
				td->td_rw_rlocks--;
				return (true);
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(*vp & RW_LOCK_WAITERS)) {
			MPASS((*vp & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				td->td_rw_rlocks--;
				return (true);
			}
			continue;
		}
		break;
	}
	return (false);
}

static void __noinline
__rw_runlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
    LOCK_FILE_LINE_ARG_DEF)
{
	struct turnstile *ts;
	uintptr_t x, queue;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
		if (__rw_runlock_try(rw, td, &v))
			break;

		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = RW_READ_VALUE(rw);
retry_ts:
		if (__predict_false(RW_READERS(v) > 1)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}

		v &= (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs and lets the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		v |= RW_READERS_LOCK(1);
		if (!atomic_fcmpset_rel_ptr(&rw->rw_lock, &v, x))
			goto retry_ts;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		td->td_rw_rlocks--;
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
}

void
_rw_runlock_cookie_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t v;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(&rw->rw_lock, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	td = curthread;
	v = RW_READ_VALUE(rw);

	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(rw__release) ||
	    !__rw_runlock_try(rw, td, &v)))
		__rw_runlock_hard(rw, td, v LOCK_FILE_LINE_ARG);

	TD_LOCKS_DEC(curthread);
}

void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	_rw_runlock_cookie_int(rw LOCK_FILE_LINE_ARG);
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t tid;
	struct rwlock *rw;
	struct turnstile *ts;
	struct thread *owner;
#ifdef ADAPTIVE_RWLOCKS
	int spintries = 0;
	int i, n;
#endif
	uintptr_t x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state;
	int doing_lockprof;
#endif

	tid = (uintptr_t)curthread;
	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	rw = rwlock2rw(c);
	if (__predict_false(v == RW_UNLOCKED))
		v = RW_READ_VALUE(rw);

	if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&rw->lock_object,
	    &contested, &waittime);

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
	state = v;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof)) {
		all_time -= lockstat_nsecs(&rw->lock_object);
		state = v;
	}
#endif

	for (;;) {
		if (v == RW_UNLOCKED) {
			if (_rw_write_lock_fetch(rw, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		owner = lv_rw_wowner(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = RW_READ_VALUE(rw);
				owner = lv_rw_wowner(v);
			} while (owner != NULL && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
		    spintries < rowner_retries) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i += n) {
				n = RW_READERS(v);
				lock_delay_spin(n);
				v = RW_READ_VALUE(rw);
				if ((v & RW_LOCK_WRITE_SPINNER) == 0)
					break;
			}
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			if (i != rowner_loops)
				continue;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = RW_READ_VALUE(rw);
retry_ts:
		owner = lv_rw_wowner(v);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (owner != NULL) {
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif
		/*
		 * Check the waiters flags on this rwlock.  If the lock was
		 * released without leaving any waiters queued, simply try
		 * to acquire it.  If a waiters queue is present, claim lock
		 * ownership while preserving the queue.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			goto retry_ts;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
			    v | RW_LOCK_WRITE_WAITERS))
				goto retry_ts;
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		MPASS(owner == rw_owner(rw));
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
		v = RW_READ_VALUE(rw);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
}

/*
 * This function is called if lockstat is active or the first try at releasing
 * a write lock failed.  The latter means that the lock is recursed or one of
 * the two waiter bits is set, indicating that at least one thread is waiting
 * on this lock.
 */
void
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t tid, setv;
	int queue;

	tid = (uintptr_t)curthread;
	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);
	if (__predict_false(v == tid))
		v = RW_READ_VALUE(rw);

	if (v & RW_LOCK_WRITER_RECURSED) {
		if (--(rw->rw_recurse) == 0)
			atomic_clear_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_WRITER);
	if (v == tid && _rw_write_unlock(rw, tid))
		return;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up write
	 * waiters if we have any over readers.  This is probably not ideal.
	 *
	 * 'setv' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wake up the
	 * writers but leave the RW_LOCK_READ_WAITERS flag set.  If a new
	 * reader comes in before a woken writer runs it can still acquire
	 * the lock, in effect stealing it from the writer.
	 * There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	setv = RW_UNLOCKED;
	v = RW_READ_VALUE(rw);
	queue = TS_SHARED_QUEUE;
	if (v & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		setv |= (v & RW_LOCK_READ_WAITERS);
	}
	atomic_store_rel_ptr(&rw->rw_lock, setv);

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");

	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, queue);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
__rw_try_upgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(&rw->rw_lock, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(rw__upgrade, rw);
	}
	return (success);
}

int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	return (__rw_try_upgrade_int(rw LOCK_FILE_LINE_ARG));
}

/*
 * Downgrade a write lock into a single read lock.
 */
void
__rw_downgrade_int(struct rwlock *rw LOCK_FILE_LINE_ARG_DEF)
{
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(&rw->rw_lock, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(rw__downgrade, rw);
}

void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);
	__rw_downgrade_int(rw LOCK_FILE_LINE_ARG);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	__rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct rwlock *rw;

	if (panicstr != NULL)
		return;

	rw = rwlock2rw(c);

	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(const struct lock_object *lock)
{
	const struct rwlock *rw;
	struct thread *td;

	rw = (const struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}
#endif
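
/*
 * Illustrative usage sketch (editor's addition, not part of the
 * implementation above): a hypothetical consumer of the rw_lock(9) KPI
 * implemented in this file.  The names foo_softc, foo_lookup and
 * foo_insert are placeholders, not real kernel symbols.
 *
 *	struct foo_softc {
 *		struct rwlock	sc_lock;
 *	};
 *
 *	rw_init(&sc->sc_lock, "foo softc");
 *
 *	rw_rlock(&sc->sc_lock);			read locks may be shared
 *	obj = foo_lookup(sc, key);
 *	rw_runlock(&sc->sc_lock);
 *
 *	rw_wlock(&sc->sc_lock);			write locks are exclusive
 *	foo_insert(sc, obj);
 *	rw_wunlock(&sc->sc_lock);
 *
 *	rw_destroy(&sc->sc_lock);
 *
 * rw_try_upgrade() and rw_downgrade() move between the two modes; the
 * upgrade is non-blocking and may fail, so callers must handle failure.
 */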