/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/*
 * Return the rwlock address when the lock cookie address is provided.
 * This functionality assumes that struct rwlock has a member named rw_lock.
 */
#define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))
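
/*
 * Illustrative sketch (added for exposition, not part of the original file):
 * the cookie passed to the _rw_*_cookie() functions below is the address of
 * the rw_lock word embedded in a struct rwlock, so __containerof() can
 * recover the containing lock.  The names "example_lock" and "cookie" here
 * are hypothetical.
 *
 *	struct rwlock example_lock;
 *	volatile uintptr_t *cookie = &example_lock.rw_lock;
 *	struct rwlock *rw = rwlock2rw(cookie);	/- rw == &example_lock -/
 */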
#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(const struct lock_object *lock);
#endif
static void	assert_rw(const struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
};

#ifdef ADAPTIVE_RWLOCKS
static int rowner_retries = 10;
static int rowner_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
    "rwlock debugging");
SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");

static struct lock_delay_config __read_mostly rw_delay = {
	.initial	= 1000,
	.step		= 500,
	.min		= 100,
	.max		= 5000,
};

SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_initial, CTLFLAG_RW, &rw_delay.initial,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_step, CTLFLAG_RW, &rw_delay.step,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_min, CTLFLAG_RW, &rw_delay.min,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
    0, "");

static void
rw_delay_sysinit(void *dummy)
{

	rw_delay.initial = mp_ncpus * 25;
	rw_delay.step = (mp_ncpus * 25) / 2;
	rw_delay.min = mp_ncpus * 5;
	rw_delay.max = mp_ncpus * 25 * 10;
}
LOCK_DELAY_SYSINIT(rw_delay_sysinit);
#endif

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */

#define	lv_rw_wowner(v)							\
	((v) & RW_LOCK_READ ? NULL :					\
	 (struct thread *)RW_OWNER((v)))

#define	rw_wowner(rw)	lv_rw_wowner(RW_READ_VALUE(rw))

/*
 * Returns true if the write owner is recursed.  Write ownership is not
 * assured here; it must have been checked beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)
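
/*
 * Worked illustration (added for exposition; states are described with the
 * macros used in this file, not raw bit values):
 *
 *	v == RW_UNLOCKED		lv_rw_wowner(v) == NULL; an unlocked
 *					lock is encoded as a read lock with
 *					zero readers and no waiters.
 *	v == RW_READERS_LOCK(2)		lv_rw_wowner(v) == NULL; read-locked
 *					with RW_READERS(v) == 2.
 *	v == (uintptr_t)curthread	lv_rw_wowner(v) == curthread;
 *					write-locked (RW_LOCK_READ clear).
 */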

#ifndef INVARIANTS
#define	__rw_assert(c, what, file, line)
#endif

void
assert_rw(const struct lock_object *lock, int what)
{

	rw_assert((const struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}

uintptr_t
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (1);
	} else {
		rw_wunlock(rw);
		return (0);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_rw(const struct lock_object *lock, struct thread **owner)
{
	const struct rwlock *rw = (const struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ? (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
	struct rwlock *rw;
	int flags;

	rw = rwlock2rw(c);

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE | RW_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	if (opts & RW_NEW)
		flags |= LO_NEW;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
}

void
_rw_destroy(volatile uintptr_t *c)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
}

void
rw_sysinit_flags(void *arg)
{
	struct rw_args_flags *args = arg;

	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
	    args->ra_flags);
}

int
_rw_wowned(const volatile uintptr_t *c)
{

	return (rw_wowner(rwlock2rw(c)) == curthread);
}

void
_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t tid, v;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	v = RW_UNLOCKED;
	if (!_rw_write_lock_fetch(rw, &v, tid))
		_rw_wlock_hard(rw, v, tid, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw,
		    0, 0, file, line, LOCKSTAT_WRITER);

	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}
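
/*
 * Usage sketch (added for exposition; "example_lock" and "example_data" are
 * hypothetical).  Consumers do not call the _cookie functions directly; they
 * use the rw_init()/rw_wlock()/rw_wunlock()/rw_destroy() macros from
 * <sys/rwlock.h>, which pass &rw->rw_lock as the cookie:
 *
 *	static struct rwlock example_lock;
 *	static int example_data;
 *
 *	rw_init(&example_lock, "example");	(once, at setup time)
 *
 *	rw_wlock(&example_lock);		(exclusive access, may block)
 *	example_data++;
 *	rw_wunlock(&example_lock);
 *
 *	rw_destroy(&example_lock);		(once, at teardown time)
 */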

int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!rw_recursed(rw))
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}
	return (rval);
}

void
_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	if (rw->rw_recurse)
		rw->rw_recurse--;
	else
		_rw_wunlock_hard(rw, (uintptr_t)curthread, file, line);

	TD_LOCKS_DEC(curthread);
}

/*
 * Determines whether a new reader can acquire a lock.  Succeeds if the
 * reader already owns a read lock and the lock is locked for read to
 * prevent deadlock from reader recursion.  Also succeeds if the lock
 * is unlocked and has no writer waiters or spinners.  Failing otherwise
 * prioritizes writers before readers.
 */
#define	RW_CAN_READ(_rw)						\
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
    RW_LOCK_READ)
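
/*
 * Worked example (added for exposition; cases follow directly from the
 * macro above):
 *
 *	- unlocked, no write waiters or spinners:	RW_CAN_READ() is true.
 *	- read-locked, no write waiters or spinners:	true.
 *	- read-locked with RW_LOCK_WRITE_WAITERS set:	false for a thread
 *	  with td_rw_rlocks == 0 (writers are prioritized), but true for a
 *	  thread that already holds read locks, to avoid deadlocking a
 *	  recursive reader behind a waiting writer.
 *	- write-locked:					false.
 */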

void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t v;
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
#endif
	v = RW_READ_VALUE(rw);
#ifdef KDTRACE_HOOKS
	state = v;
#endif
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				do {
					lock_delay(&lda);
					v = RW_READ_VALUE(rw);
					owner = lv_rw_wowner(v);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				v = RW_READ_VALUE(rw);
				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
					break;
				cpu_spinwait();
			}
			v = RW_READ_VALUE(rw);
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i != rowner_loops)
				continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present,
		 * acquire the turnstile lock so we can begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = RW_READ_VALUE(rw);
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				v = RW_READ_VALUE(rw);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
		v = RW_READ_VALUE(rw);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_READER);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	TD_LOCKS_INC(curthread);
	curthread->td_rw_rlocks++;
}

int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}

void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t x, v, queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */
	x = RW_READ_VALUE(rw);
	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		if (RW_READERS(x) > 1) {
			if (atomic_fcmpset_rel_ptr(&rw->rw_lock, &x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_fcmpset_rel_ptr(&rw->rw_lock, &x,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wakeup actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wakeup all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			x = RW_READ_VALUE(rw);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
	TD_LOCKS_DEC(curthread);
	curthread->td_rw_rlocks--;
}
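
/*
 * Usage sketch (added for exposition; "example_lock" and "example_data" are
 * hypothetical).  The read paths above back the rw_rlock()/rw_runlock() and
 * rw_try_rlock() macros from <sys/rwlock.h>:
 *
 *	int snapshot;
 *
 *	rw_rlock(&example_lock);		(shared access, may block)
 *	snapshot = example_data;
 *	rw_runlock(&example_lock);
 *
 *	if (rw_try_rlock(&example_lock)) {	(non-blocking variant)
 *		snapshot = example_data;
 *		rw_runlock(&example_lock);
 *	}
 */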

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
    const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
	uintptr_t x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	rw = rwlock2rw(c);
	if (__predict_false(v == RW_UNLOCKED))
		v = RW_READ_VALUE(rw);

	if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = v;
#endif
	for (;;) {
		if (v == RW_UNLOCKED) {
			if (_rw_write_lock_fetch(rw, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		owner = lv_rw_wowner(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = RW_READ_VALUE(rw);
				owner = lv_rw_wowner(v);
			} while (owner != NULL && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
		    spintries < rowner_retries) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					v = RW_READ_VALUE(rw);
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
					break;
				cpu_spinwait();
			}
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			v = RW_READ_VALUE(rw);
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			if (i != rowner_loops)
				continue;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = RW_READ_VALUE(rw);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (!(v & RW_LOCK_READ)) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif
		/*
		 * Check the waiter flags on this rwlock.  If the lock was
		 * released without leaving any pending waiter queue, simply
		 * try to acquire it.  If a pending waiter queue is present,
		 * claim lock ownership and preserve the pending queue.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			turnstile_cancel(ts);
			v = RW_READ_VALUE(rw);
			continue;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				v = RW_READ_VALUE(rw);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
		v = RW_READ_VALUE(rw);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);
	MPASS(!rw_recursed(rw));

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw,
	    LOCKSTAT_WRITER);
	if (_rw_write_unlock(rw, tid))
		return;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wakeup the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
"read" : "write"); 1059 turnstile_broadcast(ts, queue); 1060 atomic_store_rel_ptr(&rw->rw_lock, v); 1061 turnstile_unpend(ts, TS_EXCLUSIVE_LOCK); 1062 turnstile_chain_unlock(&rw->lock_object); 1063 } 1064 1065 /* 1066 * Attempt to do a non-blocking upgrade from a read lock to a write 1067 * lock. This will only succeed if this thread holds a single read 1068 * lock. Returns true if the upgrade succeeded and false otherwise. 1069 */ 1070 int 1071 __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line) 1072 { 1073 struct rwlock *rw; 1074 uintptr_t v, x, tid; 1075 struct turnstile *ts; 1076 int success; 1077 1078 if (SCHEDULER_STOPPED()) 1079 return (1); 1080 1081 rw = rwlock2rw(c); 1082 1083 KASSERT(rw->rw_lock != RW_DESTROYED, 1084 ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line)); 1085 __rw_assert(c, RA_RLOCKED, file, line); 1086 1087 /* 1088 * Attempt to switch from one reader to a writer. If there 1089 * are any write waiters, then we will have to lock the 1090 * turnstile first to prevent races with another writer 1091 * calling turnstile_wait() before we have claimed this 1092 * turnstile. So, do the simple case of no waiters first. 1093 */ 1094 tid = (uintptr_t)curthread; 1095 success = 0; 1096 for (;;) { 1097 v = rw->rw_lock; 1098 if (RW_READERS(v) > 1) 1099 break; 1100 if (!(v & RW_LOCK_WAITERS)) { 1101 success = atomic_cmpset_ptr(&rw->rw_lock, v, tid); 1102 if (!success) 1103 continue; 1104 break; 1105 } 1106 1107 /* 1108 * Ok, we think we have waiters, so lock the turnstile. 1109 */ 1110 ts = turnstile_trywait(&rw->lock_object); 1111 v = rw->rw_lock; 1112 if (RW_READERS(v) > 1) { 1113 turnstile_cancel(ts); 1114 break; 1115 } 1116 /* 1117 * Try to switch from one reader to a writer again. This time 1118 * we honor the current state of the waiters flags. 1119 * If we obtain the lock with the flags set, then claim 1120 * ownership of the turnstile. 1121 */ 1122 x = rw->rw_lock & RW_LOCK_WAITERS; 1123 success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x); 1124 if (success) { 1125 if (x) 1126 turnstile_claim(ts); 1127 else 1128 turnstile_cancel(ts); 1129 break; 1130 } 1131 turnstile_cancel(ts); 1132 } 1133 LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line); 1134 if (success) { 1135 curthread->td_rw_rlocks--; 1136 WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, 1137 file, line); 1138 LOCKSTAT_RECORD0(rw__upgrade, rw); 1139 } 1140 return (success); 1141 } 1142 1143 /* 1144 * Downgrade a write lock into a single read lock. 1145 */ 1146 void 1147 __rw_downgrade(volatile uintptr_t *c, const char *file, int line) 1148 { 1149 struct rwlock *rw; 1150 struct turnstile *ts; 1151 uintptr_t tid, v; 1152 int rwait, wwait; 1153 1154 if (SCHEDULER_STOPPED()) 1155 return; 1156 1157 rw = rwlock2rw(c); 1158 1159 KASSERT(rw->rw_lock != RW_DESTROYED, 1160 ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line)); 1161 __rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line); 1162 #ifndef INVARIANTS 1163 if (rw_recursed(rw)) 1164 panic("downgrade of a recursed lock"); 1165 #endif 1166 1167 WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line); 1168 1169 /* 1170 * Convert from a writer to a single reader. First we handle 1171 * the easy case with no waiters. If there are any waiters, we 1172 * lock the turnstile and "disown" the lock. 

/*
 * Downgrade a write lock into a single read lock.
 */
void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(rw__downgrade, rw);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef __rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct rwlock *rw;

	if (panicstr != NULL)
		return;

	rw = rwlock2rw(c);

	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
1253 "read " : "", file, line); 1254 1255 if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) { 1256 if (rw_recursed(rw)) { 1257 if (what & RA_NOTRECURSED) 1258 panic("Lock %s recursed @ %s:%d\n", 1259 rw->lock_object.lo_name, file, 1260 line); 1261 } else if (what & RA_RECURSED) 1262 panic("Lock %s not recursed @ %s:%d\n", 1263 rw->lock_object.lo_name, file, line); 1264 } 1265 #endif 1266 break; 1267 case RA_WLOCKED: 1268 case RA_WLOCKED | RA_RECURSED: 1269 case RA_WLOCKED | RA_NOTRECURSED: 1270 if (rw_wowner(rw) != curthread) 1271 panic("Lock %s not exclusively locked @ %s:%d\n", 1272 rw->lock_object.lo_name, file, line); 1273 if (rw_recursed(rw)) { 1274 if (what & RA_NOTRECURSED) 1275 panic("Lock %s recursed @ %s:%d\n", 1276 rw->lock_object.lo_name, file, line); 1277 } else if (what & RA_RECURSED) 1278 panic("Lock %s not recursed @ %s:%d\n", 1279 rw->lock_object.lo_name, file, line); 1280 break; 1281 case RA_UNLOCKED: 1282 #ifdef WITNESS 1283 witness_assert(&rw->lock_object, what, file, line); 1284 #else 1285 /* 1286 * If we hold a write lock fail. We can't reliably check 1287 * to see if we hold a read lock or not. 1288 */ 1289 if (rw_wowner(rw) == curthread) 1290 panic("Lock %s exclusively locked @ %s:%d\n", 1291 rw->lock_object.lo_name, file, line); 1292 #endif 1293 break; 1294 default: 1295 panic("Unknown rw lock assertion: %d @ %s:%d", what, file, 1296 line); 1297 } 1298 } 1299 #endif /* INVARIANT_SUPPORT */ 1300 1301 #ifdef DDB 1302 void 1303 db_show_rwlock(const struct lock_object *lock) 1304 { 1305 const struct rwlock *rw; 1306 struct thread *td; 1307 1308 rw = (const struct rwlock *)lock; 1309 1310 db_printf(" state: "); 1311 if (rw->rw_lock == RW_UNLOCKED) 1312 db_printf("UNLOCKED\n"); 1313 else if (rw->rw_lock == RW_DESTROYED) { 1314 db_printf("DESTROYED\n"); 1315 return; 1316 } else if (rw->rw_lock & RW_LOCK_READ) 1317 db_printf("RLOCK: %ju locks\n", 1318 (uintmax_t)(RW_READERS(rw->rw_lock))); 1319 else { 1320 td = rw_wowner(rw); 1321 db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1322 td->td_tid, td->td_proc->p_pid, td->td_name); 1323 if (rw_recursed(rw)) 1324 db_printf(" recursed: %u\n", rw->rw_recurse); 1325 } 1326 db_printf(" waiters: "); 1327 switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) { 1328 case RW_LOCK_READ_WAITERS: 1329 db_printf("readers\n"); 1330 break; 1331 case RW_LOCK_WRITE_WAITERS: 1332 db_printf("writers\n"); 1333 break; 1334 case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS: 1335 db_printf("readers and writers\n"); 1336 break; 1337 default: 1338 db_printf("none\n"); 1339 break; 1340 } 1341 } 1342 1343 #endif 1344