1 /*- 2 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 /* 28 * Machine independent bits of reader/writer lock implementation. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include "opt_ddb.h" 35 #include "opt_hwpmc_hooks.h" 36 #include "opt_no_adaptive_rwlocks.h" 37 38 #include <sys/param.h> 39 #include <sys/kdb.h> 40 #include <sys/ktr.h> 41 #include <sys/kernel.h> 42 #include <sys/lock.h> 43 #include <sys/mutex.h> 44 #include <sys/proc.h> 45 #include <sys/rwlock.h> 46 #include <sys/sched.h> 47 #include <sys/smp.h> 48 #include <sys/sysctl.h> 49 #include <sys/systm.h> 50 #include <sys/turnstile.h> 51 52 #include <machine/cpu.h> 53 54 #if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS) 55 #define ADAPTIVE_RWLOCKS 56 #endif 57 58 #ifdef HWPMC_HOOKS 59 #include <sys/pmckern.h> 60 PMC_SOFT_DECLARE( , , lock, failed); 61 #endif 62 63 /* 64 * Return the rwlock address when the lock cookie address is provided. 65 * This functionality assumes that struct rwlock* have a member named rw_lock. 66 */ 67 #define rwlock2rw(c) (__containerof(c, struct rwlock, rw_lock)) 68 69 #ifdef DDB 70 #include <ddb/ddb.h> 71 72 static void db_show_rwlock(const struct lock_object *lock); 73 #endif 74 static void assert_rw(const struct lock_object *lock, int what); 75 static void lock_rw(struct lock_object *lock, uintptr_t how); 76 #ifdef KDTRACE_HOOKS 77 static int owner_rw(const struct lock_object *lock, struct thread **owner); 78 #endif 79 static uintptr_t unlock_rw(struct lock_object *lock); 80 81 struct lock_class lock_class_rw = { 82 .lc_name = "rw", 83 .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE, 84 .lc_assert = assert_rw, 85 #ifdef DDB 86 .lc_ddb_show = db_show_rwlock, 87 #endif 88 .lc_lock = lock_rw, 89 .lc_unlock = unlock_rw, 90 #ifdef KDTRACE_HOOKS 91 .lc_owner = owner_rw, 92 #endif 93 }; 94 95 #ifdef ADAPTIVE_RWLOCKS 96 static int rowner_retries = 10; 97 static int rowner_loops = 10000; 98 static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL, 99 "rwlock debugging"); 100 SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, ""); 101 SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, ""); 102 103 static struct lock_delay_config __read_mostly rw_delay = { 104 .initial = 1000, 105 .step = 500, 106 .min = 100, 107 .max = 5000, 108 }; 109 110 SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_initial, CTLFLAG_RW, &rw_delay.initial, 111 0, ""); 112 SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_step, CTLFLAG_RW, &rw_delay.step, 113 0, ""); 114 SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_min, CTLFLAG_RW, &rw_delay.min, 115 0, ""); 116 SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max, 117 0, ""); 118 119 static void 120 rw_delay_sysinit(void *dummy) 121 { 122 123 rw_delay.initial = mp_ncpus * 25; 124 rw_delay.step = (mp_ncpus * 25) / 2; 125 rw_delay.min = mp_ncpus * 5; 126 rw_delay.max = mp_ncpus * 25 * 10; 127 } 128 LOCK_DELAY_SYSINIT(rw_delay_sysinit); 129 #endif 130 131 /* 132 * Return a pointer to the owning thread if the lock is write-locked or 133 * NULL if the lock is unlocked or read-locked. 134 */ 135 136 #define lv_rw_wowner(v) \ 137 ((v) & RW_LOCK_READ ? NULL : \ 138 (struct thread *)RW_OWNER((v))) 139 140 #define rw_wowner(rw) lv_rw_wowner(RW_READ_VALUE(rw)) 141 142 /* 143 * Returns if a write owner is recursed. Write ownership is not assured 144 * here and should be previously checked. 145 */ 146 #define rw_recursed(rw) ((rw)->rw_recurse != 0) 147 148 /* 149 * Return true if curthread helds the lock. 150 */ 151 #define rw_wlocked(rw) (rw_wowner((rw)) == curthread) 152 153 /* 154 * Return a pointer to the owning thread for this lock who should receive 155 * any priority lent by threads that block on this lock. Currently this 156 * is identical to rw_wowner(). 157 */ 158 #define rw_owner(rw) rw_wowner(rw) 159 160 #ifndef INVARIANTS 161 #define __rw_assert(c, what, file, line) 162 #endif 163 164 void 165 assert_rw(const struct lock_object *lock, int what) 166 { 167 168 rw_assert((const struct rwlock *)lock, what); 169 } 170 171 void 172 lock_rw(struct lock_object *lock, uintptr_t how) 173 { 174 struct rwlock *rw; 175 176 rw = (struct rwlock *)lock; 177 if (how) 178 rw_rlock(rw); 179 else 180 rw_wlock(rw); 181 } 182 183 uintptr_t 184 unlock_rw(struct lock_object *lock) 185 { 186 struct rwlock *rw; 187 188 rw = (struct rwlock *)lock; 189 rw_assert(rw, RA_LOCKED | LA_NOTRECURSED); 190 if (rw->rw_lock & RW_LOCK_READ) { 191 rw_runlock(rw); 192 return (1); 193 } else { 194 rw_wunlock(rw); 195 return (0); 196 } 197 } 198 199 #ifdef KDTRACE_HOOKS 200 int 201 owner_rw(const struct lock_object *lock, struct thread **owner) 202 { 203 const struct rwlock *rw = (const struct rwlock *)lock; 204 uintptr_t x = rw->rw_lock; 205 206 *owner = rw_wowner(rw); 207 return ((x & RW_LOCK_READ) != 0 ? (RW_READERS(x) != 0) : 208 (*owner != NULL)); 209 } 210 #endif 211 212 void 213 _rw_init_flags(volatile uintptr_t *c, const char *name, int opts) 214 { 215 struct rwlock *rw; 216 int flags; 217 218 rw = rwlock2rw(c); 219 220 MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET | 221 RW_RECURSE | RW_NEW)) == 0); 222 ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock, 223 ("%s: rw_lock not aligned for %s: %p", __func__, name, 224 &rw->rw_lock)); 225 226 flags = LO_UPGRADABLE; 227 if (opts & RW_DUPOK) 228 flags |= LO_DUPOK; 229 if (opts & RW_NOPROFILE) 230 flags |= LO_NOPROFILE; 231 if (!(opts & RW_NOWITNESS)) 232 flags |= LO_WITNESS; 233 if (opts & RW_RECURSE) 234 flags |= LO_RECURSABLE; 235 if (opts & RW_QUIET) 236 flags |= LO_QUIET; 237 if (opts & RW_NEW) 238 flags |= LO_NEW; 239 240 lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags); 241 rw->rw_lock = RW_UNLOCKED; 242 rw->rw_recurse = 0; 243 } 244 245 void 246 _rw_destroy(volatile uintptr_t *c) 247 { 248 struct rwlock *rw; 249 250 rw = rwlock2rw(c); 251 252 KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw)); 253 KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw)); 254 rw->rw_lock = RW_DESTROYED; 255 lock_destroy(&rw->lock_object); 256 } 257 258 void 259 rw_sysinit(void *arg) 260 { 261 struct rw_args *args = arg; 262 263 rw_init((struct rwlock *)args->ra_rw, args->ra_desc); 264 } 265 266 void 267 rw_sysinit_flags(void *arg) 268 { 269 struct rw_args_flags *args = arg; 270 271 rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc, 272 args->ra_flags); 273 } 274 275 int 276 _rw_wowned(const volatile uintptr_t *c) 277 { 278 279 return (rw_wowner(rwlock2rw(c)) == curthread); 280 } 281 282 void 283 _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line) 284 { 285 struct rwlock *rw; 286 uintptr_t tid, v; 287 288 if (SCHEDULER_STOPPED()) 289 return; 290 291 rw = rwlock2rw(c); 292 293 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 294 ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d", 295 curthread, rw->lock_object.lo_name, file, line)); 296 KASSERT(rw->rw_lock != RW_DESTROYED, 297 ("rw_wlock() of destroyed rwlock @ %s:%d", file, line)); 298 WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, 299 line, NULL); 300 tid = (uintptr_t)curthread; 301 v = RW_UNLOCKED; 302 if (!_rw_write_lock_fetch(rw, &v, tid)) 303 _rw_wlock_hard(rw, v, tid, file, line); 304 else 305 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, 306 0, 0, file, line, LOCKSTAT_WRITER); 307 308 LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line); 309 WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line); 310 TD_LOCKS_INC(curthread); 311 } 312 313 int 314 __rw_try_wlock(volatile uintptr_t *c, const char *file, int line) 315 { 316 struct rwlock *rw; 317 int rval; 318 319 if (SCHEDULER_STOPPED()) 320 return (1); 321 322 rw = rwlock2rw(c); 323 324 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 325 ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d", 326 curthread, rw->lock_object.lo_name, file, line)); 327 KASSERT(rw->rw_lock != RW_DESTROYED, 328 ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line)); 329 330 if (rw_wlocked(rw) && 331 (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) { 332 rw->rw_recurse++; 333 rval = 1; 334 } else 335 rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED, 336 (uintptr_t)curthread); 337 338 LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line); 339 if (rval) { 340 WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, 341 file, line); 342 if (!rw_recursed(rw)) 343 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, 344 rw, 0, 0, file, line, LOCKSTAT_WRITER); 345 TD_LOCKS_INC(curthread); 346 } 347 return (rval); 348 } 349 350 void 351 _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line) 352 { 353 struct rwlock *rw; 354 355 if (SCHEDULER_STOPPED()) 356 return; 357 358 rw = rwlock2rw(c); 359 360 KASSERT(rw->rw_lock != RW_DESTROYED, 361 ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line)); 362 __rw_assert(c, RA_WLOCKED, file, line); 363 WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line); 364 LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file, 365 line); 366 if (rw->rw_recurse) 367 rw->rw_recurse--; 368 else 369 _rw_wunlock_hard(rw, (uintptr_t)curthread, file, line); 370 371 TD_LOCKS_DEC(curthread); 372 } 373 374 /* 375 * Determines whether a new reader can acquire a lock. Succeeds if the 376 * reader already owns a read lock and the lock is locked for read to 377 * prevent deadlock from reader recursion. Also succeeds if the lock 378 * is unlocked and has no writer waiters or spinners. Failing otherwise 379 * prioritizes writers before readers. 380 */ 381 #define RW_CAN_READ(_rw) \ 382 ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) & \ 383 (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) == \ 384 RW_LOCK_READ) 385 386 void 387 __rw_rlock(volatile uintptr_t *c, const char *file, int line) 388 { 389 struct rwlock *rw; 390 struct turnstile *ts; 391 #ifdef ADAPTIVE_RWLOCKS 392 volatile struct thread *owner; 393 int spintries = 0; 394 int i; 395 #endif 396 #ifdef LOCK_PROFILING 397 uint64_t waittime = 0; 398 int contested = 0; 399 #endif 400 uintptr_t v; 401 #if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS) 402 struct lock_delay_arg lda; 403 #endif 404 #ifdef KDTRACE_HOOKS 405 uintptr_t state; 406 u_int sleep_cnt = 0; 407 int64_t sleep_time = 0; 408 int64_t all_time = 0; 409 #endif 410 411 if (SCHEDULER_STOPPED()) 412 return; 413 414 #if defined(ADAPTIVE_RWLOCKS) 415 lock_delay_arg_init(&lda, &rw_delay); 416 #elif defined(KDTRACE_HOOKS) 417 lock_delay_arg_init(&lda, NULL); 418 #endif 419 rw = rwlock2rw(c); 420 421 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 422 ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d", 423 curthread, rw->lock_object.lo_name, file, line)); 424 KASSERT(rw->rw_lock != RW_DESTROYED, 425 ("rw_rlock() of destroyed rwlock @ %s:%d", file, line)); 426 KASSERT(rw_wowner(rw) != curthread, 427 ("rw_rlock: wlock already held for %s @ %s:%d", 428 rw->lock_object.lo_name, file, line)); 429 WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL); 430 431 #ifdef KDTRACE_HOOKS 432 all_time -= lockstat_nsecs(&rw->lock_object); 433 #endif 434 v = RW_READ_VALUE(rw); 435 #ifdef KDTRACE_HOOKS 436 state = v; 437 #endif 438 for (;;) { 439 /* 440 * Handle the easy case. If no other thread has a write 441 * lock, then try to bump up the count of read locks. Note 442 * that we have to preserve the current state of the 443 * RW_LOCK_WRITE_WAITERS flag. If we fail to acquire a 444 * read lock, then rw_lock must have changed, so restart 445 * the loop. Note that this handles the case of a 446 * completely unlocked rwlock since such a lock is encoded 447 * as a read lock with no waiters. 448 */ 449 if (RW_CAN_READ(v)) { 450 /* 451 * The RW_LOCK_READ_WAITERS flag should only be set 452 * if the lock has been unlocked and write waiters 453 * were present. 454 */ 455 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, 456 v + RW_ONE_READER)) { 457 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 458 CTR4(KTR_LOCK, 459 "%s: %p succeed %p -> %p", __func__, 460 rw, (void *)v, 461 (void *)(v + RW_ONE_READER)); 462 break; 463 } 464 continue; 465 } 466 #ifdef KDTRACE_HOOKS 467 lda.spin_cnt++; 468 #endif 469 #ifdef HWPMC_HOOKS 470 PMC_SOFT_CALL( , , lock, failed); 471 #endif 472 lock_profile_obtain_lock_failed(&rw->lock_object, 473 &contested, &waittime); 474 475 #ifdef ADAPTIVE_RWLOCKS 476 /* 477 * If the owner is running on another CPU, spin until 478 * the owner stops running or the state of the lock 479 * changes. 480 */ 481 if ((v & RW_LOCK_READ) == 0) { 482 owner = (struct thread *)RW_OWNER(v); 483 if (TD_IS_RUNNING(owner)) { 484 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 485 CTR3(KTR_LOCK, 486 "%s: spinning on %p held by %p", 487 __func__, rw, owner); 488 KTR_STATE1(KTR_SCHED, "thread", 489 sched_tdname(curthread), "spinning", 490 "lockname:\"%s\"", rw->lock_object.lo_name); 491 do { 492 lock_delay(&lda); 493 v = RW_READ_VALUE(rw); 494 owner = lv_rw_wowner(v); 495 } while (owner != NULL && TD_IS_RUNNING(owner)); 496 KTR_STATE0(KTR_SCHED, "thread", 497 sched_tdname(curthread), "running"); 498 continue; 499 } 500 } else if (spintries < rowner_retries) { 501 spintries++; 502 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), 503 "spinning", "lockname:\"%s\"", 504 rw->lock_object.lo_name); 505 for (i = 0; i < rowner_loops; i++) { 506 v = RW_READ_VALUE(rw); 507 if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v)) 508 break; 509 cpu_spinwait(); 510 } 511 v = RW_READ_VALUE(rw); 512 #ifdef KDTRACE_HOOKS 513 lda.spin_cnt += rowner_loops - i; 514 #endif 515 KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), 516 "running"); 517 if (i != rowner_loops) 518 continue; 519 } 520 #endif 521 522 /* 523 * Okay, now it's the hard case. Some other thread already 524 * has a write lock or there are write waiters present, 525 * acquire the turnstile lock so we can begin the process 526 * of blocking. 527 */ 528 ts = turnstile_trywait(&rw->lock_object); 529 530 /* 531 * The lock might have been released while we spun, so 532 * recheck its state and restart the loop if needed. 533 */ 534 v = RW_READ_VALUE(rw); 535 if (RW_CAN_READ(v)) { 536 turnstile_cancel(ts); 537 continue; 538 } 539 540 #ifdef ADAPTIVE_RWLOCKS 541 /* 542 * The current lock owner might have started executing 543 * on another CPU (or the lock could have changed 544 * owners) while we were waiting on the turnstile 545 * chain lock. If so, drop the turnstile lock and try 546 * again. 547 */ 548 if ((v & RW_LOCK_READ) == 0) { 549 owner = (struct thread *)RW_OWNER(v); 550 if (TD_IS_RUNNING(owner)) { 551 turnstile_cancel(ts); 552 continue; 553 } 554 } 555 #endif 556 557 /* 558 * The lock is held in write mode or it already has waiters. 559 */ 560 MPASS(!RW_CAN_READ(v)); 561 562 /* 563 * If the RW_LOCK_READ_WAITERS flag is already set, then 564 * we can go ahead and block. If it is not set then try 565 * to set it. If we fail to set it drop the turnstile 566 * lock and restart the loop. 567 */ 568 if (!(v & RW_LOCK_READ_WAITERS)) { 569 if (!atomic_cmpset_ptr(&rw->rw_lock, v, 570 v | RW_LOCK_READ_WAITERS)) { 571 turnstile_cancel(ts); 572 v = RW_READ_VALUE(rw); 573 continue; 574 } 575 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 576 CTR2(KTR_LOCK, "%s: %p set read waiters flag", 577 __func__, rw); 578 } 579 580 /* 581 * We were unable to acquire the lock and the read waiters 582 * flag is set, so we must block on the turnstile. 583 */ 584 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 585 CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__, 586 rw); 587 #ifdef KDTRACE_HOOKS 588 sleep_time -= lockstat_nsecs(&rw->lock_object); 589 #endif 590 turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE); 591 #ifdef KDTRACE_HOOKS 592 sleep_time += lockstat_nsecs(&rw->lock_object); 593 sleep_cnt++; 594 #endif 595 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 596 CTR2(KTR_LOCK, "%s: %p resuming from turnstile", 597 __func__, rw); 598 v = RW_READ_VALUE(rw); 599 } 600 #ifdef KDTRACE_HOOKS 601 all_time += lockstat_nsecs(&rw->lock_object); 602 if (sleep_time) 603 LOCKSTAT_RECORD4(rw__block, rw, sleep_time, 604 LOCKSTAT_READER, (state & RW_LOCK_READ) == 0, 605 (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); 606 607 /* Record only the loops spinning and not sleeping. */ 608 if (lda.spin_cnt > sleep_cnt) 609 LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time, 610 LOCKSTAT_READER, (state & RW_LOCK_READ) == 0, 611 (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); 612 #endif 613 /* 614 * TODO: acquire "owner of record" here. Here be turnstile dragons 615 * however. turnstiles don't like owners changing between calls to 616 * turnstile_wait() currently. 617 */ 618 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested, 619 waittime, file, line, LOCKSTAT_READER); 620 LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line); 621 WITNESS_LOCK(&rw->lock_object, 0, file, line); 622 TD_LOCKS_INC(curthread); 623 curthread->td_rw_rlocks++; 624 } 625 626 int 627 __rw_try_rlock(volatile uintptr_t *c, const char *file, int line) 628 { 629 struct rwlock *rw; 630 uintptr_t x; 631 632 if (SCHEDULER_STOPPED()) 633 return (1); 634 635 rw = rwlock2rw(c); 636 637 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 638 ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d", 639 curthread, rw->lock_object.lo_name, file, line)); 640 641 for (;;) { 642 x = rw->rw_lock; 643 KASSERT(rw->rw_lock != RW_DESTROYED, 644 ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line)); 645 if (!(x & RW_LOCK_READ)) 646 break; 647 if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) { 648 LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file, 649 line); 650 WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line); 651 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, 652 rw, 0, 0, file, line, LOCKSTAT_READER); 653 TD_LOCKS_INC(curthread); 654 curthread->td_rw_rlocks++; 655 return (1); 656 } 657 } 658 659 LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line); 660 return (0); 661 } 662 663 void 664 _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line) 665 { 666 struct rwlock *rw; 667 struct turnstile *ts; 668 uintptr_t x, v, queue; 669 670 if (SCHEDULER_STOPPED()) 671 return; 672 673 rw = rwlock2rw(c); 674 675 KASSERT(rw->rw_lock != RW_DESTROYED, 676 ("rw_runlock() of destroyed rwlock @ %s:%d", file, line)); 677 __rw_assert(c, RA_RLOCKED, file, line); 678 WITNESS_UNLOCK(&rw->lock_object, 0, file, line); 679 LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line); 680 681 /* TODO: drop "owner of record" here. */ 682 x = RW_READ_VALUE(rw); 683 for (;;) { 684 /* 685 * See if there is more than one read lock held. If so, 686 * just drop one and return. 687 */ 688 if (RW_READERS(x) > 1) { 689 if (atomic_fcmpset_rel_ptr(&rw->rw_lock, &x, 690 x - RW_ONE_READER)) { 691 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 692 CTR4(KTR_LOCK, 693 "%s: %p succeeded %p -> %p", 694 __func__, rw, (void *)x, 695 (void *)(x - RW_ONE_READER)); 696 break; 697 } 698 continue; 699 } 700 /* 701 * If there aren't any waiters for a write lock, then try 702 * to drop it quickly. 703 */ 704 if (!(x & RW_LOCK_WAITERS)) { 705 MPASS((x & ~RW_LOCK_WRITE_SPINNER) == 706 RW_READERS_LOCK(1)); 707 if (atomic_fcmpset_rel_ptr(&rw->rw_lock, &x, 708 RW_UNLOCKED)) { 709 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 710 CTR2(KTR_LOCK, "%s: %p last succeeded", 711 __func__, rw); 712 break; 713 } 714 continue; 715 } 716 /* 717 * Ok, we know we have waiters and we think we are the 718 * last reader, so grab the turnstile lock. 719 */ 720 turnstile_chain_lock(&rw->lock_object); 721 v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER); 722 MPASS(v & RW_LOCK_WAITERS); 723 724 /* 725 * Try to drop our lock leaving the lock in a unlocked 726 * state. 727 * 728 * If you wanted to do explicit lock handoff you'd have to 729 * do it here. You'd also want to use turnstile_signal() 730 * and you'd have to handle the race where a higher 731 * priority thread blocks on the write lock before the 732 * thread you wakeup actually runs and have the new thread 733 * "steal" the lock. For now it's a lot simpler to just 734 * wakeup all of the waiters. 735 * 736 * As above, if we fail, then another thread might have 737 * acquired a read lock, so drop the turnstile lock and 738 * restart. 739 */ 740 x = RW_UNLOCKED; 741 if (v & RW_LOCK_WRITE_WAITERS) { 742 queue = TS_EXCLUSIVE_QUEUE; 743 x |= (v & RW_LOCK_READ_WAITERS); 744 } else 745 queue = TS_SHARED_QUEUE; 746 if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v, 747 x)) { 748 turnstile_chain_unlock(&rw->lock_object); 749 x = RW_READ_VALUE(rw); 750 continue; 751 } 752 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 753 CTR2(KTR_LOCK, "%s: %p last succeeded with waiters", 754 __func__, rw); 755 756 /* 757 * Ok. The lock is released and all that's left is to 758 * wake up the waiters. Note that the lock might not be 759 * free anymore, but in that case the writers will just 760 * block again if they run before the new lock holder(s) 761 * release the lock. 762 */ 763 ts = turnstile_lookup(&rw->lock_object); 764 MPASS(ts != NULL); 765 turnstile_broadcast(ts, queue); 766 turnstile_unpend(ts, TS_SHARED_LOCK); 767 turnstile_chain_unlock(&rw->lock_object); 768 break; 769 } 770 LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER); 771 TD_LOCKS_DEC(curthread); 772 curthread->td_rw_rlocks--; 773 } 774 775 /* 776 * This function is called when we are unable to obtain a write lock on the 777 * first try. This means that at least one other thread holds either a 778 * read or write lock. 779 */ 780 void 781 __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, 782 const char *file, int line) 783 { 784 struct rwlock *rw; 785 struct turnstile *ts; 786 #ifdef ADAPTIVE_RWLOCKS 787 volatile struct thread *owner; 788 int spintries = 0; 789 int i; 790 #endif 791 uintptr_t x; 792 #ifdef LOCK_PROFILING 793 uint64_t waittime = 0; 794 int contested = 0; 795 #endif 796 #if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS) 797 struct lock_delay_arg lda; 798 #endif 799 #ifdef KDTRACE_HOOKS 800 uintptr_t state; 801 u_int sleep_cnt = 0; 802 int64_t sleep_time = 0; 803 int64_t all_time = 0; 804 #endif 805 806 if (SCHEDULER_STOPPED()) 807 return; 808 809 #if defined(ADAPTIVE_RWLOCKS) 810 lock_delay_arg_init(&lda, &rw_delay); 811 #elif defined(KDTRACE_HOOKS) 812 lock_delay_arg_init(&lda, NULL); 813 #endif 814 rw = rwlock2rw(c); 815 816 if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) { 817 KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE, 818 ("%s: recursing but non-recursive rw %s @ %s:%d\n", 819 __func__, rw->lock_object.lo_name, file, line)); 820 rw->rw_recurse++; 821 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 822 CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw); 823 return; 824 } 825 826 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 827 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__, 828 rw->lock_object.lo_name, (void *)rw->rw_lock, file, line); 829 830 #ifdef KDTRACE_HOOKS 831 all_time -= lockstat_nsecs(&rw->lock_object); 832 state = v; 833 #endif 834 for (;;) { 835 if (v == RW_UNLOCKED) { 836 if (_rw_write_lock_fetch(rw, &v, tid)) 837 break; 838 continue; 839 } 840 #ifdef KDTRACE_HOOKS 841 lda.spin_cnt++; 842 #endif 843 #ifdef HWPMC_HOOKS 844 PMC_SOFT_CALL( , , lock, failed); 845 #endif 846 lock_profile_obtain_lock_failed(&rw->lock_object, 847 &contested, &waittime); 848 #ifdef ADAPTIVE_RWLOCKS 849 /* 850 * If the lock is write locked and the owner is 851 * running on another CPU, spin until the owner stops 852 * running or the state of the lock changes. 853 */ 854 owner = lv_rw_wowner(v); 855 if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) { 856 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 857 CTR3(KTR_LOCK, "%s: spinning on %p held by %p", 858 __func__, rw, owner); 859 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), 860 "spinning", "lockname:\"%s\"", 861 rw->lock_object.lo_name); 862 do { 863 lock_delay(&lda); 864 v = RW_READ_VALUE(rw); 865 owner = lv_rw_wowner(v); 866 } while (owner != NULL && TD_IS_RUNNING(owner)); 867 KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), 868 "running"); 869 continue; 870 } 871 if ((v & RW_LOCK_READ) && RW_READERS(v) && 872 spintries < rowner_retries) { 873 if (!(v & RW_LOCK_WRITE_SPINNER)) { 874 if (!atomic_cmpset_ptr(&rw->rw_lock, v, 875 v | RW_LOCK_WRITE_SPINNER)) { 876 v = RW_READ_VALUE(rw); 877 continue; 878 } 879 } 880 spintries++; 881 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), 882 "spinning", "lockname:\"%s\"", 883 rw->lock_object.lo_name); 884 for (i = 0; i < rowner_loops; i++) { 885 if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0) 886 break; 887 cpu_spinwait(); 888 } 889 KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), 890 "running"); 891 v = RW_READ_VALUE(rw); 892 #ifdef KDTRACE_HOOKS 893 lda.spin_cnt += rowner_loops - i; 894 #endif 895 if (i != rowner_loops) 896 continue; 897 } 898 #endif 899 ts = turnstile_trywait(&rw->lock_object); 900 v = RW_READ_VALUE(rw); 901 902 #ifdef ADAPTIVE_RWLOCKS 903 /* 904 * The current lock owner might have started executing 905 * on another CPU (or the lock could have changed 906 * owners) while we were waiting on the turnstile 907 * chain lock. If so, drop the turnstile lock and try 908 * again. 909 */ 910 if (!(v & RW_LOCK_READ)) { 911 owner = (struct thread *)RW_OWNER(v); 912 if (TD_IS_RUNNING(owner)) { 913 turnstile_cancel(ts); 914 continue; 915 } 916 } 917 #endif 918 /* 919 * Check for the waiters flags about this rwlock. 920 * If the lock was released, without maintain any pending 921 * waiters queue, simply try to acquire it. 922 * If a pending waiters queue is present, claim the lock 923 * ownership and maintain the pending queue. 924 */ 925 x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER); 926 if ((v & ~x) == RW_UNLOCKED) { 927 x &= ~RW_LOCK_WRITE_SPINNER; 928 if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) { 929 if (x) 930 turnstile_claim(ts); 931 else 932 turnstile_cancel(ts); 933 break; 934 } 935 turnstile_cancel(ts); 936 v = RW_READ_VALUE(rw); 937 continue; 938 } 939 /* 940 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to 941 * set it. If we fail to set it, then loop back and try 942 * again. 943 */ 944 if (!(v & RW_LOCK_WRITE_WAITERS)) { 945 if (!atomic_cmpset_ptr(&rw->rw_lock, v, 946 v | RW_LOCK_WRITE_WAITERS)) { 947 turnstile_cancel(ts); 948 v = RW_READ_VALUE(rw); 949 continue; 950 } 951 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 952 CTR2(KTR_LOCK, "%s: %p set write waiters flag", 953 __func__, rw); 954 } 955 /* 956 * We were unable to acquire the lock and the write waiters 957 * flag is set, so we must block on the turnstile. 958 */ 959 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 960 CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__, 961 rw); 962 #ifdef KDTRACE_HOOKS 963 sleep_time -= lockstat_nsecs(&rw->lock_object); 964 #endif 965 turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE); 966 #ifdef KDTRACE_HOOKS 967 sleep_time += lockstat_nsecs(&rw->lock_object); 968 sleep_cnt++; 969 #endif 970 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 971 CTR2(KTR_LOCK, "%s: %p resuming from turnstile", 972 __func__, rw); 973 #ifdef ADAPTIVE_RWLOCKS 974 spintries = 0; 975 #endif 976 v = RW_READ_VALUE(rw); 977 } 978 #ifdef KDTRACE_HOOKS 979 all_time += lockstat_nsecs(&rw->lock_object); 980 if (sleep_time) 981 LOCKSTAT_RECORD4(rw__block, rw, sleep_time, 982 LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0, 983 (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); 984 985 /* Record only the loops spinning and not sleeping. */ 986 if (lda.spin_cnt > sleep_cnt) 987 LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time, 988 LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0, 989 (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); 990 #endif 991 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested, 992 waittime, file, line, LOCKSTAT_WRITER); 993 } 994 995 /* 996 * This function is called if the first try at releasing a write lock failed. 997 * This means that one of the 2 waiter bits must be set indicating that at 998 * least one thread is waiting on this lock. 999 */ 1000 void 1001 __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file, 1002 int line) 1003 { 1004 struct rwlock *rw; 1005 struct turnstile *ts; 1006 uintptr_t v; 1007 int queue; 1008 1009 if (SCHEDULER_STOPPED()) 1010 return; 1011 1012 rw = rwlock2rw(c); 1013 MPASS(!rw_recursed(rw)); 1014 1015 LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, 1016 LOCKSTAT_WRITER); 1017 if (_rw_write_unlock(rw, tid)) 1018 return; 1019 1020 KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS), 1021 ("%s: neither of the waiter flags are set", __func__)); 1022 1023 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 1024 CTR2(KTR_LOCK, "%s: %p contested", __func__, rw); 1025 1026 turnstile_chain_lock(&rw->lock_object); 1027 ts = turnstile_lookup(&rw->lock_object); 1028 MPASS(ts != NULL); 1029 1030 /* 1031 * Use the same algo as sx locks for now. Prefer waking up shared 1032 * waiters if we have any over writers. This is probably not ideal. 1033 * 1034 * 'v' is the value we are going to write back to rw_lock. If we 1035 * have waiters on both queues, we need to preserve the state of 1036 * the waiter flag for the queue we don't wake up. For now this is 1037 * hardcoded for the algorithm mentioned above. 1038 * 1039 * In the case of both readers and writers waiting we wakeup the 1040 * readers but leave the RW_LOCK_WRITE_WAITERS flag set. If a 1041 * new writer comes in before a reader it will claim the lock up 1042 * above. There is probably a potential priority inversion in 1043 * there that could be worked around either by waking both queues 1044 * of waiters or doing some complicated lock handoff gymnastics. 1045 */ 1046 v = RW_UNLOCKED; 1047 if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) { 1048 queue = TS_EXCLUSIVE_QUEUE; 1049 v |= (rw->rw_lock & RW_LOCK_READ_WAITERS); 1050 } else 1051 queue = TS_SHARED_QUEUE; 1052 1053 /* Wake up all waiters for the specific queue. */ 1054 if (LOCK_LOG_TEST(&rw->lock_object, 0)) 1055 CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw, 1056 queue == TS_SHARED_QUEUE ? "read" : "write"); 1057 turnstile_broadcast(ts, queue); 1058 atomic_store_rel_ptr(&rw->rw_lock, v); 1059 turnstile_unpend(ts, TS_EXCLUSIVE_LOCK); 1060 turnstile_chain_unlock(&rw->lock_object); 1061 } 1062 1063 /* 1064 * Attempt to do a non-blocking upgrade from a read lock to a write 1065 * lock. This will only succeed if this thread holds a single read 1066 * lock. Returns true if the upgrade succeeded and false otherwise. 1067 */ 1068 int 1069 __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line) 1070 { 1071 struct rwlock *rw; 1072 uintptr_t v, x, tid; 1073 struct turnstile *ts; 1074 int success; 1075 1076 if (SCHEDULER_STOPPED()) 1077 return (1); 1078 1079 rw = rwlock2rw(c); 1080 1081 KASSERT(rw->rw_lock != RW_DESTROYED, 1082 ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line)); 1083 __rw_assert(c, RA_RLOCKED, file, line); 1084 1085 /* 1086 * Attempt to switch from one reader to a writer. If there 1087 * are any write waiters, then we will have to lock the 1088 * turnstile first to prevent races with another writer 1089 * calling turnstile_wait() before we have claimed this 1090 * turnstile. So, do the simple case of no waiters first. 1091 */ 1092 tid = (uintptr_t)curthread; 1093 success = 0; 1094 for (;;) { 1095 v = rw->rw_lock; 1096 if (RW_READERS(v) > 1) 1097 break; 1098 if (!(v & RW_LOCK_WAITERS)) { 1099 success = atomic_cmpset_ptr(&rw->rw_lock, v, tid); 1100 if (!success) 1101 continue; 1102 break; 1103 } 1104 1105 /* 1106 * Ok, we think we have waiters, so lock the turnstile. 1107 */ 1108 ts = turnstile_trywait(&rw->lock_object); 1109 v = rw->rw_lock; 1110 if (RW_READERS(v) > 1) { 1111 turnstile_cancel(ts); 1112 break; 1113 } 1114 /* 1115 * Try to switch from one reader to a writer again. This time 1116 * we honor the current state of the waiters flags. 1117 * If we obtain the lock with the flags set, then claim 1118 * ownership of the turnstile. 1119 */ 1120 x = rw->rw_lock & RW_LOCK_WAITERS; 1121 success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x); 1122 if (success) { 1123 if (x) 1124 turnstile_claim(ts); 1125 else 1126 turnstile_cancel(ts); 1127 break; 1128 } 1129 turnstile_cancel(ts); 1130 } 1131 LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line); 1132 if (success) { 1133 curthread->td_rw_rlocks--; 1134 WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, 1135 file, line); 1136 LOCKSTAT_RECORD0(rw__upgrade, rw); 1137 } 1138 return (success); 1139 } 1140 1141 /* 1142 * Downgrade a write lock into a single read lock. 1143 */ 1144 void 1145 __rw_downgrade(volatile uintptr_t *c, const char *file, int line) 1146 { 1147 struct rwlock *rw; 1148 struct turnstile *ts; 1149 uintptr_t tid, v; 1150 int rwait, wwait; 1151 1152 if (SCHEDULER_STOPPED()) 1153 return; 1154 1155 rw = rwlock2rw(c); 1156 1157 KASSERT(rw->rw_lock != RW_DESTROYED, 1158 ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line)); 1159 __rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line); 1160 #ifndef INVARIANTS 1161 if (rw_recursed(rw)) 1162 panic("downgrade of a recursed lock"); 1163 #endif 1164 1165 WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line); 1166 1167 /* 1168 * Convert from a writer to a single reader. First we handle 1169 * the easy case with no waiters. If there are any waiters, we 1170 * lock the turnstile and "disown" the lock. 1171 */ 1172 tid = (uintptr_t)curthread; 1173 if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1))) 1174 goto out; 1175 1176 /* 1177 * Ok, we think we have waiters, so lock the turnstile so we can 1178 * read the waiter flags without any races. 1179 */ 1180 turnstile_chain_lock(&rw->lock_object); 1181 v = rw->rw_lock & RW_LOCK_WAITERS; 1182 rwait = v & RW_LOCK_READ_WAITERS; 1183 wwait = v & RW_LOCK_WRITE_WAITERS; 1184 MPASS(rwait | wwait); 1185 1186 /* 1187 * Downgrade from a write lock while preserving waiters flag 1188 * and give up ownership of the turnstile. 1189 */ 1190 ts = turnstile_lookup(&rw->lock_object); 1191 MPASS(ts != NULL); 1192 if (!wwait) 1193 v &= ~RW_LOCK_READ_WAITERS; 1194 atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v); 1195 /* 1196 * Wake other readers if there are no writers pending. Otherwise they 1197 * won't be able to acquire the lock anyway. 1198 */ 1199 if (rwait && !wwait) { 1200 turnstile_broadcast(ts, TS_SHARED_QUEUE); 1201 turnstile_unpend(ts, TS_EXCLUSIVE_LOCK); 1202 } else 1203 turnstile_disown(ts); 1204 turnstile_chain_unlock(&rw->lock_object); 1205 out: 1206 curthread->td_rw_rlocks++; 1207 LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line); 1208 LOCKSTAT_RECORD0(rw__downgrade, rw); 1209 } 1210 1211 #ifdef INVARIANT_SUPPORT 1212 #ifndef INVARIANTS 1213 #undef __rw_assert 1214 #endif 1215 1216 /* 1217 * In the non-WITNESS case, rw_assert() can only detect that at least 1218 * *some* thread owns an rlock, but it cannot guarantee that *this* 1219 * thread owns an rlock. 1220 */ 1221 void 1222 __rw_assert(const volatile uintptr_t *c, int what, const char *file, int line) 1223 { 1224 const struct rwlock *rw; 1225 1226 if (panicstr != NULL) 1227 return; 1228 1229 rw = rwlock2rw(c); 1230 1231 switch (what) { 1232 case RA_LOCKED: 1233 case RA_LOCKED | RA_RECURSED: 1234 case RA_LOCKED | RA_NOTRECURSED: 1235 case RA_RLOCKED: 1236 case RA_RLOCKED | RA_RECURSED: 1237 case RA_RLOCKED | RA_NOTRECURSED: 1238 #ifdef WITNESS 1239 witness_assert(&rw->lock_object, what, file, line); 1240 #else 1241 /* 1242 * If some other thread has a write lock or we have one 1243 * and are asserting a read lock, fail. Also, if no one 1244 * has a lock at all, fail. 1245 */ 1246 if (rw->rw_lock == RW_UNLOCKED || 1247 (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED || 1248 rw_wowner(rw) != curthread))) 1249 panic("Lock %s not %slocked @ %s:%d\n", 1250 rw->lock_object.lo_name, (what & RA_RLOCKED) ? 1251 "read " : "", file, line); 1252 1253 if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) { 1254 if (rw_recursed(rw)) { 1255 if (what & RA_NOTRECURSED) 1256 panic("Lock %s recursed @ %s:%d\n", 1257 rw->lock_object.lo_name, file, 1258 line); 1259 } else if (what & RA_RECURSED) 1260 panic("Lock %s not recursed @ %s:%d\n", 1261 rw->lock_object.lo_name, file, line); 1262 } 1263 #endif 1264 break; 1265 case RA_WLOCKED: 1266 case RA_WLOCKED | RA_RECURSED: 1267 case RA_WLOCKED | RA_NOTRECURSED: 1268 if (rw_wowner(rw) != curthread) 1269 panic("Lock %s not exclusively locked @ %s:%d\n", 1270 rw->lock_object.lo_name, file, line); 1271 if (rw_recursed(rw)) { 1272 if (what & RA_NOTRECURSED) 1273 panic("Lock %s recursed @ %s:%d\n", 1274 rw->lock_object.lo_name, file, line); 1275 } else if (what & RA_RECURSED) 1276 panic("Lock %s not recursed @ %s:%d\n", 1277 rw->lock_object.lo_name, file, line); 1278 break; 1279 case RA_UNLOCKED: 1280 #ifdef WITNESS 1281 witness_assert(&rw->lock_object, what, file, line); 1282 #else 1283 /* 1284 * If we hold a write lock fail. We can't reliably check 1285 * to see if we hold a read lock or not. 1286 */ 1287 if (rw_wowner(rw) == curthread) 1288 panic("Lock %s exclusively locked @ %s:%d\n", 1289 rw->lock_object.lo_name, file, line); 1290 #endif 1291 break; 1292 default: 1293 panic("Unknown rw lock assertion: %d @ %s:%d", what, file, 1294 line); 1295 } 1296 } 1297 #endif /* INVARIANT_SUPPORT */ 1298 1299 #ifdef DDB 1300 void 1301 db_show_rwlock(const struct lock_object *lock) 1302 { 1303 const struct rwlock *rw; 1304 struct thread *td; 1305 1306 rw = (const struct rwlock *)lock; 1307 1308 db_printf(" state: "); 1309 if (rw->rw_lock == RW_UNLOCKED) 1310 db_printf("UNLOCKED\n"); 1311 else if (rw->rw_lock == RW_DESTROYED) { 1312 db_printf("DESTROYED\n"); 1313 return; 1314 } else if (rw->rw_lock & RW_LOCK_READ) 1315 db_printf("RLOCK: %ju locks\n", 1316 (uintmax_t)(RW_READERS(rw->rw_lock))); 1317 else { 1318 td = rw_wowner(rw); 1319 db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1320 td->td_tid, td->td_proc->p_pid, td->td_name); 1321 if (rw_recursed(rw)) 1322 db_printf(" recursed: %u\n", rw->rw_recurse); 1323 } 1324 db_printf(" waiters: "); 1325 switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) { 1326 case RW_LOCK_READ_WAITERS: 1327 db_printf("readers\n"); 1328 break; 1329 case RW_LOCK_WRITE_WAITERS: 1330 db_printf("writers\n"); 1331 break; 1332 case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS: 1333 db_printf("readers and writers\n"); 1334 break; 1335 default: 1336 db_printf("none\n"); 1337 break; 1338 } 1339 } 1340 1341 #endif 1342