1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the author nor the names of any co-contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 /* 33 * Machine independent bits of reader/writer lock implementation. 34 */ 35 36 #include <sys/cdefs.h> 37 __FBSDID("$FreeBSD$"); 38 39 #include "opt_ddb.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 44 #include <sys/kernel.h> 45 #include <sys/kdb.h> 46 #include <sys/ktr.h> 47 #include <sys/lock.h> 48 #include <sys/mutex.h> 49 #include <sys/proc.h> 50 #include <sys/rmlock.h> 51 #include <sys/sched.h> 52 #include <sys/smp.h> 53 #include <sys/turnstile.h> 54 #include <sys/lock_profile.h> 55 #include <machine/cpu.h> 56 57 #ifdef DDB 58 #include <ddb/ddb.h> 59 #endif 60 61 /* 62 * A cookie to mark destroyed rmlocks. This is stored in the head of 63 * rm_activeReaders. 64 */ 65 #define RM_DESTROYED ((void *)0xdead) 66 67 #define rm_destroyed(rm) \ 68 (LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED) 69 70 #define RMPF_ONQUEUE 1 71 #define RMPF_SIGNAL 2 72 73 #ifndef INVARIANTS 74 #define _rm_assert(c, what, file, line) 75 #endif 76 77 static void assert_rm(const struct lock_object *lock, int what); 78 #ifdef DDB 79 static void db_show_rm(const struct lock_object *lock); 80 #endif 81 static void lock_rm(struct lock_object *lock, uintptr_t how); 82 #ifdef KDTRACE_HOOKS 83 static int owner_rm(const struct lock_object *lock, struct thread **owner); 84 #endif 85 static uintptr_t unlock_rm(struct lock_object *lock); 86 87 struct lock_class lock_class_rm = { 88 .lc_name = "rm", 89 .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE, 90 .lc_assert = assert_rm, 91 #ifdef DDB 92 .lc_ddb_show = db_show_rm, 93 #endif 94 .lc_lock = lock_rm, 95 .lc_unlock = unlock_rm, 96 #ifdef KDTRACE_HOOKS 97 .lc_owner = owner_rm, 98 #endif 99 }; 100 101 struct lock_class lock_class_rm_sleepable = { 102 .lc_name = "sleepable rm", 103 .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE, 104 .lc_assert = assert_rm, 105 #ifdef DDB 106 .lc_ddb_show = db_show_rm, 107 #endif 108 .lc_lock = lock_rm, 109 .lc_unlock = unlock_rm, 110 #ifdef KDTRACE_HOOKS 111 .lc_owner = owner_rm, 112 #endif 113 }; 114 115 static void 116 assert_rm(const struct lock_object *lock, int what) 117 { 118 119 rm_assert((const struct rmlock *)lock, what); 120 } 121 122 static void 123 lock_rm(struct lock_object *lock, uintptr_t how) 124 { 125 struct rmlock *rm; 126 struct rm_priotracker *tracker; 127 128 rm = (struct rmlock *)lock; 129 if (how == 0) 130 rm_wlock(rm); 131 else { 132 tracker = (struct rm_priotracker *)how; 133 rm_rlock(rm, tracker); 134 } 135 } 136 137 static uintptr_t 138 unlock_rm(struct lock_object *lock) 139 { 140 struct thread *td; 141 struct pcpu *pc; 142 struct rmlock *rm; 143 struct rm_queue *queue; 144 struct rm_priotracker *tracker; 145 uintptr_t how; 146 147 rm = (struct rmlock *)lock; 148 tracker = NULL; 149 how = 0; 150 rm_assert(rm, RA_LOCKED | RA_NOTRECURSED); 151 if (rm_wowned(rm)) 152 rm_wunlock(rm); 153 else { 154 /* 155 * Find the right rm_priotracker structure for curthread. 156 * The guarantee about its uniqueness is given by the fact 157 * we already asserted the lock wasn't recursively acquired. 158 */ 159 critical_enter(); 160 td = curthread; 161 pc = get_pcpu(); 162 for (queue = pc->pc_rm_queue.rmq_next; 163 queue != &pc->pc_rm_queue; queue = queue->rmq_next) { 164 tracker = (struct rm_priotracker *)queue; 165 if ((tracker->rmp_rmlock == rm) && 166 (tracker->rmp_thread == td)) { 167 how = (uintptr_t)tracker; 168 break; 169 } 170 } 171 KASSERT(tracker != NULL, 172 ("rm_priotracker is non-NULL when lock held in read mode")); 173 critical_exit(); 174 rm_runlock(rm, tracker); 175 } 176 return (how); 177 } 178 179 #ifdef KDTRACE_HOOKS 180 static int 181 owner_rm(const struct lock_object *lock, struct thread **owner) 182 { 183 const struct rmlock *rm; 184 struct lock_class *lc; 185 186 rm = (const struct rmlock *)lock; 187 lc = LOCK_CLASS(&rm->rm_wlock_object); 188 return (lc->lc_owner(&rm->rm_wlock_object, owner)); 189 } 190 #endif 191 192 static struct mtx rm_spinlock; 193 194 MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN); 195 196 /* 197 * Add or remove tracker from per-cpu list. 198 * 199 * The per-cpu list can be traversed at any time in forward direction from an 200 * interrupt on the *local* cpu. 201 */ 202 static void inline 203 rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker) 204 { 205 struct rm_queue *next; 206 207 /* Initialize all tracker pointers */ 208 tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue; 209 next = pc->pc_rm_queue.rmq_next; 210 tracker->rmp_cpuQueue.rmq_next = next; 211 212 /* rmq_prev is not used during froward traversal. */ 213 next->rmq_prev = &tracker->rmp_cpuQueue; 214 215 /* Update pointer to first element. */ 216 pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue; 217 } 218 219 /* 220 * Return a count of the number of trackers the thread 'td' already 221 * has on this CPU for the lock 'rm'. 222 */ 223 static int 224 rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm, 225 const struct thread *td) 226 { 227 struct rm_queue *queue; 228 struct rm_priotracker *tracker; 229 int count; 230 231 count = 0; 232 for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; 233 queue = queue->rmq_next) { 234 tracker = (struct rm_priotracker *)queue; 235 if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td)) 236 count++; 237 } 238 return (count); 239 } 240 241 static void inline 242 rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker) 243 { 244 struct rm_queue *next, *prev; 245 246 next = tracker->rmp_cpuQueue.rmq_next; 247 prev = tracker->rmp_cpuQueue.rmq_prev; 248 249 /* Not used during forward traversal. */ 250 next->rmq_prev = prev; 251 252 /* Remove from list. */ 253 prev->rmq_next = next; 254 } 255 256 static void 257 rm_cleanIPI(void *arg) 258 { 259 struct pcpu *pc; 260 struct rmlock *rm = arg; 261 struct rm_priotracker *tracker; 262 struct rm_queue *queue; 263 pc = get_pcpu(); 264 265 for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; 266 queue = queue->rmq_next) { 267 tracker = (struct rm_priotracker *)queue; 268 if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) { 269 tracker->rmp_flags = RMPF_ONQUEUE; 270 mtx_lock_spin(&rm_spinlock); 271 LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker, 272 rmp_qentry); 273 mtx_unlock_spin(&rm_spinlock); 274 } 275 } 276 } 277 278 void 279 rm_init_flags(struct rmlock *rm, const char *name, int opts) 280 { 281 struct lock_class *lc; 282 int liflags, xflags; 283 284 liflags = 0; 285 if (!(opts & RM_NOWITNESS)) 286 liflags |= LO_WITNESS; 287 if (opts & RM_RECURSE) 288 liflags |= LO_RECURSABLE; 289 if (opts & RM_NEW) 290 liflags |= LO_NEW; 291 rm->rm_writecpus = all_cpus; 292 LIST_INIT(&rm->rm_activeReaders); 293 if (opts & RM_SLEEPABLE) { 294 liflags |= LO_SLEEPABLE; 295 lc = &lock_class_rm_sleepable; 296 xflags = (opts & RM_NEW ? SX_NEW : 0); 297 sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", 298 xflags | SX_NOWITNESS); 299 } else { 300 lc = &lock_class_rm; 301 xflags = (opts & RM_NEW ? MTX_NEW : 0); 302 mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", 303 xflags | MTX_NOWITNESS); 304 } 305 lock_init(&rm->lock_object, lc, name, NULL, liflags); 306 } 307 308 void 309 rm_init(struct rmlock *rm, const char *name) 310 { 311 312 rm_init_flags(rm, name, 0); 313 } 314 315 void 316 rm_destroy(struct rmlock *rm) 317 { 318 319 rm_assert(rm, RA_UNLOCKED); 320 LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED; 321 if (rm->lock_object.lo_flags & LO_SLEEPABLE) 322 sx_destroy(&rm->rm_lock_sx); 323 else 324 mtx_destroy(&rm->rm_lock_mtx); 325 lock_destroy(&rm->lock_object); 326 } 327 328 int 329 rm_wowned(const struct rmlock *rm) 330 { 331 332 if (rm->lock_object.lo_flags & LO_SLEEPABLE) 333 return (sx_xlocked(&rm->rm_lock_sx)); 334 else 335 return (mtx_owned(&rm->rm_lock_mtx)); 336 } 337 338 void 339 rm_sysinit(void *arg) 340 { 341 struct rm_args *args = arg; 342 343 rm_init(args->ra_rm, args->ra_desc); 344 } 345 346 void 347 rm_sysinit_flags(void *arg) 348 { 349 struct rm_args_flags *args = arg; 350 351 rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts); 352 } 353 354 static int 355 _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock) 356 { 357 struct pcpu *pc; 358 359 critical_enter(); 360 pc = get_pcpu(); 361 362 /* Check if we just need to do a proper critical_exit. */ 363 if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) { 364 critical_exit(); 365 return (1); 366 } 367 368 /* Remove our tracker from the per-cpu list. */ 369 rm_tracker_remove(pc, tracker); 370 371 /* Check to see if the IPI granted us the lock after all. */ 372 if (tracker->rmp_flags) { 373 /* Just add back tracker - we hold the lock. */ 374 rm_tracker_add(pc, tracker); 375 critical_exit(); 376 return (1); 377 } 378 379 /* 380 * We allow readers to acquire a lock even if a writer is blocked if 381 * the lock is recursive and the reader already holds the lock. 382 */ 383 if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) { 384 /* 385 * Just grant the lock if this thread already has a tracker 386 * for this lock on the per-cpu queue. 387 */ 388 if (rm_trackers_present(pc, rm, curthread) != 0) { 389 mtx_lock_spin(&rm_spinlock); 390 LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker, 391 rmp_qentry); 392 tracker->rmp_flags = RMPF_ONQUEUE; 393 mtx_unlock_spin(&rm_spinlock); 394 rm_tracker_add(pc, tracker); 395 critical_exit(); 396 return (1); 397 } 398 } 399 400 sched_unpin(); 401 critical_exit(); 402 403 if (trylock) { 404 if (rm->lock_object.lo_flags & LO_SLEEPABLE) { 405 if (!sx_try_xlock(&rm->rm_lock_sx)) 406 return (0); 407 } else { 408 if (!mtx_trylock(&rm->rm_lock_mtx)) 409 return (0); 410 } 411 } else { 412 if (rm->lock_object.lo_flags & LO_SLEEPABLE) { 413 THREAD_SLEEPING_OK(); 414 sx_xlock(&rm->rm_lock_sx); 415 THREAD_NO_SLEEPING(); 416 } else 417 mtx_lock(&rm->rm_lock_mtx); 418 } 419 420 critical_enter(); 421 pc = get_pcpu(); 422 CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus); 423 rm_tracker_add(pc, tracker); 424 sched_pin(); 425 critical_exit(); 426 427 if (rm->lock_object.lo_flags & LO_SLEEPABLE) 428 sx_xunlock(&rm->rm_lock_sx); 429 else 430 mtx_unlock(&rm->rm_lock_mtx); 431 432 return (1); 433 } 434 435 int 436 _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock) 437 { 438 struct thread *td = curthread; 439 struct pcpu *pc; 440 441 if (SCHEDULER_STOPPED()) 442 return (1); 443 444 tracker->rmp_flags = 0; 445 tracker->rmp_thread = td; 446 tracker->rmp_rmlock = rm; 447 448 if (rm->lock_object.lo_flags & LO_SLEEPABLE) 449 THREAD_NO_SLEEPING(); 450 451 td->td_critnest++; /* critical_enter(); */ 452 453 __compiler_membar(); 454 455 pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */ 456 457 rm_tracker_add(pc, tracker); 458 459 sched_pin(); 460 461 __compiler_membar(); 462 463 td->td_critnest--; 464 465 /* 466 * Fast path to combine two common conditions into a single 467 * conditional jump. 468 */ 469 if (0 == (td->td_owepreempt | 470 CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus))) 471 return (1); 472 473 /* We do not have a read token and need to acquire one. */ 474 return _rm_rlock_hard(rm, tracker, trylock); 475 } 476 477 static void 478 _rm_unlock_hard(struct thread *td,struct rm_priotracker *tracker) 479 { 480 481 if (td->td_owepreempt) { 482 td->td_critnest++; 483 critical_exit(); 484 } 485 486 if (!tracker->rmp_flags) 487 return; 488 489 mtx_lock_spin(&rm_spinlock); 490 LIST_REMOVE(tracker, rmp_qentry); 491 492 if (tracker->rmp_flags & RMPF_SIGNAL) { 493 struct rmlock *rm; 494 struct turnstile *ts; 495 496 rm = tracker->rmp_rmlock; 497 498 turnstile_chain_lock(&rm->lock_object); 499 mtx_unlock_spin(&rm_spinlock); 500 501 ts = turnstile_lookup(&rm->lock_object); 502 503 turnstile_signal(ts, TS_EXCLUSIVE_QUEUE); 504 turnstile_unpend(ts, TS_EXCLUSIVE_LOCK); 505 turnstile_chain_unlock(&rm->lock_object); 506 } else 507 mtx_unlock_spin(&rm_spinlock); 508 } 509 510 void 511 _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker) 512 { 513 struct pcpu *pc; 514 struct thread *td = tracker->rmp_thread; 515 516 if (SCHEDULER_STOPPED()) 517 return; 518 519 td->td_critnest++; /* critical_enter(); */ 520 pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */ 521 rm_tracker_remove(pc, tracker); 522 td->td_critnest--; 523 sched_unpin(); 524 525 if (rm->lock_object.lo_flags & LO_SLEEPABLE) 526 THREAD_SLEEPING_OK(); 527 528 if (0 == (td->td_owepreempt | tracker->rmp_flags)) 529 return; 530 531 _rm_unlock_hard(td, tracker); 532 } 533 534 void 535 _rm_wlock(struct rmlock *rm) 536 { 537 struct rm_priotracker *prio; 538 struct turnstile *ts; 539 cpuset_t readcpus; 540 541 if (SCHEDULER_STOPPED()) 542 return; 543 544 if (rm->lock_object.lo_flags & LO_SLEEPABLE) 545 sx_xlock(&rm->rm_lock_sx); 546 else 547 mtx_lock(&rm->rm_lock_mtx); 548 549 if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) { 550 /* Get all read tokens back */ 551 readcpus = all_cpus; 552 CPU_NAND(&readcpus, &rm->rm_writecpus); 553 rm->rm_writecpus = all_cpus; 554 555 /* 556 * Assumes rm->rm_writecpus update is visible on other CPUs 557 * before rm_cleanIPI is called. 558 */ 559 #ifdef SMP 560 smp_rendezvous_cpus(readcpus, 561 smp_no_rendezvous_barrier, 562 rm_cleanIPI, 563 smp_no_rendezvous_barrier, 564 rm); 565 566 #else 567 rm_cleanIPI(rm); 568 #endif 569 570 mtx_lock_spin(&rm_spinlock); 571 while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) { 572 ts = turnstile_trywait(&rm->lock_object); 573 prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL; 574 mtx_unlock_spin(&rm_spinlock); 575 turnstile_wait(ts, prio->rmp_thread, 576 TS_EXCLUSIVE_QUEUE); 577 mtx_lock_spin(&rm_spinlock); 578 } 579 mtx_unlock_spin(&rm_spinlock); 580 } 581 } 582 583 void 584 _rm_wunlock(struct rmlock *rm) 585 { 586 587 if (rm->lock_object.lo_flags & LO_SLEEPABLE) 588 sx_xunlock(&rm->rm_lock_sx); 589 else 590 mtx_unlock(&rm->rm_lock_mtx); 591 } 592 593 #if LOCK_DEBUG > 0 594 595 void 596 _rm_wlock_debug(struct rmlock *rm, const char *file, int line) 597 { 598 599 if (SCHEDULER_STOPPED()) 600 return; 601 602 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 603 ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d", 604 curthread, rm->lock_object.lo_name, file, line)); 605 KASSERT(!rm_destroyed(rm), 606 ("rm_wlock() of destroyed rmlock @ %s:%d", file, line)); 607 _rm_assert(rm, RA_UNLOCKED, file, line); 608 609 WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, 610 file, line, NULL); 611 612 _rm_wlock(rm); 613 614 LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line); 615 WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); 616 TD_LOCKS_INC(curthread); 617 } 618 619 void 620 _rm_wunlock_debug(struct rmlock *rm, const char *file, int line) 621 { 622 623 if (SCHEDULER_STOPPED()) 624 return; 625 626 KASSERT(!rm_destroyed(rm), 627 ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line)); 628 _rm_assert(rm, RA_WLOCKED, file, line); 629 WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); 630 LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line); 631 _rm_wunlock(rm); 632 TD_LOCKS_DEC(curthread); 633 } 634 635 int 636 _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, 637 int trylock, const char *file, int line) 638 { 639 640 if (SCHEDULER_STOPPED()) 641 return (1); 642 643 #ifdef INVARIANTS 644 if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) { 645 critical_enter(); 646 KASSERT(rm_trackers_present(get_pcpu(), rm, 647 curthread) == 0, 648 ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n", 649 rm->lock_object.lo_name, file, line)); 650 critical_exit(); 651 } 652 #endif 653 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 654 ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d", 655 curthread, rm->lock_object.lo_name, file, line)); 656 KASSERT(!rm_destroyed(rm), 657 ("rm_rlock() of destroyed rmlock @ %s:%d", file, line)); 658 if (!trylock) { 659 KASSERT(!rm_wowned(rm), 660 ("rm_rlock: wlock already held for %s @ %s:%d", 661 rm->lock_object.lo_name, file, line)); 662 WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, 663 NULL); 664 } 665 666 if (_rm_rlock(rm, tracker, trylock)) { 667 if (trylock) 668 LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file, 669 line); 670 else 671 LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, 672 line); 673 WITNESS_LOCK(&rm->lock_object, 0, file, line); 674 TD_LOCKS_INC(curthread); 675 return (1); 676 } else if (trylock) 677 LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line); 678 679 return (0); 680 } 681 682 void 683 _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, 684 const char *file, int line) 685 { 686 687 if (SCHEDULER_STOPPED()) 688 return; 689 690 KASSERT(!rm_destroyed(rm), 691 ("rm_runlock() of destroyed rmlock @ %s:%d", file, line)); 692 _rm_assert(rm, RA_RLOCKED, file, line); 693 WITNESS_UNLOCK(&rm->lock_object, 0, file, line); 694 LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line); 695 _rm_runlock(rm, tracker); 696 TD_LOCKS_DEC(curthread); 697 } 698 699 #else 700 701 /* 702 * Just strip out file and line arguments if no lock debugging is enabled in 703 * the kernel - we are called from a kernel module. 704 */ 705 void 706 _rm_wlock_debug(struct rmlock *rm, const char *file, int line) 707 { 708 709 _rm_wlock(rm); 710 } 711 712 void 713 _rm_wunlock_debug(struct rmlock *rm, const char *file, int line) 714 { 715 716 _rm_wunlock(rm); 717 } 718 719 int 720 _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, 721 int trylock, const char *file, int line) 722 { 723 724 return _rm_rlock(rm, tracker, trylock); 725 } 726 727 void 728 _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, 729 const char *file, int line) 730 { 731 732 _rm_runlock(rm, tracker); 733 } 734 735 #endif 736 737 #ifdef INVARIANT_SUPPORT 738 #ifndef INVARIANTS 739 #undef _rm_assert 740 #endif 741 742 /* 743 * Note that this does not need to use witness_assert() for read lock 744 * assertions since an exact count of read locks held by this thread 745 * is computable. 746 */ 747 void 748 _rm_assert(const struct rmlock *rm, int what, const char *file, int line) 749 { 750 int count; 751 752 if (panicstr != NULL) 753 return; 754 switch (what) { 755 case RA_LOCKED: 756 case RA_LOCKED | RA_RECURSED: 757 case RA_LOCKED | RA_NOTRECURSED: 758 case RA_RLOCKED: 759 case RA_RLOCKED | RA_RECURSED: 760 case RA_RLOCKED | RA_NOTRECURSED: 761 /* 762 * Handle the write-locked case. Unlike other 763 * primitives, writers can never recurse. 764 */ 765 if (rm_wowned(rm)) { 766 if (what & RA_RLOCKED) 767 panic("Lock %s exclusively locked @ %s:%d\n", 768 rm->lock_object.lo_name, file, line); 769 if (what & RA_RECURSED) 770 panic("Lock %s not recursed @ %s:%d\n", 771 rm->lock_object.lo_name, file, line); 772 break; 773 } 774 775 critical_enter(); 776 count = rm_trackers_present(get_pcpu(), rm, curthread); 777 critical_exit(); 778 779 if (count == 0) 780 panic("Lock %s not %slocked @ %s:%d\n", 781 rm->lock_object.lo_name, (what & RA_RLOCKED) ? 782 "read " : "", file, line); 783 if (count > 1) { 784 if (what & RA_NOTRECURSED) 785 panic("Lock %s recursed @ %s:%d\n", 786 rm->lock_object.lo_name, file, line); 787 } else if (what & RA_RECURSED) 788 panic("Lock %s not recursed @ %s:%d\n", 789 rm->lock_object.lo_name, file, line); 790 break; 791 case RA_WLOCKED: 792 if (!rm_wowned(rm)) 793 panic("Lock %s not exclusively locked @ %s:%d\n", 794 rm->lock_object.lo_name, file, line); 795 break; 796 case RA_UNLOCKED: 797 if (rm_wowned(rm)) 798 panic("Lock %s exclusively locked @ %s:%d\n", 799 rm->lock_object.lo_name, file, line); 800 801 critical_enter(); 802 count = rm_trackers_present(get_pcpu(), rm, curthread); 803 critical_exit(); 804 805 if (count != 0) 806 panic("Lock %s read locked @ %s:%d\n", 807 rm->lock_object.lo_name, file, line); 808 break; 809 default: 810 panic("Unknown rm lock assertion: %d @ %s:%d", what, file, 811 line); 812 } 813 } 814 #endif /* INVARIANT_SUPPORT */ 815 816 #ifdef DDB 817 static void 818 print_tracker(struct rm_priotracker *tr) 819 { 820 struct thread *td; 821 822 td = tr->rmp_thread; 823 db_printf(" thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid, 824 td->td_proc->p_pid, td->td_name); 825 if (tr->rmp_flags & RMPF_ONQUEUE) { 826 db_printf("ONQUEUE"); 827 if (tr->rmp_flags & RMPF_SIGNAL) 828 db_printf(",SIGNAL"); 829 } else 830 db_printf("0"); 831 db_printf("}\n"); 832 } 833 834 static void 835 db_show_rm(const struct lock_object *lock) 836 { 837 struct rm_priotracker *tr; 838 struct rm_queue *queue; 839 const struct rmlock *rm; 840 struct lock_class *lc; 841 struct pcpu *pc; 842 843 rm = (const struct rmlock *)lock; 844 db_printf(" writecpus: "); 845 ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus)); 846 db_printf("\n"); 847 db_printf(" per-CPU readers:\n"); 848 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) 849 for (queue = pc->pc_rm_queue.rmq_next; 850 queue != &pc->pc_rm_queue; queue = queue->rmq_next) { 851 tr = (struct rm_priotracker *)queue; 852 if (tr->rmp_rmlock == rm) 853 print_tracker(tr); 854 } 855 db_printf(" active readers:\n"); 856 LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry) 857 print_tracker(tr); 858 lc = LOCK_CLASS(&rm->rm_wlock_object); 859 db_printf("Backing write-lock (%s):\n", lc->lc_name); 860 lc->lc_ddb_show(&rm->rm_wlock_object); 861 } 862 #endif 863