/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
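/*
 * A quick usage sketch (the "foo" names are hypothetical, not part of
 * this file): readers keep an rm_priotracker on their own stack for
 * the duration of the critical section, while writers use a plain
 * lock/unlock pair.  See also the rmlock(9) manual page.
 *
 *	static struct rmlock foo_rm;
 *	RM_SYSINIT(foo_rm_init, &foo_rm, "foo rm");
 *
 *	void
 *	foo_read(void)
 *	{
 *		struct rm_priotracker tracker;
 *
 *		rm_rlock(&foo_rm, &tracker);
 *		foo_lookup();		(read-only access to foo state)
 *		rm_runlock(&foo_rm, &tracker);
 *	}
 */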
/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#define	RMPF_ONQUEUE	1
#define	RMPF_SIGNAL	2

#ifndef INVARIANTS
#define	_rm_assert(c, what, file, line)
#endif

static void	assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_rm(const struct lock_object *lock);
#endif
static void	lock_rm(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

static void
assert_rm(const struct lock_object *lock, int what)
{

	rm_assert((const struct rmlock *)lock, what);
}

static void
lock_rm(struct lock_object *lock, uintptr_t how)
{
	struct rmlock *rm;
	struct rm_priotracker *tracker;

	rm = (struct rmlock *)lock;
	if (how == 0)
		rm_wlock(rm);
	else {
		tracker = (struct rm_priotracker *)how;
		rm_rlock(rm, tracker);
	}
}

static uintptr_t
unlock_rm(struct lock_object *lock)
{
	struct thread *td;
	struct pcpu *pc;
	struct rmlock *rm;
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	uintptr_t how;

	rm = (struct rmlock *)lock;
	tracker = NULL;
	how = 0;
	rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
	if (rm_wowned(rm))
		rm_wunlock(rm);
	else {
		/*
		 * Find the right rm_priotracker structure for curthread.
		 * The guarantee about its uniqueness is given by the fact
		 * we already asserted the lock wasn't recursively acquired.
		 */
		critical_enter();
		td = curthread;
		pc = pcpu_find(curcpu);
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tracker = (struct rm_priotracker *)queue;
			if ((tracker->rmp_rmlock == rm) &&
			    (tracker->rmp_thread == td)) {
				how = (uintptr_t)tracker;
				break;
			}
		}
		KASSERT(tracker != NULL,
		    ("rm_priotracker is non-NULL when lock held in read mode"));
		critical_exit();
		rm_runlock(rm, tracker);
	}
	return (how);
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
	const struct rmlock *rm;
	struct lock_class *lc;

	rm = (const struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);
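/*
 * A note on the lc_lock/lc_unlock pair above: "how" follows the lock
 * class convention for consumers that must drop and later reacquire a
 * lock in whatever mode it was held.  A rough sketch:
 *
 *	uintptr_t how;
 *
 *	how = unlock_rm(lo);	(returns 0 if write locked, else the
 *				 rm_priotracker address)
 *	...
 *	lock_rm(lo, how);	(reacquires in the original mode)
 *
 * unlock_rm() can recover the tracker from the per-CPU queue because
 * the RA_NOTRECURSED assertion guarantees at most one match.
 */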
/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static inline void
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	int count;

	count = 0;
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
			count++;
	}
	return (count);
}

static inline void
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}

static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	struct lock_class *lc;
	int liflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_NOWITNESS);
	} else {
		lc = &lock_class_rm;
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
	}
	lock_init(&rm->lock_object, lc, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}

void
rm_destroy(struct rmlock *rm)
{

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}

int
rm_wowned(const struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args = arg;

	rm_init(args->ra_rm, args->ra_desc);
}
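/*
 * Initialization sketch (hypothetical lock names): RM_RECURSE lets a
 * reader reacquire a lock it already holds; RM_SLEEPABLE backs the
 * writer side with an sx lock instead of a mutex, so rm_wlock() may
 * sleep while readers still may not (THREAD_NO_SLEEPING() enforces
 * this in _rm_rlock() below for the sleepable variant):
 *
 *	rm_init_flags(&foo_rm, "foo rm", RM_RECURSE);
 *	rm_init_flags(&bar_rm, "bar rm", RM_SLEEPABLE);
 */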
void
rm_sysinit_flags(void *arg)
{
	struct rm_args_flags *args = arg;

	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}

static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE)
			sx_xlock(&rm->rm_lock_sx);
		else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}
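/*
 * The read path below open-codes critical_enter()/critical_exit() as
 * raw td_critnest updates fenced by compiler barriers, roughly:
 *
 *	critical_enter();
 *	rm_tracker_add(pc, tracker);
 *	sched_pin();
 *	critical_exit();
 *
 * but without the extra function calls, since this is the hot path
 * taken by every reader.  Only if this CPU's bit is set in
 * rm_writecpus, or a preemption is pending, does it fall back to
 * _rm_rlock_hard() above.
 */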
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_NO_SLEEPING();

	td->td_critnest++;	/* critical_enter(); */

	__compiler_membar();

	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	__compiler_membar();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return (_rm_rlock_hard(rm, tracker, trylock));
}

static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}
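/*
 * Writer-side sketch (hypothetical names): the writer acquires the
 * backing mutex or sx lock, revokes every CPU's read token via IPI,
 * and then waits on a turnstile until active readers have drained:
 *
 *	rm_wlock(&foo_rm);
 *	foo_update_state();	(exclusive access)
 *	rm_wunlock(&foo_rm);
 */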
void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendevous_barrier,
		    rm_cleanIPI,
		    smp_no_rendevous_barrier,
		    rm);

#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}

void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
	curthread->td_locks--;
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}
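/*
 * Trylock sketch (hypothetical names): rm_try_rlock() reaches the
 * function above with trylock != 0, so a failed attempt returns 0
 * instead of blocking on the writer's backing lock:
 *
 *	if (rm_try_rlock(&foo_rm, &tracker)) {
 *		foo_lookup();	(read-side work)
 *		rm_runlock(&foo_rm, &tracker);
 *	}
 */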
void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	curthread->td_locks--;
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return (_rm_rlock(rm, tracker, trylock));
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif
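/*
 * Assertion sketch (hypothetical lock name): callers can document
 * their locking contract with rm_assert(), which compiles to nothing
 * without INVARIANTS:
 *
 *	rm_assert(&foo_rm, RA_RLOCKED);		(in a read-side path)
 *	rm_assert(&foo_rm, RA_WLOCKED);		(in a write-side path)
 *	rm_assert(&foo_rm, RA_UNLOCKED);	(e.g. before rm_destroy())
 */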
777 "read " : "", file, line); 778 if (count > 1) { 779 if (what & RA_NOTRECURSED) 780 panic("Lock %s recursed @ %s:%d\n", 781 rm->lock_object.lo_name, file, line); 782 } else if (what & RA_RECURSED) 783 panic("Lock %s not recursed @ %s:%d\n", 784 rm->lock_object.lo_name, file, line); 785 break; 786 case RA_WLOCKED: 787 if (!rm_wowned(rm)) 788 panic("Lock %s not exclusively locked @ %s:%d\n", 789 rm->lock_object.lo_name, file, line); 790 break; 791 case RA_UNLOCKED: 792 if (rm_wowned(rm)) 793 panic("Lock %s exclusively locked @ %s:%d\n", 794 rm->lock_object.lo_name, file, line); 795 796 critical_enter(); 797 count = rm_trackers_present(pcpu_find(curcpu), rm, curthread); 798 critical_exit(); 799 800 if (count != 0) 801 panic("Lock %s read locked @ %s:%d\n", 802 rm->lock_object.lo_name, file, line); 803 break; 804 default: 805 panic("Unknown rm lock assertion: %d @ %s:%d", what, file, 806 line); 807 } 808 } 809 #endif /* INVARIANT_SUPPORT */ 810 811 #ifdef DDB 812 static void 813 print_tracker(struct rm_priotracker *tr) 814 { 815 struct thread *td; 816 817 td = tr->rmp_thread; 818 db_printf(" thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid, 819 td->td_proc->p_pid, td->td_name); 820 if (tr->rmp_flags & RMPF_ONQUEUE) { 821 db_printf("ONQUEUE"); 822 if (tr->rmp_flags & RMPF_SIGNAL) 823 db_printf(",SIGNAL"); 824 } else 825 db_printf("0"); 826 db_printf("}\n"); 827 } 828 829 static void 830 db_show_rm(const struct lock_object *lock) 831 { 832 struct rm_priotracker *tr; 833 struct rm_queue *queue; 834 const struct rmlock *rm; 835 struct lock_class *lc; 836 struct pcpu *pc; 837 838 rm = (const struct rmlock *)lock; 839 db_printf(" writecpus: "); 840 ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus)); 841 db_printf("\n"); 842 db_printf(" per-CPU readers:\n"); 843 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) 844 for (queue = pc->pc_rm_queue.rmq_next; 845 queue != &pc->pc_rm_queue; queue = queue->rmq_next) { 846 tr = (struct rm_priotracker *)queue; 847 if (tr->rmp_rmlock == rm) 848 print_tracker(tr); 849 } 850 db_printf(" active readers:\n"); 851 LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry) 852 print_tracker(tr); 853 lc = LOCK_CLASS(&rm->rm_wlock_object); 854 db_printf("Backing write-lock (%s):\n", lc->lc_name); 855 lc->lc_ddb_show(&rm->rm_wlock_object); 856 } 857 #endif 858