/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_global.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/lock_profile.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif

static void	assert_lockmgr(struct lock_object *lock, int what);
#ifdef DDB
#include <ddb/ddb.h>
static void	db_show_lockmgr(struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
};

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

/* Adjust the per-thread lock count; td may be NULL (e.g. LK_KERNPROC). */
#define	COUNT(td, x)	if ((td)) (td)->td_locks += (x)
#define	LK_ALL		(LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int	acquire(struct lock **lkpp, int extflags, int wanted,
		    int *contested, uint64_t *waittime);
static int	acquiredrain(struct lock *lkp, int extflags);

/*
 * Grab a share of the lock: bump the shared count and mark it non-zero.
 */
static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr)
{

	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}

/*
 * Release a share of the lock; once the last share is gone, wake up any
 * pending exclusive or upgrade requests.
 */
static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr)
{

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

/*
 * Sleep on the lock's wait channel until none of the flags in 'wanted'
 * remain set, following the lock to lk_newlock if it is being transferred.
 */
static int
acquire(struct lock **lkpp, int extflags, int wanted, int *contested,
    uint64_t *waittime)
{
	struct lock *lkp = *lkpp;
	int error;

	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return (EBUSY);
	error = 0;
	if ((lkp->lk_flags & wanted) != 0)
		lock_profile_obtain_lock_failed(&lkp->lk_object, contested,
		    waittime);

	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return (error);
}

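/*
 * Illustrative caller sketch for the LK_INTERLOCK hand-off handled by
 * _lockmgr() below (not part of this file; "foo_mtx" and "foo_lock" are
 * hypothetical, and the lockmgr() convenience macro that supplies the
 * file/line arguments is assumed to come from <sys/lockmgr.h>):
 *
 *	mtx_lock(&foo_mtx);
 *	... inspect state protected by foo_mtx ...
 *	lockmgr(&foo_lock, LK_EXCLUSIVE | LK_INTERLOCK, &foo_mtx, curthread);
 *	... foo_mtx was dropped by lockmgr(); foo_lock is now held ...
 *	lockmgr(&foo_lock, LK_RELEASE, NULL, curthread);
 */
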
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
_lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
    struct thread *td, char *file, int line)
{
	int error;
	struct thread *thr;
	int extflags, lockflags;
	int contested = 0;
	uint64_t waitstart = 0;

	/*
	 * The lock owner can only be curthread or NULL in order to have a
	 * deadlock-free implementation of the primitive.
	 */
	KASSERT(td == NULL || td == curthread,
	    ("lockmgr: owner thread (%p) cannot differ from curthread or NULL",
	    td));

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, "
	    "exclusivecount == %d, flags == 0x%x, td == %p",
	    lkp, lkp->lk_wmesg, lkp->lk_lockholder,
	    lkp->lk_exclusivecount, flags, td);
#ifdef DEBUG_LOCKS
	{
		struct stack stack; /* XXX */
		stack_save(&stack);
		CTRSTACK(KTR_LOCK, &stack, 0, 1);
	}
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->lock_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags, &contested,
			    &waitstart);
			if (error)
				break;
			sharelock(td, lkp, 1);
			if (lkp->lk_sharecount == 1)
				lock_profile_obtain_lock_success(&lkp->lk_object,
				    contested, waitstart, file, line);

#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		if (lkp->lk_sharecount == 1)
			lock_profile_obtain_lock_success(&lkp->lk_object,
			    contested, waitstart, file, line);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr &&
		    lkp->lk_exclusivecount != 0,
		    ("lockmgr: not holding exclusive lock "
		    "(owner thread (%p) != thread (%p), exclcnt (%d) != 0)",
		    lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the lock
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		if (lkp->lk_sharecount == 0)
			lock_profile_release_lock(&lkp->lk_object);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive
			 * lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO,
			    &contested, &waitstart);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				if ((lkp->lk_flags & (LK_WANT_EXCL |
				    LK_WAIT_NONZERO)) ==
				    (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
			lock_profile_obtain_lock_success(&lkp->lk_object,
			    contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL),
		    &contested, &waitstart);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags, LK_HAVE_EXCL |
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO, &contested,
		    &waitstart);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
		lock_profile_obtain_lock_success(&lkp->lk_object,
		    contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
				lock_profile_release_lock(&lkp->lk_object);
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		else {
			printf("lockmgr: thread %p unlocking unheld lock\n",
			    thr);
			kdb_backtrace();
		}

		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	    LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

/*
 * Sleep until the lock is completely idle (no flags set), for LK_DRAIN.
 */
static int
acquiredrain(struct lock *lkp, int extflags)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return (EBUSY);
	}
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return (error);
		if (extflags & LK_SLEEPFAIL) {
			return (ENOLCK);
		}
	}
	return (0);
}

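/*
 * Illustrative life cycle for the routines below (not part of this file;
 * "foo_lock" and the calling context are hypothetical, PVFS is merely one
 * plausible sleep priority, and the lockmgr() convenience macro is assumed
 * to come from <sys/lockmgr.h>):
 *
 *	struct lock foo_lock;
 *
 *	lockinit(&foo_lock, PVFS, "foolck", 0, 0);
 *	...
 *	lockmgr(&foo_lock, LK_SHARED, NULL, curthread);
 *	... read the data protected by foo_lock ...
 *	lockmgr(&foo_lock, LK_RELEASE, NULL, curthread);
 *	...
 *	lockdestroy(&foo_lock);
 */
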
/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	stack_zero(&lkp->lk_stack);
#endif
	lock_init(&lkp->lk_object, &lock_class_lockmgr, wmesg, NULL,
	    LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE);
}

/*
 * Destroy a lock.
 */
void
lockdestroy(struct lock *lkp)
{

	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
	lock_destroy(&lkp->lk_object);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;
	int interlocked;

	if (!kdb_active) {
		interlocked = 1;
		mtx_lock(lkp->lk_interlock);
	} else
		interlocked = 0;
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	if (interlocked)
		mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(struct lock *lkp)
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Determine the number of waiters on a lock.
 */
int
lockwaiters(struct lock *lkp)
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_waitcount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
#ifdef DEBUG_LOCKS
	stack_print_ddb(&lkp->lk_stack);
#endif
}

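/*
 * The DDB glue below is reached through lock_class_lockmgr: db_show_lockmgr()
 * backs the debugger's "show lock" command via lc_ddb_show, and
 * lockmgr_chain() is consulted by the sleep-chain analysis when a thread is
 * found asleep on a wait channel (a hedged note: the exact command names
 * live in the DDB code elsewhere in the kernel, not in this file), e.g.:
 *
 *	db> show lock <address of a struct lock>
 */
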
#ifdef DDB
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on a 'struct lock'.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lkp;

	lkp = td->td_wchan;

	/* Simple test to see if wchan points to a lockmgr lock. */
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg)
		goto ok;

	/*
	 * If this thread is doing a DRAIN, then it would be asleep on
	 * &lkp->lk_flags rather than lkp.
	 */
	lkp = (struct lock *)((char *)td->td_wchan -
	    offsetof(struct lock, lk_flags));
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN))
		goto ok;

	/* Doesn't seem to be a lockmgr lock. */
	return (0);

ok:
	/* Ok, we think we have a lockmgr lock, so output some details. */
	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
	if (lkp->lk_sharecount) {
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
		*ownerp = NULL;
	} else {
		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
		*ownerp = lkp->lk_lockholder;
	}
	return (1);
}

void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lkp;

	lkp = (struct lock *)lock;

	db_printf(" lock type: %s\n", lkp->lk_wmesg);
	db_printf(" state: ");
	if (lkp->lk_sharecount)
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		td = lkp->lk_lockholder;
		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
		    td->td_proc->p_pid, td->td_name);
	} else
		db_printf("UNLOCKED\n");
	if (lkp->lk_waitcount > 0)
		db_printf(" waiters: %d\n", lkp->lk_waitcount);
}
#endif