/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_global.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/lock_profile.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
static void	db_show_lockmgr(struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
};

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
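/*
 * An illustrative sketch of the typical lockmgr life cycle, kept under
 * "#if 0" so it is never compiled.  The function and lock names, and the
 * PVFS priority / "examplelk" wmesg values, are placeholders chosen for
 * this example; they are not part of this file's interface.
 */
#if 0
static struct lock example_lk;

static void
example_lockmgr_usage(struct thread *td)
{

	/* A lock must be initialized before first use. */
	lockinit(&example_lk, PVFS, "examplelk", 0, 0);

	/* Exclusive acquire and release. */
	lockmgr(&example_lk, LK_EXCLUSIVE, NULL, td);
	lockmgr(&example_lk, LK_RELEASE, NULL, td);

	/* Shared acquire, upgrade to exclusive, release. */
	lockmgr(&example_lk, LK_SHARED, NULL, td);
	lockmgr(&example_lk, LK_UPGRADE, NULL, td);
	lockmgr(&example_lk, LK_RELEASE, NULL, td);

	lockdestroy(&example_lk);
}
#endif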
void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#define	COUNT(td, x)	if ((td)) (td)->td_locks += (x)
#define	LK_ALL		(LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
			LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int	acquire(struct lock **lkpp, int extflags, int wanted,
		    int *contested, uint64_t *waittime);
static int	acquiredrain(struct lock *lkp, int extflags);

static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr)
{

	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}

static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr)
{

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

static int
acquire(struct lock **lkpp, int extflags, int wanted, int *contested,
    uint64_t *waittime)
{
	struct lock *lkp = *lkpp;
	int error;

	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return (EBUSY);
	error = 0;
	if ((lkp->lk_flags & wanted) != 0)
		lock_profile_obtain_lock_failed(&lkp->lk_object, contested,
		    waittime);

	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return (error);
}
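/*
 * A note on the sleep protocol above: msleep() atomically releases
 * lk_interlock while the caller sleeps and reacquires it before
 * returning, so acquire() may re-test lk_flags without missing a
 * wakeup.  The lk_newlock handoff lets transferlockers() below migrate
 * sleepers to a replacement lock.
 */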
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks) and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
_lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
    struct thread *td, char *file, int line)
{
	int error;
	struct thread *thr;
	int extflags, lockflags;
	int contested = 0;
	uint64_t waitstart = 0;

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, "
	    "exclusivecount == %d, flags == 0x%x, td == %p",
	    lkp, lkp->lk_wmesg, lkp->lk_lockholder,
	    lkp->lk_exclusivecount, flags, td);
#ifdef DEBUG_LOCKS
	{
		struct stack stack; /* XXX */

		stack_save(&stack);
		CTRSTACK(KTR_LOCK, &stack, 0, 1);
	}
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT | LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->lock_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags,
			    &contested, &waitstart);
			if (error)
				break;
			sharelock(td, lkp, 1);
			if (lkp->lk_sharecount == 1)
				lock_profile_obtain_lock_success(
				    &lkp->lk_object, contested, waitstart,
				    file, line);

#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		if (lkp->lk_sharecount == 1)
			lock_profile_obtain_lock_success(&lkp->lk_object,
			    contested, waitstart, file, line);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr &&
		    lkp->lk_exclusivecount != 0,
		    ("lockmgr: not holding exclusive lock "
		    "(owner thread (%p) != thread (%p), exclcnt (%d) != 0)",
		    lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another thread is ahead of us in requesting an upgrade,
		 * fail rather than permit an intervening exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(td, lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */
	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		if (lkp->lk_sharecount == 0)
			lock_profile_release_lock(&lkp->lk_object);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an
			 * upgrade, so request the upgrade and wait for
			 * the shared count to drop to zero, then take
			 * the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO,
			    &contested, &waitstart);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				if ((lkp->lk_flags & (LK_WANT_EXCL |
				    LK_WAIT_NONZERO)) ==
				    (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
			lock_profile_obtain_lock_success(&lkp->lk_object,
			    contested, waitstart, file, line);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
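		/*
		 * Exclusive acquisition is a two-step protocol: first
		 * contend for the LK_WANT_EXCL flag (against the current
		 * exclusive holder and other exclusive requesters), then
		 * wait for the remaining shared holders and upgraders to
		 * drain.
		 */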
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL),
		    &contested, &waitstart);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags, LK_HAVE_EXCL |
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO, &contested,
		    &waitstart);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
		lock_profile_obtain_lock_success(&lkp->lk_object, contested,
		    waitstart, file, line);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
				lock_profile_release_lock(&lkp->lk_object);
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		else {
			printf("lockmgr: thread %p unlocking unheld lock\n",
			    thr);
			kdb_backtrace();
		}

		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	    LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

static int
acquiredrain(struct lock *lkp, int extflags)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL))
		return (EBUSY);
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return (error);
		if (extflags & LK_SLEEPFAIL)
			return (ENOLCK);
	}
	return (0);
}
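/*
 * LK_DRAIN, serviced by acquiredrain() above, waits for all activity on
 * the lock, shared and exclusive alike, to cease; sleepers wait on
 * &lkp->lk_flags and are awakened by the LK_WAITDRAIN check at the end
 * of _lockmgr().  It is typically used just before the structure
 * containing the lock is freed or reused.
 */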
/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(struct lock *from, struct lock *to)
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0,
	    ("transfer draining lock"));

	mtx_lock(from->lk_interlock);
	if (from->lk_waitcount == 0) {
		mtx_unlock(from->lk_interlock);
		return;
	}
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, from->lk_interlock, from->lk_prio,
	    "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
	mtx_unlock(from->lk_interlock);
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	stack_zero(&lkp->lk_stack);
#endif
	lock_profile_object_init(&lkp->lk_object, &lock_class_lockmgr, wmesg);
	lock_init(&lkp->lk_object, &lock_class_lockmgr, wmesg, NULL,
	    LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE);
}

/*
 * Destroy a lock.
 */
void
lockdestroy(struct lock *lkp)
{

	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
	lock_profile_object_destroy(&lkp->lk_object);
	lock_destroy(&lkp->lk_object);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;
	int interlocked;

	if (!kdb_active) {
		interlocked = 1;
		mtx_lock(lkp->lk_interlock);
	} else
		interlocked = 0;
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	if (interlocked)
		mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(struct lock *lkp)
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Determine the number of waiters on a lock.
 */
int
lockwaiters(struct lock *lkp)
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_waitcount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}
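/*
 * An illustrative use of lockstatus() for an ownership assertion, kept
 * under "#if 0" so it is never compiled; "example_lk" is the placeholder
 * lock declared in the sketch near the top of this file.
 */
#if 0
static void
example_assert_owned(struct thread *td)
{

	KASSERT(lockstatus(&example_lk, td) == LK_EXCLUSIVE,
	    ("example_assert_owned: lock not exclusively held"));
}
#endif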
/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
#ifdef DEBUG_LOCKS
	stack_print(&lkp->lk_stack);
#endif
}

#ifdef DDB
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on a 'struct lock'.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lkp;

	lkp = td->td_wchan;

	/* Simple test to see if wchan points to a lockmgr lock. */
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg)
		goto ok;

	/*
	 * If this thread is doing a DRAIN, then it would be asleep on
	 * &lkp->lk_flags rather than lkp.
	 */
	lkp = (struct lock *)((char *)td->td_wchan -
	    offsetof(struct lock, lk_flags));
	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
	    lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN))
		goto ok;

	/* Doesn't seem to be a lockmgr lock. */
	return (0);

ok:
	/* Ok, we think we have a lockmgr lock, so output some details. */
	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
	if (lkp->lk_sharecount) {
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
		*ownerp = NULL;
	} else {
		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
		*ownerp = lkp->lk_lockholder;
	}
	return (1);
}

void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lkp;

	lkp = (struct lock *)lock;

	db_printf(" lock type: %s\n", lkp->lk_wmesg);
	db_printf(" state: ");
	if (lkp->lk_sharecount)
		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		td = lkp->lk_lockholder;
		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
		    td->td_proc->p_pid, td->td_proc->p_comm);
	} else
		db_printf("UNLOCKED\n");
	if (lkp->lk_waitcount > 0)
		db_printf(" waiters: %d\n", lkp->lk_waitcount);
}
#endif