/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
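/*
 * A minimal usage sketch (illustrative only; "ex_lock", the "exlock"
 * wait message, and the PVFS priority are example choices, not part of
 * this file).  A lock must be set up with lockinit() before its first
 * lockmgr() call and torn down with lockdestroy():
 *
 *	struct lock ex_lock;
 *
 *	lockinit(&ex_lock, PVFS, "exlock", 0, 0);
 *	...
 *	(void)lockmgr(&ex_lock, LK_EXCLUSIVE, NULL, curthread);
 *	...			(critical section)
 *	(void)lockmgr(&ex_lock, LK_RELEASE, NULL, curthread);
 *	...
 *	lockdestroy(&ex_lock);
 */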

#define	COUNT(td, x)	if ((td)) (td)->td_locks += (x)
#define	LK_ALL	(LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock **lkpp, int extflags, int wanted);
static int acquiredrain(struct lock *lkp, int extflags);

static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}

static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

static int
acquire(struct lock **lkpp, int extflags, int wanted)
{
	struct lock *lkp = *lkpp;
	int error;
	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return EBUSY;
	error = 0;
	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return (error);
}
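/*
 * Callers that protect their own state with a mutex can hand that mutex
 * off via LK_INTERLOCK; lockmgr() drops it once its own interlock is
 * held.  A sketch ("ex_mtx" is a hypothetical caller-owned mutex):
 *
 *	mtx_lock(&ex_mtx);
 *	...			(examine object state)
 *	(void)lockmgr(&ex_lock, LK_EXCLUSIVE | LK_INTERLOCK, &ex_mtx,
 *	    curthread);
 *	...			(ex_mtx is now unlocked, ex_lock is held)
 */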
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp, td)
	struct lock *lkp;
	u_int flags;
	struct mtx *interlkp;
	struct thread *td;
{
	int error;
	struct thread *thr;
	int extflags, lockflags;

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, "
	    "exclusivecount == %d, flags == 0x%x, td == %p",
	    lkp, lkp->lk_wmesg, lkp->lk_lockholder,
	    lkp->lk_exclusivecount, flags, td);
#ifdef DEBUG_LOCKS
	{
		struct stack stack; /* XXX */
		stack_save(&stack);
		CTRSTACK(KTR_LOCK, &stack, 0, 1);
	}
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT | LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->mtx_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags);
			if (error)
				break;
			sharelock(td, lkp, 1);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr &&
		    lkp->lk_exclusivecount != 0,
		    ("lockmgr: not holding exclusive lock "
		    "(owner thread (%p) != thread (%p), exclcnt (%d) != 0)",
		    lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(td, lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
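		/*
		 * Caller-side sketch (illustrative): because a failed
		 * upgrade leaves the lock entirely unheld, callers
		 * typically fall back to a fresh exclusive request:
		 *
		 *	if (lockmgr(&ex_lock, LK_UPGRADE, NULL,
		 *	    curthread) != 0)
		 *		(void)lockmgr(&ex_lock, LK_EXCLUSIVE,
		 *		    NULL, curthread);
		 */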
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an
			 * upgrade, so request the upgrade and wait for the
			 * shared count to drop to zero, then take the
			 * exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				if ((lkp->lk_flags &
				    (LK_WANT_EXCL | LK_WAIT_NONZERO)) ==
				    (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
#if defined(DEBUG_LOCKS)
			stack_save(&lkp->lk_stack);
#endif
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags,
		    LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		else {
			printf("lockmgr: thread %p unlocking unheld lock\n",
			    thr);
			kdb_backtrace();
		}

		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		stack_save(&lkp->lk_stack);
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	    LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}
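/*
 * Teardown sketch (illustrative): LK_DRAIN waits for every holder and
 * waiter to go away, which makes it the usual last acquisition before
 * the object containing the lock is freed:
 *
 *	(void)lockmgr(&ex_lock, LK_DRAIN, NULL, curthread);
 *	(void)lockmgr(&ex_lock, LK_RELEASE, NULL, curthread);
 *	lockdestroy(&ex_lock);
 */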
/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(from, to)
	struct lock *from;
	struct lock *to;
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0,
	    ("transfer draining lock"));

	mtx_lock(from->lk_interlock);
	if (from->lk_waitcount == 0) {
		mtx_unlock(from->lk_interlock);
		return;
	}
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, from->lk_interlock, from->lk_prio,
	    "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
	mtx_unlock(from->lk_interlock);
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	stack_zero(&lkp->lk_stack);
#endif
}

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{
	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, td)
	struct lock *lkp;
	struct thread *td;
{
	int lock_type = 0;
	int interlocked;

	if (!kdb_active) {
		interlocked = 1;
		mtx_lock(lkp->lk_interlock);
	} else
		interlocked = 0;
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	if (interlocked)
		mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
#ifdef DEBUG_LOCKS
	stack_print(&lkp->lk_stack);
#endif
}
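
/*
 * Query sketch (illustrative): lockstatus() can back an ownership
 * assertion in a caller that must already hold the lock exclusively:
 *
 *	KASSERT(lockstatus(&ex_lock, curthread) == LK_EXCLUSIVE,
 *	    ("ex_lock not exclusively held"));
 */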