/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#define	LOCK_WAIT_TIME 100
#define	LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define	LOCK_INLINE
#else
#define	LOCK_INLINE __inline
#endif

#define	LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

/*
 * Mutex array variables.  Rather than each lockmgr lock having its own mutex,
 * share a fixed (at boot time) number of mutexes across all lockmgr locks in
 * order to keep sizeof(struct lock) down.
 */
static struct mtx lock_mtx;

static int acquire(struct lock **lkpp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags);

static void
lockmgr_init(void *dummy __unused)
{
	mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
}
SYSINIT(lmgrinit, SI_SUB_LOCKMGR, SI_ORDER_FIRST, lockmgr_init, NULL)

/*
 * Bump the shared hold count and mark the lock as having shared holders.
 */
static LOCK_INLINE void
sharelock(struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

/*
 * Drop shared holds; when the last shared hold goes away, clear
 * LK_SHARE_NONZERO and wake up any exclusive or upgrade waiters.
 */
static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

/*
 * This is the waitloop optimization: on SMP, spin briefly (dropping the
 * interlock) in the hope that the wanted flags clear before the caller
 * has to sleep.
 */
static int
apause(struct lock *lkp, int flags)
{
#ifdef SMP
	int i, lock_wait;
#endif

	if ((lkp->lk_flags & flags) == 0)
		return 0;
#ifdef SMP
	for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
		mtx_unlock(lkp->lk_interlock);
		for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
			if ((lkp->lk_flags & flags) == 0)
				break;
		mtx_lock(lkp->lk_interlock);
		if ((lkp->lk_flags & flags) == 0)
			return 0;
	}
#endif
	return 1;
}

/*
 * Sleep (or fail immediately with LK_NOWAIT) until none of the wanted
 * flags are set, following the lock to its replacement if it is handed
 * off via lk_newlock while we wait.
 */
static int
acquire(struct lock **lkpp, int extflags, int wanted) {
	struct lock *lkp = *lkpp;
	int s, error;

	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
		error = apause(lkp, wanted);
		if (error == 0)
			return 0;
	}

	s = splhigh();
	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error) {
			splx(s);
			return error;
		}
		if (extflags & LK_SLEEPFAIL) {
			splx(s);
			return ENOLCK;
		}
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	splx(s);
	return 0;
}
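
/*
 * Illustrative sketch (not part of the original source and excluded from
 * compilation): one common way callers combine a private mutex with the
 * lockmgr() routine below via LK_INTERLOCK, which has lockmgr() drop the
 * caller's mutex once the lock's own interlock is held.  The example_obj
 * structure and the function names here are hypothetical.
 */
#if 0
struct example_obj {
	struct mtx	eo_mtx;		/* caller-private state mutex */
	struct lock	eo_lock;	/* lockmgr lock */
};

static int
example_locked_op(struct example_obj *eo, struct thread *td)
{
	int error;

	mtx_lock(&eo->eo_mtx);
	/* ... examine state protected by eo_mtx ... */

	/*
	 * LK_INTERLOCK makes lockmgr() release eo_mtx after taking the
	 * lock's interlock, so there is no window between the check
	 * above and the acquisition below.
	 */
	error = lockmgr(&eo->eo_lock, LK_EXCLUSIVE | LK_INTERLOCK,
	    &eo->eo_mtx, td);
	if (error != 0)
		return (error);
	/* ... perform the operation ... */
	lockmgr(&eo->eo_lock, LK_RELEASE, NULL, td);
	return (0);
}
#endif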

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#ifndef	DEBUG_LOCKS
lockmgr(lkp, flags, interlkp, td)
#else
debuglockmgr(lkp, flags, interlkp, td, name, file, line)
#endif
	struct lock *lkp;
	u_int flags;
	struct mtx *interlkp;
	struct thread *td;
#ifdef	DEBUG_LOCKS
	const char *name;	/* Name of lock function */
	const char *file;	/* Name of file call is from */
	int line;		/* Line number in file */
#endif
{
	int error;
	struct thread *thr;
	int extflags, lockflags;

	CTR5(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
	    "interlkp == %p, td == %p", lkp, lkp->lk_wmesg, flags, interlkp, td);

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->mtx_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags);
			if (error)
				break;
			sharelock(lkp, 1);
#if defined(DEBUG_LOCKS)
			lkp->lk_slockholder = thr;
			lkp->lk_sfilename = file;
			lkp->lk_slineno = line;
			lkp->lk_slockername = name;
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
		    ("lockmgr: not holding exclusive lock "
		    "(owner thread (%p) != thread (%p), exclcnt (%d) != 0)",
		    lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if ((lkp->lk_lockholder == thr) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
		shareunlock(lkp, 1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive
			 * lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			break;
		}
		/*
		 * Someone else has requested the upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags,
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(lkp, 1);
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	    LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

/*
 * Wait until all activity on the lock (LK_ALL) has drained away, for
 * LK_DRAIN requests.
 */
static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}

	error = apause(lkp, LK_ALL);
	if (error == 0)
		return 0;

	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(from, to)
	struct lock *from;
	struct lock *to;
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags&LK_WAITDRAIN) == 0, ("transfer draining lock"));
	if (from->lk_waitcount == 0)
		return;
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, NULL, from->lk_prio, "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	lkp->lk_filename = "none";
	lkp->lk_lockername = "never exclusive locked";
	lkp->lk_lineno = 0;
	lkp->lk_slockholder = LK_NOPROC;
	lkp->lk_sfilename = "none";
	lkp->lk_slockername = "never share locked";
	lkp->lk_slineno = 0;
#endif
}

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{
	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
}
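
/*
 * Illustrative sketch (not part of the original source and excluded from
 * compilation): the basic lifecycle of a lockmgr lock using lockinit(),
 * exclusive acquisition and release, and lockdestroy().  The softc layout,
 * the "exlock" wait message, and the PVFS priority are placeholder choices.
 */
#if 0
struct example_softc {
	struct lock	ex_lock;
};

static void
example_attach(struct example_softc *sc)
{
	lockinit(&sc->ex_lock, PVFS, "exlock", 0, 0);
}

static void
example_modify(struct example_softc *sc, struct thread *td)
{
	lockmgr(&sc->ex_lock, LK_EXCLUSIVE, NULL, td);
	/* ... modify data protected by ex_lock ... */
	lockmgr(&sc->ex_lock, LK_RELEASE, NULL, td);
}

static void
example_detach(struct example_softc *sc)
{
	lockdestroy(&sc->ex_lock);
}
#endif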

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, td)
	struct lock *lkp;
	struct thread *td;
{
	int lock_type = 0;

	mtx_lock(lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
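
/*
 * Illustrative sketch (not part of the original source and excluded from
 * compilation): taking a shared lock and attempting a non-blocking upgrade.
 * Note that, as described in the LK_UPGRADE case above, a failed upgrade
 * leaves the lock entirely unlocked, so the caller must not release it
 * again.  Function and parameter names are hypothetical.
 */
#if 0
static int
example_read_then_modify(struct lock *lkp, struct thread *td)
{
	int error;

	error = lockmgr(lkp, LK_SHARED, NULL, td);
	if (error != 0)
		return (error);
	/* ... read data under the shared lock ... */

	/* Try to upgrade without sleeping; EBUSY means we would block. */
	error = lockmgr(lkp, LK_UPGRADE | LK_NOWAIT, NULL, td);
	if (error != 0)
		return (error);		/* shared lock already dropped */

	/* lockstatus() should now report LK_EXCLUSIVE for this thread. */
	KASSERT(lockstatus(lkp, td) == LK_EXCLUSIVE,
	    ("example: upgrade did not yield an exclusive lock"));
	/* ... modify data under the exclusive lock ... */
	lockmgr(lkp, LK_RELEASE, NULL, td);
	return (0);
}
#endif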