/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef SIMPLELOCK_DEBUG
#define	COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define	COUNT(p, x)
#endif

#define	LOCK_WAIT_TIME 100
#define	LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define	LOCK_INLINE
#else
#define	LOCK_INLINE __inline
#endif

#define	LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

/*
 * Mutex array variables.  Rather than each lockmgr lock having its own mutex,
 * share a fixed (at boot time) number of mutexes across all lockmgr locks in
 * order to keep sizeof(struct lock) down.
 */
extern int lock_nmtx;
int lock_mtx_selector;
struct mtx *lock_mtx_array;
MUTEX_DECLARE(static, lock_mtx);

static int acquire(struct lock *lkp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags);

static void
lockmgr_init(void *dummy __unused)
{
	int	i;

	/*
	 * Initialize the lockmgr protection mutex if it hasn't already been
	 * done.  Unless something changes about kernel startup order, VM
	 * initialization will always cause this mutex to already be
	 * initialized in a call to lockinit().
	 */
	if (lock_mtx_selector == 0)
		mtx_init(&lock_mtx, "lockmgr", MTX_DEF | MTX_COLD);
	else {
		/*
		 * This is necessary if (lock_nmtx == 1) and doesn't hurt
		 * otherwise.
		 */
		lock_mtx_selector = 0;
	}

	lock_mtx_array = (struct mtx *)malloc(sizeof(struct mtx) * lock_nmtx,
	    M_CACHE, M_WAITOK);
	for (i = 0; i < lock_nmtx; i++)
		mtx_init(&lock_mtx_array[i], "lockmgr interlock", MTX_DEF);
}
SYSINIT(lmgrinit, SI_SUB_LOCK, SI_ORDER_FIRST, lockmgr_init, NULL)

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

/*
 * This is the waitloop optimization.  Note that for this to work,
 * simple_lock and simple_unlock should be subroutines to avoid
 * optimization troubles.
 */
static int
apause(struct lock *lkp, int flags)
{
#ifdef SMP
	int i, lock_wait;
#endif

	if ((lkp->lk_flags & flags) == 0)
		return 0;
#ifdef SMP
	for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
		mtx_exit(lkp->lk_interlock, MTX_DEF);
		for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
			if ((lkp->lk_flags & flags) == 0)
				break;
		mtx_enter(lkp->lk_interlock, MTX_DEF);
		if ((lkp->lk_flags & flags) == 0)
			return 0;
	}
#endif
	return 1;
}

static int
acquire(struct lock *lkp, int extflags, int wanted) {
	int s, error;

	CTR3(KTR_LOCKMGR,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x\n",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
		error = apause(lkp, wanted);
		if (error == 0)
			return 0;
	}

	s = splhigh();
	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		mtx_exit(lkp->lk_interlock, MTX_DEF);
		error = tsleep(lkp, lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo);
		mtx_enter(lkp->lk_interlock, MTX_DEF);
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error) {
			splx(s);
			return error;
		}
		if (extflags & LK_SLEEPFAIL) {
			splx(s);
			return ENOLCK;
		}
	}
	splx(s);
	return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
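/*
 * A minimal usage sketch (illustrative only, not code from this file): a
 * lock is set up once with lockinit() and then acquired and released
 * through lockmgr().  The priority (PVFS), wait message, NULL interlock,
 * and curproc arguments below are assumptions about the caller's context.
 *
 *	struct lock lk;
 *	int error;
 *
 *	lockinit(&lk, PVFS, "examplk", 0, 0);
 *	error = lockmgr(&lk, LK_EXCLUSIVE, NULL, curproc);
 *	if (error == 0) {
 *		... access the protected data ...
 *		lockmgr(&lk, LK_RELEASE, NULL, curproc);
 *	}
 */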
int
#ifndef	DEBUG_LOCKS
lockmgr(lkp, flags, interlkp, p)
#else
debuglockmgr(lkp, flags, interlkp, p, name, file, line)
#endif
	struct lock *lkp;
	u_int flags;
	struct mtx *interlkp;
	struct proc *p;
#ifdef	DEBUG_LOCKS
	const char *name;	/* Name of lock function */
	const char *file;	/* Name of file call is from */
	int line;		/* Line number in file */
#endif
{
	int error;
	pid_t pid;
	int extflags;

	CTR5(KTR_LOCKMGR,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
	    "interlkp == %p, p == %p", lkp, lkp->lk_wmesg, flags, interlkp, p);

	error = 0;
	if (p == NULL)
		pid = LK_KERNPROC;
	else
		pid = p->p_pid;

	mtx_enter(lkp->lk_interlock, MTX_DEF);
	if (flags & LK_INTERLOCK)
		mtx_exit(interlkp, MTX_DEF);

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if P_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != pid) {
			if (p && (p->p_flag & P_DEADLKTREAT)) {
				error = acquire(
					    lkp,
					    extflags,
					    LK_HAVE_EXCL
					);
			} else {
				error = acquire(
					    lkp,
					    extflags,
					    LK_HAVE_EXCL | LK_WANT_EXCL |
					    LK_WANT_UPGRADE
					);
			}
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
		shareunlock(lkp, 1);
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 *	Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(p, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		     LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != pid &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC) {
				COUNT(p, -1);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			shareunlock(lkp, 1);
			COUNT(p, -1);
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
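		/*
		 * From the caller's point of view, a drain is typically the
		 * last acquisition before the lock is torn down.  A sketch of
		 * such a teardown (an illustration, not code from this file;
		 * the lock variable and curproc are assumed caller context):
		 *
		 *	lockmgr(&lk, LK_DRAIN, NULL, curproc);
		 *	lockmgr(&lk, LK_RELEASE, NULL, curproc);
		 *	lockdestroy(&lk);
		 */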
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		COUNT(p, 1);
		break;

	default:
		mtx_exit(lkp->lk_interlock, MTX_DEF);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	     LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_exit(lkp->lk_interlock, MTX_DEF);
	return (error);
}

static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}

	error = apause(lkp, LK_ALL);
	if (error == 0)
		return 0;

	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		mtx_exit(lkp->lk_interlock, MTX_DEF);
		error = tsleep(&lkp->lk_flags, lkp->lk_prio,
		    lkp->lk_wmesg, lkp->lk_timo);
		mtx_enter(lkp->lk_interlock, MTX_DEF);
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCKMGR, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	if (lock_mtx_array != NULL) {
		mtx_enter(&lock_mtx, MTX_DEF);
		lkp->lk_interlock = &lock_mtx_array[lock_mtx_selector];
		lock_mtx_selector++;
		if (lock_mtx_selector == lock_nmtx)
			lock_mtx_selector = 0;
		mtx_exit(&lock_mtx, MTX_DEF);
	} else {
		/*
		 * Giving lockmgr locks that are initialized during boot a
		 * pointer to the internal lockmgr mutex is safe, since the
		 * lockmgr code itself doesn't call lockinit() (which could
		 * cause mutex recursion).
		 */
		if (lock_mtx_selector == 0) {
			/*
			 * This case only happens during kernel bootstrapping,
			 * so there's no reason to protect modification of
			 * lock_mtx_selector or lock_mtx.
			 */
			mtx_init(&lock_mtx, "lockmgr", MTX_DEF | MTX_COLD);
			lock_mtx_selector = 1;
		}
		lkp->lk_interlock = &lock_mtx;
	}
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
}

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{
	CTR2(KTR_LOCKMGR, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, p)
	struct lock *lkp;
	struct proc *p;
{
	int lock_type = 0;

	mtx_enter(lkp->lk_interlock, MTX_DEF);
	if (lkp->lk_exclusivecount != 0) {
		if (p == NULL || lkp->lk_lockholder == p->p_pid)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	mtx_exit(lkp->lk_interlock, MTX_DEF);
	return (lock_type);
}

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_enter(lkp->lk_interlock, MTX_DEF);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_exit(lkp->lk_interlock, MTX_DEF);
	return (count);
}

/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(SIMPLELOCK_DEBUG) && (MAXCPU == 1 || defined(COMPILING_LINT))
#include <sys/kernel.h>
#include <sys/sysctl.h>

static int lockpausetime = 0;
SYSCTL_INT(_debug, OID_AUTO, lockpausetime, CTLFLAG_RW, &lockpausetime, 0, "");

static int simplelockrecurse;

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simplelock *alp;
{

	alp->lock_data = 0;
}

void
_simple_lock(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 1) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_lock: lock held", id, l);
		printf("%s:%d: simple_lock: lock held\n", id, l);
		if (lockpausetime == 1) {
			Debugger("simple_lock");
			/*BACKTRACE(curproc); */
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_lock: lock held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
}

int
_simple_lock_try(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (alp->lock_data)
		return (0);
	if (simplelockrecurse)
		return (1);
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
	return (1);
}

void
_simple_unlock(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 0) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_unlock: lock not held", id, l);
		printf("%s:%d: simple_unlock: lock not held\n", id, l);
		if (lockpausetime == 1) {
			Debugger("simple_unlock");
			/* BACKTRACE(curproc); */
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_unlock: lock not held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 0;
	if (curproc)
		curproc->p_simple_locks--;
}
#elif defined(SIMPLELOCK_DEBUG)
#error "SIMPLELOCK_DEBUG is not compatible with SMP!"
#endif /* SIMPLELOCK_DEBUG && MAXCPU == 1 */