/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef SIMPLELOCK_DEBUG
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
        LK_SHARE_NONZERO | LK_WAIT_NONZERO)

/*
 * Mutex array variables.  Rather than each lockmgr lock having its own mutex,
 * share a fixed (at boot time) number of mutexes across all lockmgr locks in
 * order to keep sizeof(struct lock) down.
 */
extern int lock_nmtx;
int lock_mtx_selector;
struct mtx *lock_mtx_array;
MUTEX_DECLARE(static, lock_mtx);

static int acquire(struct lock *lkp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags);

static void
lockmgr_init(void *dummy __unused)
{
        int i;

        /*
         * Initialize the lockmgr protection mutex if it hasn't already been
         * done.  Unless something changes about kernel startup order, VM
         * initialization will always cause this mutex to already be
         * initialized in a call to lockinit().
         */
        if (lock_mtx_selector == 0)
                mtx_init(&lock_mtx, "lockmgr", MTX_DEF | MTX_COLD);
        else {
                /*
                 * This is necessary if (lock_nmtx == 1) and doesn't hurt
                 * otherwise.
                 */
                lock_mtx_selector = 0;
        }

        lock_mtx_array = (struct mtx *)malloc(sizeof(struct mtx) * lock_nmtx,
            M_CACHE, M_WAITOK);
        for (i = 0; i < lock_nmtx; i++)
                mtx_init(&lock_mtx_array[i], "lockmgr interlock", MTX_DEF);
}
SYSINIT(lmgrinit, SI_SUB_LOCK, SI_ORDER_FIRST, lockmgr_init, NULL)

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr) {
        lkp->lk_flags |= LK_SHARE_NONZERO;
        lkp->lk_sharecount += incr;
}

static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr) {

        KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

        if (lkp->lk_sharecount == decr) {
                lkp->lk_flags &= ~LK_SHARE_NONZERO;
                if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
                        wakeup(lkp);
                }
                lkp->lk_sharecount = 0;
        } else {
                lkp->lk_sharecount -= decr;
        }
}

/*
 * This is the waitloop optimization; note that for this to work,
 * simple_lock and simple_unlock should be subroutines to avoid
 * optimization troubles.
 */
static int
apause(struct lock *lkp, int flags)
{
#ifdef SMP
        int i, lock_wait;
#endif

        if ((lkp->lk_flags & flags) == 0)
                return 0;
#ifdef SMP
        for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
                mtx_exit(lkp->lk_interlock, MTX_DEF);
                for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
                        if ((lkp->lk_flags & flags) == 0)
                                break;
                mtx_enter(lkp->lk_interlock, MTX_DEF);
                if ((lkp->lk_flags & flags) == 0)
                        return 0;
        }
#endif
        return 1;
}

static int
acquire(struct lock *lkp, int extflags, int wanted) {
        int s, error;

        CTR3(KTR_LOCKMGR,
            "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x\n",
            lkp, extflags, wanted);

        if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
                return EBUSY;
        }

        if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
                error = apause(lkp, wanted);
                if (error == 0)
                        return 0;
        }

        s = splhigh();
        while ((lkp->lk_flags & wanted) != 0) {
                lkp->lk_flags |= LK_WAIT_NONZERO;
                lkp->lk_waitcount++;
                error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
                    lkp->lk_wmesg, lkp->lk_timo);
                if (lkp->lk_waitcount == 1) {
                        lkp->lk_flags &= ~LK_WAIT_NONZERO;
                        lkp->lk_waitcount = 0;
                } else {
                        lkp->lk_waitcount--;
                }
                if (error) {
                        splx(s);
                        return error;
                }
                if (extflags & LK_SLEEPFAIL) {
                        splx(s);
                        return ENOLCK;
                }
        }
        splx(s);
        return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.
 * Exclusive requests set the LK_WANT_EXCL flag (preventing further shared
 * locks), and wait for already accepted shared locks and shared-to-exclusive
 * upgrades to go away.
 */
int
#ifndef DEBUG_LOCKS
lockmgr(lkp, flags, interlkp, p)
#else
debuglockmgr(lkp, flags, interlkp, p, name, file, line)
#endif
        struct lock *lkp;
        u_int flags;
        struct mtx *interlkp;
        struct proc *p;
#ifdef DEBUG_LOCKS
        const char *name;       /* Name of lock function */
        const char *file;       /* Name of file call is from */
        int line;               /* Line number in file */
#endif
{
        int error;
        pid_t pid;
        int extflags;

        CTR5(KTR_LOCKMGR,
            "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
            "interlkp == %p, p == %p", lkp, lkp->lk_wmesg, flags, interlkp, p);

        error = 0;
        if (p == NULL)
                pid = LK_KERNPROC;
        else
                pid = p->p_pid;

        mtx_enter(lkp->lk_interlock, MTX_DEF);
        if (flags & LK_INTERLOCK)
                mtx_exit(interlkp, MTX_DEF);

        extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

        switch (flags & LK_TYPE_MASK) {

        case LK_SHARED:
                /*
                 * If we are not the exclusive lock holder, we have to block
                 * while there is an exclusive lock holder or while an
                 * exclusive lock request or upgrade request is in progress.
                 *
                 * However, if P_DEADLKTREAT is set, we override exclusive
                 * lock requests or upgrade requests (but not the exclusive
                 * lock itself).
                 */
                if (lkp->lk_lockholder != pid) {
                        if (p && (p->p_flag & P_DEADLKTREAT)) {
                                error = acquire(lkp, extflags, LK_HAVE_EXCL);
                        } else {
                                error = acquire(lkp, extflags,
                                    LK_HAVE_EXCL | LK_WANT_EXCL |
                                    LK_WANT_UPGRADE);
                        }
                        if (error)
                                break;
                        sharelock(lkp, 1);
                        COUNT(p, 1);
                        break;
                }
                /*
                 * We hold an exclusive lock, so downgrade it to shared.
                 * An alternative would be to fail with EDEADLK.
                 */
                sharelock(lkp, 1);
                COUNT(p, 1);
                /* fall into downgrade */

        case LK_DOWNGRADE:
                if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
                        panic("lockmgr: not holding exclusive lock");
                sharelock(lkp, lkp->lk_exclusivecount);
                lkp->lk_exclusivecount = 0;
                lkp->lk_flags &= ~LK_HAVE_EXCL;
                lkp->lk_lockholder = LK_NOPROC;
                if (lkp->lk_waitcount)
                        wakeup((void *)lkp);
                break;

        case LK_EXCLUPGRADE:
                /*
                 * If another process is ahead of us to get an upgrade,
                 * then we want to fail rather than have an intervening
                 * exclusive access.
                 */
                if (lkp->lk_flags & LK_WANT_UPGRADE) {
                        shareunlock(lkp, 1);
                        COUNT(p, -1);
                        error = EBUSY;
                        break;
                }
                /* fall into normal upgrade */

        case LK_UPGRADE:
                /*
                 * Upgrade a shared lock to an exclusive one.  If another
                 * shared lock has already requested an upgrade to an
                 * exclusive lock, our shared lock is released and an
                 * exclusive lock is requested (which will be granted
                 * after the upgrade).  If we return an error, the file
                 * will always be unlocked.
                 */
                if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
                        panic("lockmgr: upgrade exclusive lock");
                shareunlock(lkp, 1);
                COUNT(p, -1);
                /*
                 * If we are just polling, check to see if we will block.
                 */
                if ((extflags & LK_NOWAIT) &&
                    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
                     lkp->lk_sharecount > 1)) {
                        error = EBUSY;
                        break;
                }
                if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
                        /*
                         * We are the first shared lock to request an upgrade,
                         * so request the upgrade and wait for the shared
                         * count to drop to zero, then take the exclusive
                         * lock.
                         */
                        lkp->lk_flags |= LK_WANT_UPGRADE;
                        error = acquire(lkp, extflags, LK_SHARE_NONZERO);
                        lkp->lk_flags &= ~LK_WANT_UPGRADE;

                        if (error)
                                break;
                        lkp->lk_flags |= LK_HAVE_EXCL;
                        lkp->lk_lockholder = pid;
                        if (lkp->lk_exclusivecount != 0)
                                panic("lockmgr: non-zero exclusive count");
                        lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                        lkp->lk_filename = file;
                        lkp->lk_lineno = line;
                        lkp->lk_lockername = name;
#endif
                        COUNT(p, 1);
                        break;
                }
                /*
                 * Someone else has requested upgrade.  Release our shared
                 * lock, awaken the upgrade requestor if we are the last
                 * shared lock, then request an exclusive lock.
                 */
                if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
                    LK_WAIT_NONZERO)
                        wakeup((void *)lkp);
                /* fall into exclusive request */

        case LK_EXCLUSIVE:
                if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
                        /*
                         * Recursive lock.
                         */
                        if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
                                panic("lockmgr: locking against myself");
                        if ((extflags & LK_CANRECURSE) != 0) {
                                lkp->lk_exclusivecount++;
                                COUNT(p, 1);
                                break;
                        }
                }
                /*
                 * If we are just polling, check to see if we will sleep.
                 */
                if ((extflags & LK_NOWAIT) &&
                    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
                     LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
                        error = EBUSY;
                        break;
                }
                /*
                 * Try to acquire the want_exclusive flag.
                 */
                error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
                if (error)
                        break;
                lkp->lk_flags |= LK_WANT_EXCL;
                /*
                 * Wait for shared locks and upgrades to finish.
                 */
                error = acquire(lkp, extflags,
                    LK_WANT_UPGRADE | LK_SHARE_NONZERO);
                lkp->lk_flags &= ~LK_WANT_EXCL;
                if (error)
                        break;
                lkp->lk_flags |= LK_HAVE_EXCL;
                lkp->lk_lockholder = pid;
                if (lkp->lk_exclusivecount != 0)
                        panic("lockmgr: non-zero exclusive count");
                lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                lkp->lk_filename = file;
                lkp->lk_lineno = line;
                lkp->lk_lockername = name;
#endif
                COUNT(p, 1);
                break;

        case LK_RELEASE:
                if (lkp->lk_exclusivecount != 0) {
                        if (lkp->lk_lockholder != pid &&
                            lkp->lk_lockholder != LK_KERNPROC) {
                                panic("lockmgr: pid %d, not %s %d unlocking",
                                    pid, "exclusive lock holder",
                                    lkp->lk_lockholder);
                        }
                        if (lkp->lk_lockholder != LK_KERNPROC) {
                                COUNT(p, -1);
                        }
                        if (lkp->lk_exclusivecount == 1) {
                                lkp->lk_flags &= ~LK_HAVE_EXCL;
                                lkp->lk_lockholder = LK_NOPROC;
                                lkp->lk_exclusivecount = 0;
                        } else {
                                lkp->lk_exclusivecount--;
                        }
                } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
                        shareunlock(lkp, 1);
                        COUNT(p, -1);
                }
                if (lkp->lk_flags & LK_WAIT_NONZERO)
                        wakeup((void *)lkp);
                break;

        case LK_DRAIN:
                /*
                 * Check that we do not already hold the lock, as it can
                 * never drain if we do.  Unfortunately, we have no way to
                 * check for holding a shared lock, but at least we can
                 * check for an exclusive one.
                 */
                if (lkp->lk_lockholder == pid)
                        panic("lockmgr: draining against myself");

                error = acquiredrain(lkp, extflags);
                if (error)
                        break;
                lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
                lkp->lk_lockholder = pid;
                lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
                lkp->lk_filename = file;
                lkp->lk_lineno = line;
                lkp->lk_lockername = name;
#endif
                COUNT(p, 1);
                break;

        default:
                mtx_exit(lkp->lk_interlock, MTX_DEF);
                panic("lockmgr: unknown locktype request %d",
                    flags & LK_TYPE_MASK);
                /* NOTREACHED */
        }
        if ((lkp->lk_flags & LK_WAITDRAIN) &&
            (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
             LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
                lkp->lk_flags &= ~LK_WAITDRAIN;
                wakeup((void *)&lkp->lk_flags);
        }
        mtx_exit(lkp->lk_interlock, MTX_DEF);
        return (error);
}
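
/*
 * Usage sketch (not part of the original file): a minimal illustration of
 * how a caller might take and release shared and exclusive lockmgr locks.
 * The example_foo structure and function are hypothetical; only the
 * lockmgr() interface and its LK_* flags come from this file.  Kept under
 * "#if 0" so it is never compiled.
 */
#if 0
struct example_foo {
        struct lock     f_lock;         /* hypothetical embedded lock */
        int             f_data;
};

static int
example_foo_use(struct example_foo *fp, struct proc *p)
{
        int error;

        /* Shared access: blocks while an exclusive holder or upgrade exists. */
        error = lockmgr(&fp->f_lock, LK_SHARED, NULL, p);
        if (error)
                return (error);
        /* ... read fp->f_data here ... */
        lockmgr(&fp->f_lock, LK_RELEASE, NULL, p);

        /* Exclusive access, failing immediately instead of sleeping. */
        error = lockmgr(&fp->f_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL, p);
        if (error)
                return (error);         /* EBUSY if we would have slept */
        fp->f_data++;
        lockmgr(&fp->f_lock, LK_RELEASE, NULL, p);
        return (0);
}
#endif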

static int
acquiredrain(struct lock *lkp, int extflags) {
        int error;

        if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
                return EBUSY;
        }

        error = apause(lkp, LK_ALL);
        if (error == 0)
                return 0;

        while (lkp->lk_flags & LK_ALL) {
                lkp->lk_flags |= LK_WAITDRAIN;
                error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
                    lkp->lk_wmesg, lkp->lk_timo);
                if (error)
                        return error;
                if (extflags & LK_SLEEPFAIL) {
                        return ENOLCK;
                }
        }
        return 0;
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
        struct lock *lkp;
        int prio;
        char *wmesg;
        int timo;
        int flags;
{
        CTR5(KTR_LOCKMGR, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
            "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

        if (lock_mtx_array != NULL) {
                mtx_enter(&lock_mtx, MTX_DEF);
                lkp->lk_interlock = &lock_mtx_array[lock_mtx_selector];
                lock_mtx_selector++;
                if (lock_mtx_selector == lock_nmtx)
                        lock_mtx_selector = 0;
                mtx_exit(&lock_mtx, MTX_DEF);
        } else {
                /*
                 * Giving lockmgr locks that are initialized during boot a
                 * pointer to the internal lockmgr mutex is safe, since the
                 * lockmgr code itself doesn't call lockinit() (which could
                 * cause mutex recursion).
                 */
                if (lock_mtx_selector == 0) {
                        /*
                         * This case only happens during kernel bootstrapping,
                         * so there's no reason to protect modification of
                         * lock_mtx_selector or lock_mtx.
                         */
                        mtx_init(&lock_mtx, "lockmgr", MTX_DEF | MTX_COLD);
                        lock_mtx_selector = 1;
                }
                lkp->lk_interlock = &lock_mtx;
        }
        lkp->lk_flags = (flags & LK_EXTFLG_MASK);
        lkp->lk_sharecount = 0;
        lkp->lk_waitcount = 0;
        lkp->lk_exclusivecount = 0;
        lkp->lk_prio = prio;
        lkp->lk_wmesg = wmesg;
        lkp->lk_timo = timo;
        lkp->lk_lockholder = LK_NOPROC;
}

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
        struct lock *lkp;
{
        CTR2(KTR_LOCKMGR, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
            lkp, lkp->lk_wmesg);
}
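
/*
 * Initialization sketch (not part of the original file): how a hypothetical
 * subsystem might set up and tear down a lockmgr lock.  The PVFS priority,
 * the wait message, and the example_* names are assumptions made for the
 * illustration; lockinit() and lockdestroy() are the interfaces defined
 * above.  Kept under "#if 0" so it is never compiled.
 */
#if 0
static struct lock example_lock;

static void
example_setup(void)
{
        /* priority PVFS, wmesg "exlock", no timeout, no extra flags */
        lockinit(&example_lock, PVFS, "exlock", 0, 0);
}

static void
example_teardown(void)
{
        /* lockdestroy() currently only emits a trace record */
        lockdestroy(&example_lock);
}
#endif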

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, p)
        struct lock *lkp;
        struct proc *p;
{
        int lock_type = 0;

        mtx_enter(lkp->lk_interlock, MTX_DEF);
        if (lkp->lk_exclusivecount != 0) {
                if (p == NULL || lkp->lk_lockholder == p->p_pid)
                        lock_type = LK_EXCLUSIVE;
                else
                        lock_type = LK_EXCLOTHER;
        } else if (lkp->lk_sharecount != 0)
                lock_type = LK_SHARED;
        mtx_exit(lkp->lk_interlock, MTX_DEF);
        return (lock_type);
}

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
        struct lock *lkp;
{
        int count;

        mtx_enter(lkp->lk_interlock, MTX_DEF);
        count = lkp->lk_exclusivecount + lkp->lk_sharecount;
        mtx_exit(lkp->lk_interlock, MTX_DEF);
        return (count);
}

/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
        struct lock *lkp;
{

        if (lkp->lk_sharecount)
                printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
                    lkp->lk_sharecount);
        else if (lkp->lk_flags & LK_HAVE_EXCL)
                printf(" lock type %s: EXCL (count %d) by pid %d",
                    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
        if (lkp->lk_waitcount > 0)
                printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(SIMPLELOCK_DEBUG) && (MAXCPU == 1 || defined(COMPILING_LINT))
#include <sys/kernel.h>
#include <sys/sysctl.h>

static int lockpausetime = 0;
SYSCTL_INT(_debug, OID_AUTO, lockpausetime, CTLFLAG_RW, &lockpausetime, 0, "");

static int simplelockrecurse;

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
        struct simplelock *alp;
{

        alp->lock_data = 0;
}

void
_simple_lock(alp, id, l)
        struct simplelock *alp;
        const char *id;
        int l;
{

        if (simplelockrecurse)
                return;
        if (alp->lock_data == 1) {
                if (lockpausetime == -1)
                        panic("%s:%d: simple_lock: lock held", id, l);
                printf("%s:%d: simple_lock: lock held\n", id, l);
                if (lockpausetime == 1) {
                        Debugger("simple_lock");
                        /*BACKTRACE(curproc); */
                } else if (lockpausetime > 1) {
                        printf("%s:%d: simple_lock: lock held...", id, l);
                        tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
                            lockpausetime * hz);
                        printf(" continuing\n");
                }
        }
        alp->lock_data = 1;
        if (curproc)
                curproc->p_simple_locks++;
}

int
_simple_lock_try(alp, id, l)
        struct simplelock *alp;
        const char *id;
        int l;
{

        if (alp->lock_data)
                return (0);
        if (simplelockrecurse)
                return (1);
        alp->lock_data = 1;
        if (curproc)
                curproc->p_simple_locks++;
        return (1);
}

void
_simple_unlock(alp, id, l)
        struct simplelock *alp;
        const char *id;
        int l;
{

        if (simplelockrecurse)
                return;
        if (alp->lock_data == 0) {
                if (lockpausetime == -1)
                        panic("%s:%d: simple_unlock: lock not held", id, l);
                printf("%s:%d: simple_unlock: lock not held\n", id, l);
                if (lockpausetime == 1) {
                        Debugger("simple_unlock");
                        /* BACKTRACE(curproc); */
                } else if (lockpausetime > 1) {
                        printf("%s:%d: simple_unlock: lock not held...", id, l);
                        tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
                            lockpausetime * hz);
                        printf(" continuing\n");
                }
        }
        alp->lock_data = 0;
        if (curproc)
                curproc->p_simple_locks--;
}
#elif defined(SIMPLELOCK_DEBUG)
#error "SIMPLELOCK_DEBUG is not compatible with SMP!"
#endif /* SIMPLELOCK_DEBUG && MAXCPU == 1 */
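
/*
 * Diagnostic sketch (not part of the original file): using lockstatus() and
 * lockcount() to sanity-check ownership before touching protected data.  The
 * assertion wrapper is hypothetical; LK_EXCLUSIVE, LK_EXCLOTHER, and
 * LK_SHARED are the values lockstatus() returns above.  Kept under "#if 0"
 * so it is never compiled.
 */
#if 0
static void
example_assert_locked(struct lock *lkp, struct proc *p)
{
        int status;

        status = lockstatus(lkp, p);
        if (status != LK_EXCLUSIVE && status != LK_SHARED)
                panic("example_assert_locked: lock not held (%d holders)",
                    lockcount(lkp));
}
#endif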