1 /* 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Scooter Morris at Genentech Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $Id: kern_lockf.c,v 1.5 1995/12/14 08:31:26 phk Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0	/* null lock pointer */
#define SELF	0x1			/* lf_findoverlap: only caller's own locks */
#define OTHERS	0x2			/* lf_findoverlap: only other owners' locks */
static void	 lf_addblock __P((struct lockf *, struct lockf *));
static int	 lf_clearlock __P((struct lockf *));
static int	 lf_findoverlap __P((struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *
	 lf_getblock __P((struct lockf *));
static int	 lf_getlock __P((struct lockf *, struct flock *));
static int	 lf_setlock __P((struct lockf *));
static void	 lf_split __P((struct lockf *, struct lockf *));
static void	 lf_wakelock __P((struct lockf *));

/*
 * Advisory record locking support.
 *
 * Entry point: converts the caller's struct flock (ap->a_fl) into an
 * absolute [start, end] byte range, wraps it in a freshly allocated
 * struct lockf, and dispatches on ap->a_op (F_SETLK/F_UNLCK/F_GETLK).
 *
 * head points at the head of this inode's sorted lock list; size is
 * used to resolve SEEK_END-relative offsets (presumably the current
 * file size -- supplied by the caller).
 *
 * Returns 0 on success or an errno (EINVAL, EAGAIN, EDEADLK, ...).
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			/* Nothing to clear / nothing blocking a F_GETLK. */
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	/* l_len == 0 means "to end of file"; encoded internally as end == -1. */
	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;		/* owner: proc ptr (POSIX) or fd-ish id (flock) */
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	lock->lf_block = (struct lockf *)0;
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch(ap->a_op) {
	case F_SETLK:
		/* lf_setlock consumes `lock` (links it in or frees it). */
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		/*
		 * NOTE(review): lower-case free() here vs. FREE() above --
		 * equivalent in 4.4BSD (FREE is a macro over free), but the
		 * mixed usage throughout this file is inconsistent.
		 */
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 *
 * Sleeps (interruptibly) while a conflicting lock is held, performing
 * POSIX deadlock detection first, then merges/splits the owner's
 * existing overlapping locks as needed.  Consumes `lock`: it is either
 * linked into the inode's list, coalesced into an existing entry, or
 * freed on error.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;	/* writers sleep at PLOCK+4 */
	priority |= PCATCH;	/* make the sleep below signal-interruptible */
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				/*
				 * A proc sleeping in lf_setlock uses its
				 * lock as the wait channel (see tsleep
				 * below), so p_wchan is that lockf.
				 */
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					/* Cycle back to us: deadlock. */
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		lf_addblock(block, lock);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		if ((error = tsleep((caddr_t)lock, priority, lockstr, 0))) {
			/*
			 * Interrupted (PCATCH) or other tsleep error.
			 * Delete ourselves from the waiting to lock list.
			 */
			for (block = lock->lf_next;
			     block != NOLOCKF;
			     block = block->lf_block) {
				if (block->lf_block != lock)
					continue;
				/* Unlink us from the blocked chain. */
				block->lf_block = block->lf_block->lf_block;
				break;
			}
			/*
			 * If we did not find ourselves on the list, but
			 * are still linked onto a lock list, then something
			 * is very wrong.
			 */
			if (block == NOLOCKF && lock->lf_next != NOLOCKF)
				panic("lf_setlock: lost lock");
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				/* Same type: already covered, drop the new lock. */
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				/* Shrink overlap from the front, insert lock before it. */
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				/* Inherit overlap's blocked list, appending our own. */
				ltmp = lock->lf_block;
				lock->lf_block = overlap->lf_block;
				lf_addblock(lock, ltmp);
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;	/* lock may swallow more overlaps */

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;	/* lock's tail may overlap further entries */

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);	/* no locks on this inode at all */
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	/* Walk every overlap the caller owns within the unlock range. */
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			/* Exact match: unlink and free it. */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				/* Common start: just shave the front off. */
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			/*
			 * lf_split linked the tail piece after unlock;
			 * splice it directly after overlap, dropping the
			 * transient unlock entry from the chain.
			 */
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			/* Overlap fully cleared: unlink, free, keep scanning. */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/* Truncate overlap's tail and continue past it. */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			/* Shave overlap's front; nothing further can overlap. */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
480 */ 481 static int 482 lf_getlock(lock, fl) 483 register struct lockf *lock; 484 register struct flock *fl; 485 { 486 register struct lockf *block; 487 488 #ifdef LOCKF_DEBUG 489 if (lockf_debug & 1) 490 lf_print("lf_getlock", lock); 491 #endif /* LOCKF_DEBUG */ 492 493 if ((block = lf_getblock(lock))) { 494 fl->l_type = block->lf_type; 495 fl->l_whence = SEEK_SET; 496 fl->l_start = block->lf_start; 497 if (block->lf_end == -1) 498 fl->l_len = 0; 499 else 500 fl->l_len = block->lf_end - block->lf_start + 1; 501 if (block->lf_flags & F_POSIX) 502 fl->l_pid = ((struct proc *)(block->lf_id))->p_pid; 503 else 504 fl->l_pid = -1; 505 } else { 506 fl->l_type = F_UNLCK; 507 } 508 return (0); 509 } 510 511 /* 512 * Walk the list of locks for an inode and 513 * return the first blocking lock. 514 */ 515 static struct lockf * 516 lf_getblock(lock) 517 register struct lockf *lock; 518 { 519 struct lockf **prev, *overlap, *lf = *(lock->lf_head); 520 int ovcase; 521 522 prev = lock->lf_head; 523 while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) { 524 /* 525 * We've found an overlap, see if it blocks us 526 */ 527 if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)) 528 return (overlap); 529 /* 530 * Nope, point to the next one on the list and 531 * see if it blocks us 532 */ 533 lf = overlap->lf_next; 534 } 535 return (NOLOCKF); 536 } 537 538 /* 539 * Walk the list of locks for an inode to 540 * find an overlapping lock (if any). 541 * 542 * NOTE: this returns only the FIRST overlapping lock. There 543 * may be more than one. 
 */
/*
 * On a hit, *overlap is left pointing at the overlapping lock and *prev
 * at the link that points to it; the return value is the case number
 * (1-5) describing how the ranges relate.  Returns 0 when no overlap is
 * found.  An lf_end of -1 means the lock runs to end of file.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		/* Skip entries whose ownership doesn't match the scan mode. */
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			/*
			 * A SELF list is kept sorted, so once an entry
			 * starts past our end there can be no later hit.
			 */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		/* The five cases above are exhaustive for overlapping ranges. */
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Add a lock to the end of the blocked list.
 *
 * Appends `lock` to the lf_block chain hanging off `blocklist`.
 * A null `lock` is a no-op.
 */
static void
lf_addblock(blocklist, lock)
	struct lockf *blocklist;
	struct lockf *lock;
{
	register struct lockf *lf;

	if (lock == NOLOCKF)
		return;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("addblock: adding", lock);
		lf_print("to blocked list of", blocklist);
	}
#endif /* LOCKF_DEBUG */
	if ((lf = blocklist->lf_block) == NOLOCKF) {
		blocklist->lf_block = lock;
		return;
	}
	/* Walk to the tail of the blocked chain and append there. */
	while (lf->lf_block != NOLOCKF)
		lf = lf->lf_block;
	lf->lf_block = lock;
	return;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 *
 * lock1 is the existing lock; lock2's range lies within it (the
 * equal-start and equal-end cases are handled as two-piece splits,
 * so the MALLOC path below only runs for a strictly interior lock2).
 * In all cases lock2 ends up linked into the list in range order.
 */
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if spliting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock (same owner/type/flags as lock1,
	 * via the bcopy; blocked list explicitly cleared).
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block = NOLOCKF;
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 *
 * Detaches the entire blocked chain from listhead, then unlinks and
 * wakeup()s each waiter (they sleep on their own lockf address in
 * lf_setlock and will retry from scratch).
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *blocklist, *wakelock;

	blocklist = listhead->lf_block;
	listhead->lf_block = NOLOCKF;
	while (blocklist != NOLOCKF) {
		wakelock = blocklist;
		blocklist = blocklist->lf_block;
		wakelock->lf_block = NOLOCKF;
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 *
 * NOTE(review): this debug code dereferences lock->lf_inode; verify
 * struct lockf still carries that member now that the file has moved
 * from ufs to kern.  Also the printf specifiers (%lx/%x for pointers,
 * %d for off_t) look mismatched on some platforms -- debug-only.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock 0x%lx for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
		lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev),
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_block)
		printf(" block 0x%x\n", lock->lf_block);
	else
		printf("\n");
}

/*
 * Print the whole lock list for the inode that `lock` belongs to.
 * Debug-only; see the NOTE(review) on lf_print regarding lf_inode.
 */
void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf;

	printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
		tag, lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock 0x%lx for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		if (lf->lf_block)
			printf(" block 0x%x\n", lf->lf_block);
		else
			printf("\n");
	}
}
#endif /* LOCKF_DEBUG */