/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $Id: kern_lockf.c,v 1.7 1996/12/19 13:22:30 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int lockf_debug = 0;
#endif

#define NOLOCKF	(struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static void	 lf_addblock __P((struct lockf *, struct lockf *));
static int	 lf_clearlock __P((struct lockf *));
static int	 lf_findoverlap __P((struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *
	 lf_getblock __P((struct lockf *));
static int	 lf_getlock __P((struct lockf *, struct flock *));
static int	 lf_setlock __P((struct lockf *));
static void	 lf_split __P((struct lockf *, struct lockf *));
static void	 lf_wakelock __P((struct lockf *));

/*
 * Advisory record locking support
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	lock->lf_block = (struct lockf *)0;
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}
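
/*
 * Worked example of the conversion above (added illustration, not part
 * of the original code; the file size and field values are made up):
 * with a 1000-byte file, a request of
 *
 *	struct flock fl;
 *	fl.l_whence = SEEK_END;
 *	fl.l_start = -10;	(ten bytes before end of file)
 *	fl.l_len = 0;		(to end of file)
 *
 * yields start = size + fl->l_start = 990 and end = -1, i.e. the lock
 * covers byte 990 through whatever the end of file happens to be.
 * A zero l_len always maps to end == -1 ("no end"), which the overlap
 * code below treats as extending past every finite offset.
 */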

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked.  Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us.  MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		lf_addblock(block, lock);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		if ((error = tsleep((caddr_t)lock, priority, lockstr, 0))) {
			/*
			 * Delete ourselves from the waiting to lock list.
			 */
			for (block = lock->lf_next;
			     block != NOLOCKF;
			     block = block->lf_block) {
				if (block->lf_block != lock)
					continue;
				block->lf_block = block->lf_block->lf_block;
				break;
			}
			free(lock, M_LOCKF);
			return (error);
		}
	}
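	/*
	 * Illustration of the deadlock walk above (added comment, not
	 * original code; the process names are hypothetical): if process
	 * A requests a lock that B holds, A follows B's wait channel.
	 * When B is itself asleep in lf_setlock (p_wmesg == "lockf"),
	 * B's wait channel is the struct lockf B is sleeping on, whose
	 * lf_next names the lock B is blocked behind, and that lock's
	 * lf_id names the owning process.  Repeating the walk
	 *
	 *	A -> B -> C -> ... -> A
	 *
	 * for at most maxlockdepth steps either reaches a process that
	 * is not blocked on a POSIX record lock (no deadlock through
	 * this chain) or arrives back at A, in which case EDEADLK is
	 * returned.
	 */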
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				ltmp = lock->lf_block;
				lock->lf_block = overlap->lf_block;
				lf_addblock(lock, ltmp);
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}
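
/*
 * Worked example of the merge loop above (added illustration; the
 * offsets are made up): suppose this process holds a write lock on
 * [0,99] and now sets a read lock on [40,49].  lf_findoverlap reports
 * case 2 (overlap contains lock); since the types differ and the
 * starting points do not match, lf_split carves [0,99] into [0,39]
 * and [50,99], the new read lock [40,49] is linked between them, and
 * lf_wakelock gives any waiters a chance at the downgraded range.
 */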

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 * may be more than one.
 */
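/*
 * Sketch of the six overlap cases tested below (added illustration,
 * not original code).  "lock" is the request, "lf" the list entry:
 *
 *	0:  lock |---|                1:  lock |-------|
 *	    lf          |---|             lf   |-------|
 *
 *	2:  lock    |---|             3:  lock |-------|
 *	    lf   |-------|                lf      |--|
 *
 *	4:  lock    |----|            5:  lock |----|
 *	    lf   |----|                   lf       |----|
 *
 * An lf_end of -1 means "to end of file" and is treated as larger
 * than any finite offset in these comparisons.
 */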
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Add a lock to the end of the blocked list.
 */
static void
lf_addblock(blocklist, lock)
	struct lockf *blocklist;
	struct lockf *lock;
{
	register struct lockf *lf;

	if (lock == NOLOCKF)
		return;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("addblock: adding", lock);
		lf_print("to blocked list of", blocklist);
	}
#endif /* LOCKF_DEBUG */
	if ((lf = blocklist->lf_block) == NOLOCKF) {
		blocklist->lf_block = lock;
		return;
	}
	while (lf->lf_block != NOLOCKF)
		lf = lf->lf_block;
	lf->lf_block = lock;
	return;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
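/*
 * Worked example of the split below (added illustration; the offsets
 * are made up): splitting lock1 = [0,99] around a contained lock2 =
 * [30,59] leaves lock1 = [0,29], allocates splitlock = [60,99] as a
 * copy of lock1, and links the three as lock1 -> lock2 -> splitlock.
 * When lock2 shares a boundary with lock1, only two pieces result and
 * no allocation is needed.
 */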
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block = NOLOCKF;
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *blocklist, *wakelock;

	blocklist = listhead->lf_block;
	listhead->lf_block = NOLOCKF;
	while (blocklist != NOLOCKF) {
		wakelock = blocklist;
		blocklist = blocklist->lf_block;
		wakelock->lf_block = NOLOCKF;
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock 0x%lx for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
		lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev),
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_block)
		printf(" block 0x%x\n", lock->lf_block);
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf;

	printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
		tag, lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock 0x%lx for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		if (lf->lf_block)
			printf(" block 0x%x\n", lf->lf_block);
		else
			printf("\n");
	}
}
#endif /* LOCKF_DEBUG */
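
/*
 * Userland sketch of how these routines are reached (added as an
 * illustration; this is ordinary fcntl(2) usage, not kernel code, and
 * the file name is hypothetical):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/tmp/example", O_RDWR);
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;		(exclusive lock)
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 100;			(first 100 bytes)
 *	fcntl(fd, F_SETLKW, &fl);	(blocks; may fail with EDEADLK)
 *
 * On this code base, F_SETLKW is expected to arrive here as op F_SETLK
 * with F_WAIT set in a_flags, so lf_setlock() sleeps in tsleep() until
 * a conflicting lock is released and lf_wakelock() wakes it.
 */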