/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
35 * 36 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94 37 * $Id: kern_lockf.c,v 1.2 1994/09/25 19:33:37 phk Exp $ 38 */ 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/kernel.h> 43 #include <sys/file.h> 44 #include <sys/proc.h> 45 #include <sys/vnode.h> 46 #include <sys/malloc.h> 47 #include <sys/fcntl.h> 48 49 #include <sys/lockf.h> 50 51 /* 52 * This variable controls the maximum number of processes that will 53 * be checked in doing deadlock detection. 54 */ 55 int maxlockdepth = MAXDEPTH; 56 57 #ifdef LOCKF_DEBUG 58 int lockf_debug = 0; 59 #endif 60 61 #define NOLOCKF (struct lockf *)0 62 #define SELF 0x1 63 #define OTHERS 0x2 64 65 /* 66 * Advisory record locking support 67 */ 68 int 69 lf_advlock(ap, head, size) 70 struct vop_advlock_args /* { 71 struct vnode *a_vp; 72 caddr_t a_id; 73 int a_op; 74 struct flock *a_fl; 75 int a_flags; 76 } */ *ap; 77 struct lockf **head; 78 u_quad_t size; 79 { 80 register struct flock *fl = ap->a_fl; 81 register struct lockf *lock; 82 off_t start, end; 83 int error; 84 85 /* 86 * Avoid the common case of unlocking when inode has no locks. 87 */ 88 if (*head == (struct lockf *)0) { 89 if (ap->a_op != F_SETLK) { 90 fl->l_type = F_UNLCK; 91 return (0); 92 } 93 } 94 /* 95 * Convert the flock structure into a start and end. 96 */ 97 switch (fl->l_whence) { 98 99 case SEEK_SET: 100 case SEEK_CUR: 101 /* 102 * Caller is responsible for adding any necessary offset 103 * when SEEK_CUR is used. 
104 */ 105 start = fl->l_start; 106 break; 107 108 case SEEK_END: 109 start = size + fl->l_start; 110 break; 111 112 default: 113 return (EINVAL); 114 } 115 if (start < 0) 116 return (EINVAL); 117 if (fl->l_len == 0) 118 end = -1; 119 else 120 end = start + fl->l_len - 1; 121 /* 122 * Create the lockf structure 123 */ 124 MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK); 125 lock->lf_start = start; 126 lock->lf_end = end; 127 lock->lf_id = ap->a_id; 128 lock->lf_head = head; 129 lock->lf_type = fl->l_type; 130 lock->lf_next = (struct lockf *)0; 131 lock->lf_block = (struct lockf *)0; 132 lock->lf_flags = ap->a_flags; 133 /* 134 * Do the requested operation. 135 */ 136 switch(ap->a_op) { 137 case F_SETLK: 138 return (lf_setlock(lock)); 139 140 case F_UNLCK: 141 error = lf_clearlock(lock); 142 FREE(lock, M_LOCKF); 143 return (error); 144 145 case F_GETLK: 146 error = lf_getlock(lock, fl); 147 FREE(lock, M_LOCKF); 148 return (error); 149 150 default: 151 free(lock, M_LOCKF); 152 return (EINVAL); 153 } 154 /* NOTREACHED */ 155 } 156 157 /* 158 * Set a byte-range lock. 159 */ 160 int 161 lf_setlock(lock) 162 register struct lockf *lock; 163 { 164 register struct lockf *block; 165 struct lockf **head = lock->lf_head; 166 struct lockf **prev, *overlap, *ltmp; 167 static char lockstr[] = "lockf"; 168 int ovcase, priority, needtolink, error; 169 170 #ifdef LOCKF_DEBUG 171 if (lockf_debug & 1) 172 lf_print("lf_setlock", lock); 173 #endif /* LOCKF_DEBUG */ 174 175 /* 176 * Set the priority 177 */ 178 priority = PLOCK; 179 if (lock->lf_type == F_WRLCK) 180 priority += 4; 181 priority |= PCATCH; 182 /* 183 * Scan lock list for this file looking for locks that would block us. 184 */ 185 while ((block = lf_getblock(lock))) { 186 /* 187 * Free the structure and return if nonblocking. 188 */ 189 if ((lock->lf_flags & F_WAIT) == 0) { 190 FREE(lock, M_LOCKF); 191 return (EAGAIN); 192 } 193 /* 194 * We are blocked. 
Since flock style locks cover 195 * the whole file, there is no chance for deadlock. 196 * For byte-range locks we must check for deadlock. 197 * 198 * Deadlock detection is done by looking through the 199 * wait channels to see if there are any cycles that 200 * involve us. MAXDEPTH is set just to make sure we 201 * do not go off into neverland. 202 */ 203 if ((lock->lf_flags & F_POSIX) && 204 (block->lf_flags & F_POSIX)) { 205 register struct proc *wproc; 206 register struct lockf *waitblock; 207 int i = 0; 208 209 /* The block is waiting on something */ 210 wproc = (struct proc *)block->lf_id; 211 while (wproc->p_wchan && 212 (wproc->p_wmesg == lockstr) && 213 (i++ < maxlockdepth)) { 214 waitblock = (struct lockf *)wproc->p_wchan; 215 /* Get the owner of the blocking lock */ 216 waitblock = waitblock->lf_next; 217 if ((waitblock->lf_flags & F_POSIX) == 0) 218 break; 219 wproc = (struct proc *)waitblock->lf_id; 220 if (wproc == (struct proc *)lock->lf_id) { 221 free(lock, M_LOCKF); 222 return (EDEADLK); 223 } 224 } 225 } 226 /* 227 * For flock type locks, we must first remove 228 * any shared locks that we hold before we sleep 229 * waiting for an exclusive lock. 230 */ 231 if ((lock->lf_flags & F_FLOCK) && 232 lock->lf_type == F_WRLCK) { 233 lock->lf_type = F_UNLCK; 234 (void) lf_clearlock(lock); 235 lock->lf_type = F_WRLCK; 236 } 237 /* 238 * Add our lock to the blocked list and sleep until we're free. 239 * Remember who blocked us (for deadlock detection). 240 */ 241 lock->lf_next = block; 242 lf_addblock(block, lock); 243 #ifdef LOCKF_DEBUG 244 if (lockf_debug & 1) { 245 lf_print("lf_setlock: blocking on", block); 246 lf_printlist("lf_setlock", block); 247 } 248 #endif /* LOCKF_DEBUG */ 249 if ((error = tsleep((caddr_t)lock, priority, lockstr, 0))) { 250 /* 251 * Delete ourselves from the waiting to lock list. 
252 */ 253 for (block = lock->lf_next; 254 block != NOLOCKF; 255 block = block->lf_block) { 256 if (block->lf_block != lock) 257 continue; 258 block->lf_block = block->lf_block->lf_block; 259 break; 260 } 261 /* 262 * If we did not find ourselves on the list, but 263 * are still linked onto a lock list, then something 264 * is very wrong. 265 */ 266 if (block == NOLOCKF && lock->lf_next != NOLOCKF) 267 panic("lf_setlock: lost lock"); 268 free(lock, M_LOCKF); 269 return (error); 270 } 271 } 272 /* 273 * No blocks!! Add the lock. Note that we will 274 * downgrade or upgrade any overlapping locks this 275 * process already owns. 276 * 277 * Skip over locks owned by other processes. 278 * Handle any locks that overlap and are owned by ourselves. 279 */ 280 prev = head; 281 block = *head; 282 needtolink = 1; 283 for (;;) { 284 ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap); 285 if (ovcase) 286 block = overlap->lf_next; 287 /* 288 * Six cases: 289 * 0) no overlap 290 * 1) overlap == lock 291 * 2) overlap contains lock 292 * 3) lock contains overlap 293 * 4) overlap starts before lock 294 * 5) overlap ends after lock 295 */ 296 switch (ovcase) { 297 case 0: /* no overlap */ 298 if (needtolink) { 299 *prev = lock; 300 lock->lf_next = overlap; 301 } 302 break; 303 304 case 1: /* overlap == lock */ 305 /* 306 * If downgrading lock, others may be 307 * able to acquire it. 308 */ 309 if (lock->lf_type == F_RDLCK && 310 overlap->lf_type == F_WRLCK) 311 lf_wakelock(overlap); 312 overlap->lf_type = lock->lf_type; 313 FREE(lock, M_LOCKF); 314 lock = overlap; /* for debug output below */ 315 break; 316 317 case 2: /* overlap contains lock */ 318 /* 319 * Check for common starting point and different types. 
320 */ 321 if (overlap->lf_type == lock->lf_type) { 322 free(lock, M_LOCKF); 323 lock = overlap; /* for debug output below */ 324 break; 325 } 326 if (overlap->lf_start == lock->lf_start) { 327 *prev = lock; 328 lock->lf_next = overlap; 329 overlap->lf_start = lock->lf_end + 1; 330 } else 331 lf_split(overlap, lock); 332 lf_wakelock(overlap); 333 break; 334 335 case 3: /* lock contains overlap */ 336 /* 337 * If downgrading lock, others may be able to 338 * acquire it, otherwise take the list. 339 */ 340 if (lock->lf_type == F_RDLCK && 341 overlap->lf_type == F_WRLCK) { 342 lf_wakelock(overlap); 343 } else { 344 ltmp = lock->lf_block; 345 lock->lf_block = overlap->lf_block; 346 lf_addblock(lock, ltmp); 347 } 348 /* 349 * Add the new lock if necessary and delete the overlap. 350 */ 351 if (needtolink) { 352 *prev = lock; 353 lock->lf_next = overlap->lf_next; 354 prev = &lock->lf_next; 355 needtolink = 0; 356 } else 357 *prev = overlap->lf_next; 358 free(overlap, M_LOCKF); 359 continue; 360 361 case 4: /* overlap starts before lock */ 362 /* 363 * Add lock after overlap on the list. 364 */ 365 lock->lf_next = overlap->lf_next; 366 overlap->lf_next = lock; 367 overlap->lf_end = lock->lf_start - 1; 368 prev = &lock->lf_next; 369 lf_wakelock(overlap); 370 needtolink = 0; 371 continue; 372 373 case 5: /* overlap ends after lock */ 374 /* 375 * Add the new lock before overlap. 376 */ 377 if (needtolink) { 378 *prev = lock; 379 lock->lf_next = overlap; 380 } 381 overlap->lf_start = lock->lf_end + 1; 382 lf_wakelock(overlap); 383 break; 384 } 385 break; 386 } 387 #ifdef LOCKF_DEBUG 388 if (lockf_debug & 1) { 389 lf_print("lf_setlock: got the lock", lock); 390 lf_printlist("lf_setlock", lock); 391 } 392 #endif /* LOCKF_DEBUG */ 393 return (0); 394 } 395 396 /* 397 * Remove a byte-range lock on an inode. 398 * 399 * Generally, find the lock (or an overlap to that lock) 400 * and remove it (or shrink it), then wakeup anyone we can. 
int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	/* No locks on this file at all: nothing to clear. */
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	/* Walk our own (SELF) locks that overlap the unlock range. */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			/* Exact match: unlink and free it. */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				/* Common start: just trim the front. */
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			/*
			 * lf_split() links the temporary unlock entry into
			 * the list between the two halves; unlink it again,
			 * keeping the tail piece it chained after itself.
			 */
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			/* Remove the fully covered lock and keep scanning. */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/* Trim the tail and continue past it. */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			/* Trim the front; nothing further can overlap. */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
470 */ 471 int 472 lf_getlock(lock, fl) 473 register struct lockf *lock; 474 register struct flock *fl; 475 { 476 register struct lockf *block; 477 478 #ifdef LOCKF_DEBUG 479 if (lockf_debug & 1) 480 lf_print("lf_getlock", lock); 481 #endif /* LOCKF_DEBUG */ 482 483 if ((block = lf_getblock(lock))) { 484 fl->l_type = block->lf_type; 485 fl->l_whence = SEEK_SET; 486 fl->l_start = block->lf_start; 487 if (block->lf_end == -1) 488 fl->l_len = 0; 489 else 490 fl->l_len = block->lf_end - block->lf_start + 1; 491 if (block->lf_flags & F_POSIX) 492 fl->l_pid = ((struct proc *)(block->lf_id))->p_pid; 493 else 494 fl->l_pid = -1; 495 } else { 496 fl->l_type = F_UNLCK; 497 } 498 return (0); 499 } 500 501 /* 502 * Walk the list of locks for an inode and 503 * return the first blocking lock. 504 */ 505 struct lockf * 506 lf_getblock(lock) 507 register struct lockf *lock; 508 { 509 struct lockf **prev, *overlap, *lf = *(lock->lf_head); 510 int ovcase; 511 512 prev = lock->lf_head; 513 while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) { 514 /* 515 * We've found an overlap, see if it blocks us 516 */ 517 if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)) 518 return (overlap); 519 /* 520 * Nope, point to the next one on the list and 521 * see if it blocks us 522 */ 523 lf = overlap->lf_next; 524 } 525 return (NOLOCKF); 526 } 527 528 /* 529 * Walk the list of locks for an inode to 530 * find an overlapping lock (if any). 531 * 532 * NOTE: this returns only the FIRST overlapping lock. There 533 * may be more than one. 
int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	/* lf_end == -1 means "through end of file" (see lf_advlock). */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		/* Skip locks not matching the requested ownership filter. */
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			/*
			 * Presumably a single owner's locks are kept sorted
			 * by start, so once past the range we can stop for
			 * SELF scans — TODO confirm against lf_setlock.
			 */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		/* The five cases above are exhaustive for a true overlap. */
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Add a lock to the end of the blocked list.
 */
void
lf_addblock(blocklist, lock)
	struct lockf *blocklist;
	struct lockf *lock;
{
	register struct lockf *lf;

	if (lock == NOLOCKF)
		return;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("addblock: adding", lock);
		lf_print("to blocked list of", blocklist);
	}
#endif /* LOCKF_DEBUG */
	/* Empty list: the new lock becomes the first waiter. */
	if ((lf = blocklist->lf_block) == NOLOCKF) {
		blocklist->lf_block = lock;
		return;
	}
	/* Otherwise append at the tail of the lf_block chain. */
	while (lf->lf_block != NOLOCKF)
		lf = lf->lf_block;
	lf->lf_block = lock;
	return;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces:
	 * when the regions share a start or an end, lock1 is merely
	 * trimmed and lock2 linked adjacent to it.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	/* The copy must not inherit lock1's list of blocked waiters. */
	splitlock->lf_block = NOLOCKF;
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in: lock1, lock2, splitlock in range order.
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist: detach every waiter queued on listhead's
 * lf_block chain and wake each one so it retries in lf_setlock().
 */
void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *blocklist, *wakelock;

	blocklist = listhead->lf_block;
	listhead->lf_block = NOLOCKF;
	while (blocklist != NOLOCKF) {
		wakelock = blocklist;
		blocklist = blocklist->lf_block;
		/* Clear the waiter's links before waking it. */
		wakelock->lf_block = NOLOCKF;
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		/* Waiters tsleep() on their own lockf address. */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock (debug builds only).
 *
 * NOTE(review): several format specifiers here pass pointers to %lx/%x
 * and reference lock->lf_inode, which may not exist in this file's
 * struct lockf — verify before enabling LOCKF_DEBUG.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock 0x%lx for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
		lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev),
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_block)
		printf(" block 0x%x\n", lock->lf_block);
	else
		printf("\n");
}

/*
 * Print the whole lock list of the inode a lock belongs to
 * (debug builds only; same caveats as lf_print above).
 */
void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf;

	printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
		tag, lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock 0x%lx for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		if (lf->lf_block)
			printf(" block 0x%x\n", lf->lf_block);
		else
			printf("\n");
	}
}
#endif /* LOCKF_DEBUG */