/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $Id: kern_lockf.c,v 1.13 1997/02/22 09:39:06 peter Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;
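
/*
 * Example (a sketch of the usual path into this file): a userland
 * request such as
 *
 *	struct flock fl;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 100;
 *	fl.l_len = 50;			(lock bytes 100 through 149)
 *	fl.l_type = F_WRLCK;
 *	fcntl(fd, F_SETLKW, &fl);	(F_SETLKW implies F_WAIT below)
 *
 * arrives here through VOP_ADVLOCK(); a filesystem's advlock vnode
 * operation is normally a one-line forward to lf_advlock() below.
 */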

#ifdef LOCKF_DEBUG
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

static int	lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock __P((struct lockf *));
static int	 lf_findoverlap __P((struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *
	 lf_getblock __P((struct lockf *));
static int	 lf_getlock __P((struct lockf *, struct flock *));
static int	 lf_setlock __P((struct lockf *));
static void	 lf_split __P((struct lockf *, struct lockf *));
static void	 lf_wakelock __P((struct lockf *));

/*
 * Advisory record locking support
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
/*	lock->lf_inode = ip; */	/* XXX JH */
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch(ap->a_op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
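	/*
	 * The priority computed above is handed to tsleep() below; PCATCH
	 * makes the sleep interruptible, so a signal causes tsleep() to
	 * return a nonzero error, which the loop that follows handles by
	 * backing the request out.
	 */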
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		if ((error = tsleep((caddr_t)lock, priority, lockstr, 0))) {
			/*
			 * We may have been awakened by a signal (in
			 * which case we must remove ourselves from the
			 * blocked list) and/or by another process
			 * releasing a lock (in which case we have already
			 * been removed from the blocked list and our
			 * lf_next field set to NOLOCKF).
			 */
			if (lock->lf_next)
				TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock,
				    lf_block);
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
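		/*
		 * Sketched, with the new request ("lock") on top and an
		 * existing lock of ours ("overlap") below:
		 *
		 *	1)  |----- lock -----|	  2)	  |-- lock --|
		 *	    |--- overlap ----|	      |----- overlap -----|
		 *
		 *	3)  |----- lock -----|	  4)	      |-- lock --|
		 *	        |overlap|	      |-- overlap --|
		 *
		 *	5)  |-- lock --|
		 *		   |-- overlap --|
		 */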
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = overlap->lf_blkhd.tqh_first) != NULL) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}
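
/*
 * Example: if this process holds a shared lock on bytes 0-99 and now
 * requests an exclusive lock on bytes 50-59, that is case 2 above with
 * differing types and differing starting points: lf_split() cuts the
 * old lock into 0-49 and 60-99, and the new exclusive lock is linked
 * in between the two pieces.
 */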
/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}
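
/*
 * The blocking test above is the usual shared/exclusive rule: two
 * overlapping locks conflict unless both are shared (F_RDLCK).
 *
 *			other F_RDLCK	other F_WRLCK
 *	want F_RDLCK	    ok		   blocks
 *	want F_WRLCK	  blocks	   blocks
 */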
/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}
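
/*
 * Note that an lf_end of -1 means the lock runs to end of file (the
 * l_len == 0 convention established in lf_advlock() above), which is
 * why every range test in lf_findoverlap() special-cases -1 before
 * comparing endpoints.
 */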
/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while ((wakelock = listhead->lf_blkhd.tqh_first) != NULL) {
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock 0x%lx for ", tag, (u_long)lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
		lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev),
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_blkhd.tqh_first)
		printf(" block 0x%x\n", lock->lf_blkhd.tqh_first);
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
		tag, lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock 0x%lx for ", (u_long)lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		for (blk = lf->lf_blkhd.tqh_first; blk;
		     blk = blk->lf_block.tqe_next) {
			printf("\n\t\tlock request 0x%lx for ", (u_long)blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)(blk->lf_id))->p_pid);
			else
				printf("id 0x%x", blk->lf_id);
			printf(", %s, start %d, end %d",
				blk->lf_type == F_RDLCK ? "shared" :
				blk->lf_type == F_WRLCK ? "exclusive" :
				blk->lf_type == F_UNLCK ? "unlock" :
				"unknown", blk->lf_start, blk->lf_end);
			if (blk->lf_blkhd.tqh_first)
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */
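
/*
 * For reference, a filesystem exports this machinery through its
 * advlock vnode operation; a minimal sketch (assuming the ufs inode
 * layout used by the debug code above):
 *
 *	int
 *	ufs_advlock(ap)
 *		struct vop_advlock_args *ap;
 *	{
 *		register struct inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *	}
 */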