/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $Id$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

int	lockf_debug = 0;
SYSCTL_INT(_debug, 4, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock __P((struct lockf *));
static int	 lf_findoverlap __P((struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *
	 lf_getblock __P((struct lockf *));
static int	 lf_getlock __P((struct lockf *, struct flock *));
static int	 lf_setlock __P((struct lockf *));
static void	 lf_split __P((struct lockf *, struct lockf *));
static void	 lf_wakelock __P((struct lockf *));

/*
 * Advisory record locking support
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t  a_id;
		int  a_op;
		struct flock *a_fl;
		int  a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
/*	lock->lf_inode = ip; */	/* XXX JH */
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}
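/*
 * A worked example of the conversion done in lf_advlock() above (an added
 * illustration, derived from the code): a request with l_whence == SEEK_END,
 * l_start == -10 and l_len == 10 against a 100-byte file gives
 * start = 100 + (-10) = 90 and end = 90 + 10 - 1 = 99, i.e. the last ten
 * bytes.  An l_len of 0 means "lock to end of file" and is represented
 * internally as end == -1.
 */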
/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		if ((error = tsleep((caddr_t)lock, priority, lockstr, 0))) {
			/*
			 * We may have been awakened by a signal (in
			 * which case we must remove ourselves from the
			 * blocked list) and/or by another process
			 * releasing a lock (in which case we have already
			 * been removed from the blocked list and our
			 * lf_next field set to NOLOCKF).
			 */
			if (lock->lf_next)
				TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock,
				    lf_block);
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
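	/*
	 * A sketch of the six overlap cases distinguished by
	 * lf_findoverlap() below ("lock" is the new request, "overlap"
	 * an existing lock it is compared against); this diagram is an
	 * added illustration derived from the case analysis:
	 *
	 *	0)	|--lock--|            |--overlap--|	(disjoint)
	 *	1)	|---------lock---------|
	 *		|--------overlap-------|
	 *	2)	     |----lock----|
	 *		|--------overlap-------|
	 *	3)	|---------lock---------|
	 *		     |--overlap---|
	 *	4)	       |------lock------|
	 *		|-----overlap-----|
	 *	5)	|------lock------|
	 *		       |-----overlap-----|
	 */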
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = overlap->lf_blkhd.tqh_first)) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}
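/*
 * An added illustration of the overlap handling in lf_setlock() above: if
 * this process holds a shared lock on bytes [0, 99] and requests an
 * exclusive lock on [40, 59], lf_findoverlap() reports case 2 (overlap
 * contains lock).  Since the types differ and the starts differ,
 * lf_split() shrinks the old lock to [0, 39], links the new exclusive lock
 * after it, and creates a third lock for [60, 99]; lf_wakelock() then lets
 * any waiters on the old lock recompute what blocks them.
 */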
/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}
/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}
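/*
 * An added illustration of lf_split() below: splitting lock1 = [0, 99]
 * around a contained region lock2 = [40, 59] shrinks lock1 to [0, 39],
 * links lock2 in after it, and allocates a third lock for the remainder
 * [60, 99].  When lock2 abuts either end of lock1, only two pieces result
 * and no new lock needs to be allocated.
 */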
/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while ((wakelock = listhead->lf_blkhd.tqh_first)) {
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock 0x%lx for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
	    lock->lf_inode->i_number,
	    major(lock->lf_inode->i_dev),
	    minor(lock->lf_inode->i_dev),
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_blkhd.tqh_first)
		printf(" block 0x%x\n", lock->lf_blkhd.tqh_first);
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
	    tag, lock->lf_inode->i_number,
	    major(lock->lf_inode->i_dev),
	    minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock 0x%lx for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		for (blk = lf->lf_blkhd.tqh_first; blk;
		    blk = blk->lf_block.tqe_next) {
			printf("\n\t\tlock request 0x%lx for ", blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)(blk->lf_id))->p_pid);
			else
				printf("id 0x%x", blk->lf_id);
			printf(", %s, start %d, end %d",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (blk->lf_blkhd.tqh_first)
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */
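/*
 * Usage sketch (an added note; the caller shown follows the shape of the
 * 4.4BSD UFS code and is an assumption, not part of this file): a
 * filesystem exports this machinery through its VOP_ADVLOCK entry point by
 * passing its per-inode lock list head and current file size, roughly:
 *
 *	int
 *	ufs_advlock(ap)
 *		struct vop_advlock_args *ap;
 *	{
 *		register struct inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *	}
 */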