/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD$
 */

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;
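
/*
 * Illustrative note (editor's sketch, not part of the original source):
 * the deadlock walk in lf_setlock() below follows at most maxlockdepth
 * edges of the waits-for graph.  If P1 waits on P2's lock and P2 waits
 * on P3's, the walk visits P2 and then P3; a cycle leading back to the
 * requester is reported as EDEADLK, while any chain deeper than
 * MAXDEPTH is conservatively treated as free of deadlock.
 */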

#ifdef LOCKF_DEBUG
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>


static int	lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock __P((struct lockf *));
static int	 lf_findoverlap __P((struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *
	 lf_getblock __P((struct lockf *));
static int	 lf_getlock __P((struct lockf *, struct flock *));
static int	 lf_setlock __P((struct lockf *));
static void	 lf_split __P((struct lockf *, struct lockf *));
static void	 lf_wakelock __P((struct lockf *));

/*
 * Advisory record locking support
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
/*	lock->lf_inode = ip; */	/* XXX JH */
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}
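
/*
 * Worked example for the conversion above (editor's illustration, not
 * part of the original source): with a 100-byte file, l_whence ==
 * SEEK_END, l_start == -10 and l_len == 0 yield start = 90 and
 * end = -1, i.e. a lock from byte 90 through end-of-file; with
 * l_len == 20 instead, end = 109, since a range may extend past EOF.
 */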

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
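		/*
		 * Example of the walk below (editor's illustration, not
		 * part of the original source): P1 holds [0,9] and now
		 * requests [10,19], which P2 holds; P2 is itself asleep
		 * in lf_setlock() waiting on P1's [0,9].  Starting from
		 * P2, p_wchan points at P2's pending lockf, whose
		 * lf_next is the blocking lock owned by P1; since that
		 * owner is the requester, EDEADLK is returned.
		 */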
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);
		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing a lock (in which case we have
		 * already been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!! Add the lock. Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = overlap->lf_blkhd.tqh_first) !=
				    NOLOCKF) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
					ltmp->lf_next = lock;
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}
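
/*
 * Worked example for the overlap handling in lf_setlock() above
 * (editor's illustration, not part of the original source): if this
 * process holds a shared lock on [0,99] and requests an exclusive
 * lock on [40,59], lf_findoverlap() reports case 2; lf_split() turns
 * the holder into shared [0,39] and [60,99] with the new exclusive
 * [40,59] linked between them.  Requesting the same range again with
 * a different type (case 1) simply retypes the existing lock, waking
 * waiters only on a write-to-read downgrade.
 */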

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wake up anyone we can.
 */
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wake up the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
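/*
 * The six overlap cases, pictured (editor's illustration, not part of
 * the original source).  "lock" is the request; "lf" is the list entry
 * being checked:
 *
 *	case 0:  lock  .....|---|...		no overlap
 *	         lf    |--|.........
 *
 *	case 1:  lock  ..|------|...		overlap == lock
 *	         lf    ..|------|...
 *
 *	case 2:  lock  ....|--|.....		overlap contains lock
 *	         lf    ..|------|...
 *
 *	case 3:  lock  ..|------|...		lock contains overlap
 *	         lf    ....|--|.....
 *
 *	case 4:  lock  ....|----|...		overlap starts before lock
 *	         lf    ..|----|.....
 *
 *	case 5:  lock  ..|----|.....		overlap ends after lock
 *	         lf    ....|----|...
 */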
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
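/*
 * Pictorially (editor's illustration, not part of the original
 * source), splitting lock1 [0,99] around a contained lock2 [40,59]:
 *
 *	before:  lock1 |------------- 0..99 -------------|
 *	after:   lock1 |- 0..39 -| lock2 |- 40..59 -| splitlock |- 60..99 -|
 *
 * If lock2 shares lock1's start or end, only two pieces result and no
 * new structure is allocated.
 */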
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wake up a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while ((wakelock = listhead->lf_blkhd.tqh_first) != NOLOCKF) {
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}
"unlock" : 802 "unknown", (long)blk->lf_start, 803 (long)blk->lf_end); 804 if (blk->lf_blkhd.tqh_first) 805 panic("lf_printlist: bad list"); 806 } 807 printf("\n"); 808 } 809 } 810 #endif /* LOCKF_DEBUG */ 811