/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD$
 */

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

static int	lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock __P((struct lockf *));
static int	 lf_findoverlap __P((struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *
	 lf_getblock __P((struct lockf *));
static int	 lf_getlock __P((struct lockf *, struct flock *));
static int	 lf_setlock __P((struct lockf *));
static void	 lf_split __P((struct lockf *, struct lockf *));
static void	 lf_wakelock __P((struct lockf *));

/*
 * Advisory record locking support
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else {
		end = start + fl->l_len - 1;
		if (end < start)
			return (EINVAL);
	}
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
/*	lock->lf_inode = ip; */	/* XXX JH */
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch(ap->a_op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
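	/*
	 * Each pass of the loop below handles one blocking lock: if the
	 * caller did not ask to wait (F_WAIT clear), fail with EAGAIN;
	 * otherwise run the deadlock check for POSIX-style locks, queue
	 * on the blocker's lf_blkhd list, and sleep until lf_wakelock()
	 * (or a signal) wakes us, then rescan, since some other lock
	 * may still block us.
	 */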
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			mtx_lock_spin(&sched_lock);
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					mtx_unlock_spin(&sched_lock);
					FREE(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
			mtx_unlock_spin(&sched_lock);
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);
		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing a lock (in which case we have
		 * already been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			FREE(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
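	/*
	 * A sketch of the six overlap cases, with the existing lock
	 * ("overlap") drawn against the new lock's byte range:
	 *
	 *   lock:              |=======|
	 *   0) no overlap: |-|           |-|
	 *   1) equal:          |=======|
	 *   2) contains lock: |-----------|
	 *   3) inside lock:      |---|
	 *   4) starts before: |----|
	 *   5) ends after:          |----|
	 */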
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				FREE(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
					ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
					ltmp->lf_next = lock;
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
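/*
 * For example, with an existing lock covering bytes 0-29, unlocking
 * 10-19 is overlap case 2 and lf_split() leaves locks 0-9 and 20-29;
 * unlocking 0-9 shares the starting point, so the lock just shrinks
 * to 10-29; unlocking the whole range (or more) is case 1 or 3 and
 * the lock is removed outright.
 */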
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 * may be more than one.
 */
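/*
 * On a nonzero return, *overlap points at the overlapping entry and
 * *prev at the lf_next link that references it, so callers can splice
 * locks into or out of the list in place.  The type argument restricts
 * the scan to locks owned by the caller (SELF) or by anyone else
 * (OTHERS).
 */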
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
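	/*
	 * For example, if lock1 covers bytes 0-29 and lock2 covers
	 * 10-19, lock1 shrinks to 0-9, a new lock is allocated for
	 * 20-29, and the list is relinked as lock1 -> lock2 -> new.
	 */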
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock %p for ", tag, (void *)lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
	else
		printf("id %p", (void *)lock->lf_id);
	/* XXX no %qd in kernel.  Truncate. */
	printf(" in ino %lu on dev <%d, %d>, %s, start %ld, end %ld",
	    (u_long)lock->lf_inode->i_number,
	    major(lock->lf_inode->i_dev),
	    minor(lock->lf_inode->i_dev),
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", (long)lock->lf_start, (long)lock->lf_end);
	if (!TAILQ_EMPTY(&lock->lf_blkhd))
		printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	printf("%s: Lock list for ino %lu on dev <%d, %d>:\n",
	    tag, (u_long)lock->lf_inode->i_number,
	    major(lock->lf_inode->i_dev),
	    minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", (void *)lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %ld",
			    (long)((struct proc *)lf->lf_id)->p_pid);
		else
			printf("id %p", (void *)lf->lf_id);
		/* XXX no %qd in kernel.  Truncate. */
		printf(", %s, start %ld, end %ld",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (long)lf->lf_start, (long)lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			printf("\n\t\tlock request %p for ", (void *)blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %ld",
				    (long)((struct proc *)blk->lf_id)->p_pid);
			else
				printf("id %p", (void *)blk->lf_id);
			/* XXX no %qd in kernel.  Truncate. */
			printf(", %s, start %ld, end %ld",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (long)blk->lf_start,
			    (long)blk->lf_end);
			if (!TAILQ_EMPTY(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */