/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD$
 */

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

#include <machine/limits.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

static int	lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock __P((struct lockf *));
static int	 lf_findoverlap __P((struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *
	 lf_getblock __P((struct lockf *));
static int	 lf_getlock __P((struct lockf *, struct flock *));
static int	 lf_setlock __P((struct lockf *));
static void	 lf_split __P((struct lockf *, struct lockf *));
static void	 lf_wakelock __P((struct lockf *));

/*
 * Advisory record locking support
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t  a_id;
		int  a_op;
		struct flock *a_fl;
		int  a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end, oadd;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len < 0) {
		if (start == 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	} else if (fl->l_len == 0)
		end = -1;
	else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
	}
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
/*	lock->lf_inode = ip; */	/* XXX JH */
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch(ap->a_op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}
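/*
 * Illustrative examples of the conversion done above (ranges are
 * inclusive byte offsets): on a 100-byte file, SEEK_END with
 * l_start = -10 and l_len = 5 locks [90, 94]; l_len = 0 means
 * "through end of file" and is encoded here as end = -1; a negative
 * length locks the bytes *before* the starting offset, so SEEK_SET
 * with l_start = 100 and l_len = -10 locks [90, 99].
 */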
/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			mtx_lock_spin(&sched_lock);
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					mtx_unlock_spin(&sched_lock);
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
			mtx_unlock_spin(&sched_lock);
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);
		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing a lock (in which case we have
		 * already been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
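	/*
	 * An illustrative sketch of the six overlap cases handled
	 * below; the new lock is drawn on top and an existing
	 * overlap as OOO underneath (ranges are inclusive):
	 *
	 *   lock:           XXXXXXXX
	 *   case 0:  OOO               OOO
	 *   case 1:         OOOOOOOO
	 *   case 2:       OOOOOOOOOOOO
	 *   case 3:           OOOO
	 *   case 4:      OOOOOOO
	 *   case 5:             OOOOOOO
	 */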
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
					ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
					ltmp->lf_next = lock;
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
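/*
 * For example, clearing the range [20, 29] out of a held lock
 * covering [0, 99] splits it (lf_findoverlap case 2, via lf_split),
 * leaving two locks covering [0, 19] and [30, 99].
 */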
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 * may be more than one.
 */
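/*
 * For example, if the lock being tested spans [10, 19]: an existing
 * lock over [10, 19] is case 1, [0, 30] is case 2, [12, 15] is
 * case 3, [0, 14] is case 4, [12, 30] is case 5, and [20, 30] does
 * not overlap at all (case 0).
 */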
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
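	/*
	 * For example, splitting lock1 covering [0, 99] around a
	 * contained lock2 covering [40, 59] shrinks lock1 to [0, 39]
	 * and creates a new lock for the remainder, [60, 99]; if the
	 * two locks share a start or end point, lock1 is merely
	 * shrunk and no new lock is needed.
	 */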
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock %p for ", tag, (void *)lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
	else
		printf("id %p", (void *)lock->lf_id);
	/* XXX no %qd in kernel.  Truncate. */
	printf(" in ino %lu on dev <%d, %d>, %s, start %ld, end %ld",
	    (u_long)lock->lf_inode->i_number,
	    major(lock->lf_inode->i_dev),
	    minor(lock->lf_inode->i_dev),
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", (long)lock->lf_start, (long)lock->lf_end);
	if (!TAILQ_EMPTY(&lock->lf_blkhd))
		printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	printf("%s: Lock list for ino %lu on dev <%d, %d>:\n",
	    tag, (u_long)lock->lf_inode->i_number,
	    major(lock->lf_inode->i_dev),
	    minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", (void *)lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %ld",
			    (long)((struct proc *)lf->lf_id)->p_pid);
		else
			printf("id %p", (void *)lf->lf_id);
		/* XXX no %qd in kernel.  Truncate. */
		printf(", %s, start %ld, end %ld",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (long)lf->lf_start, (long)lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			printf("\n\t\tlock request %p for ", (void *)blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %ld",
				    (long)((struct proc *)blk->lf_id)->p_pid);
			else
				printf("id %p", (void *)blk->lf_id);
			/* XXX no %qd in kernel.  Truncate. */
			printf(", %s, start %ld, end %ld",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
"unlock" : 818 "unknown", (long)blk->lf_start, 819 (long)blk->lf_end); 820 if (!TAILQ_EMPTY(&blk->lf_blkhd)) 821 panic("lf_printlist: bad list"); 822 } 823 printf("\n"); 824 } 825 } 826 #endif /* LOCKF_DEBUG */ 827