/* /freebsd/sys/kern/kern_lockf.c (revision 1e413cf93298b5b97441a21d9a50fdcd0ee9945e) */
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked when doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

static int	lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock(struct lockf *, struct lockf **);
static int	 lf_findoverlap(struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *
	 lf_getblock(struct lockf *);
static int	 lf_getlock(struct lockf *, struct flock *);
static int	 lf_setlock(struct lockf *, struct vnode *, struct lockf **);
static void	 lf_split(struct lockf *, struct lockf *, struct lockf **);
static void	 lf_wakelock(struct lockf *);
#ifdef LOCKF_DEBUG
static void	 lf_print(char *, struct lockf *);
static void	 lf_printlist(char *, struct lockf *);
#endif

/*
 * Advisory record locking support
 */
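/*
 * Illustrative userland sketch (not part of this file): a request
 * that reaches this routine via fcntl(2) and VOP_ADVLOCK().  "fd" is
 * assumed to be an open descriptor; the fields are as in fcntl(2):
 *
 *	struct flock fl;
 *	fl.l_type = F_WRLCK;		-- exclusive byte-range lock
 *	fl.l_whence = SEEK_SET;		-- l_start is an absolute offset
 *	fl.l_start = 0;
 *	fl.l_len = 100;			-- covers bytes [0, 99]
 *	fcntl(fd, F_SETLKW, &fl);	-- blocking form; F_WAIT is set,
 *					   so lf_setlock() may sleep
 */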
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t  a_id;
		int  a_op;
		struct flock *a_fl;
		int  a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	struct flock *fl = ap->a_fl;
	struct lockf *lock;
	struct vnode *vp = ap->a_vp;
	off_t start, end, oadd;
	struct lockf *clean, *n;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len < 0) {
		if (start == 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	} else if (fl->l_len == 0)
		end = -1;
	else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
	}
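	/*
	 * Worked examples of the conversion above (illustrative only,
	 * derived directly from the code):
	 *
	 *	l_whence=SEEK_SET, l_start=100, l_len=50  -> start=100, end=149
	 *	l_whence=SEEK_SET, l_start=100, l_len=0   -> start=100, end=-1 (to EOF)
	 *	l_whence=SEEK_SET, l_start=100, l_len=-50 -> start=50,  end=99
	 *	l_whence=SEEK_END, l_start=0,   l_len=0   -> start=size, end=-1
	 */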
	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Allocate a spare structure in case we have to split.
	 */
	clean = NULL;
	if (ap->a_op == F_SETLK || ap->a_op == F_UNLCK) {
		MALLOC(clean, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
		clean->lf_next = NULL;
	}
	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
	/*
	 * XXX The problem is that VTOI is UFS specific, so it will
	 * break LOCKF_DEBUG for all filesystems other than UFS because
	 * it casts the vnode's v_data pointer to struct inode *.
	 */
/*	lock->lf_inode = VTOI(ap->a_vp); */
	lock->lf_inode = (struct inode *)0;
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	VI_LOCK(vp);
	switch (ap->a_op) {
	case F_SETLK:
		error = lf_setlock(lock, vp, &clean);
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, &clean);
		lock->lf_next = clean;
		clean = lock;
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		lock->lf_next = clean;
		clean = lock;
		break;

	default:
		lock->lf_next = clean;
		clean = lock;
		error = EINVAL;
		break;
	}
	VI_UNLOCK(vp);
	for (lock = clean; lock != NULL; ) {
		n = lock->lf_next;
		free(lock, M_LOCKF);
		lock = n;
	}
	return (error);
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock, vp, clean)
	struct lockf *lock;
	struct vnode *vp;
	struct lockf **clean;
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the sleep priority.  PCATCH allows the sleep to be
	 * interrupted by signals.
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan the lock list for this file, looking for locks that
	 * would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Queue the structure for freeing and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			lock->lf_next = *clean;
			*clean = lock;
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
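		/*
		 * Example of such a cycle: process A holds [0,49] and
		 * sleeps waiting for B's lock on [50,99]; if B now
		 * requests [0,49] it would sleep waiting on A.  The
		 * scan below notices that the chain of sleepers leads
		 * back to us and returns EDEADLK instead of sleeping.
		 */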
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc;
			struct proc *nproc;
			struct thread *td;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
restart:
			nproc = NULL;
			PROC_SLOCK(wproc);
			FOREACH_THREAD_IN_PROC(wproc, td) {
				thread_lock(td);
				while (td->td_wchan &&
				    (td->td_wmesg == lockstr) &&
				    (i++ < maxlockdepth)) {
					waitblock = (struct lockf *)td->td_wchan;
					/* Get the owner of the blocking lock */
					waitblock = waitblock->lf_next;
					if ((waitblock->lf_flags & F_POSIX) == 0)
						break;
					nproc = (struct proc *)waitblock->lf_id;
					if (nproc == (struct proc *)lock->lf_id) {
						PROC_SUNLOCK(wproc);
						thread_unlock(td);
						lock->lf_next = *clean;
						*clean = lock;
						return (EDEADLK);
					}
				}
				thread_unlock(td);
			}
			PROC_SUNLOCK(wproc);
			wproc = nproc;
			if (wproc)
				goto restart;
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock, clean);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = msleep(lock, VI_MTX(vp), priority, lockstr, 0);
		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing a lock (in which case we have
		 * already been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			lock->lf_next = *clean;
			*clean = lock;
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			lock->lf_next = *clean;
			*clean = lock;
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lock->lf_next = *clean;
				*clean = lock;
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock, clean);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
					ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
					ltmp->lf_next = lock;
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			overlap->lf_next = *clean;
			*clean = overlap;
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wake up anyone we can.
 */
static int
lf_clearlock(unlock, clean)
	struct lockf *unlock;
	struct lockf **clean;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wake up the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			overlap->lf_next = *clean;
			*clean = overlap;
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock, clean);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			overlap->lf_next = *clean;
			*clean = overlap;
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
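/*
 * Example of the F_GETLK semantics implemented here: if another
 * process holds a write lock on bytes [0,99] and we ask about a read
 * lock on [50,59], fl is filled in with l_type=F_WRLCK, l_start=0,
 * l_len=100 and the holder's pid; if nothing would block us, only
 * l_type is touched and it is set to F_UNLCK.
 */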
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap; see if it blocks us.
		 */
		if (lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us.
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
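/*
 * Pictorial summary of the cases distinguished below (added for
 * clarity; "lock" is the request, "lf" an existing lock on the list,
 * X marks covered bytes):
 *
 *	case 0:  lock ..XXXX......	lf ........XX..	(disjoint)
 *	case 1:  lock ...XXXX.....	lf ...XXXX.....
 *	case 2:  lock ....XX......	lf ...XXXX.....
 *	case 3:  lock ...XXXX.....	lf ....XX......
 *	case 4:  lock ....XXXX....	lf ..XXXX......
 *	case 5:  lock ..XXXX......	lf ....XXXX....
 */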
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
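/*
 * Example: if lock1 covers [0,99] and lock2 covers [40,59], lock1 is
 * trimmed to [0,39] and the preallocated spare lock becomes [60,99].
 * When the two regions share a starting or ending offset, lock1 is
 * simply shrunk and no third piece is needed.
 */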
static void
lf_split(lock1, lock2, split)
	struct lockf *lock1;
	struct lockf *lock2;
	struct lockf **split;
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.  We use the preallocated
	 * splitlock so we don't have to block.
	 */
	splitlock = *split;
	KASSERT(splitlock != NULL, ("no split"));
	*split = splitlock->lf_next;
	bcopy(lock1, splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wake up a block list.
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup(wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock %p for ", tag, (void *)lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
	else
		printf("id %p", (void *)lock->lf_id);
	if (lock->lf_inode != (struct inode *)0)
		printf(" in ino %ju on dev <%s>, %s, start %jd, end %jd",
		    (uintmax_t)lock->lf_inode->i_number,
		    devtoname(lock->lf_inode->i_dev),
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
	else
		printf(" %s, start %jd, end %jd",
		    lock->lf_type == F_RDLCK ? "shared" :
		    lock->lf_type == F_WRLCK ? "exclusive" :
		    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
		    (intmax_t)lock->lf_start, (intmax_t)lock->lf_end);
	if (!TAILQ_EMPTY(&lock->lf_blkhd))
		printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	if (lock->lf_inode == (struct inode *)0)
		return;

	printf("%s: Lock list for ino %ju on dev <%s>:\n",
	    tag, (uintmax_t)lock->lf_inode->i_number,
	    devtoname(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", (void *)lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %ld",
			    (long)((struct proc *)lf->lf_id)->p_pid);
		else
			printf("id %p", (void *)lf->lf_id);
		printf(", %s, start %jd, end %jd",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			printf("\n\t\tlock request %p for ", (void *)blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %ld",
				    (long)((struct proc *)blk->lf_id)->p_pid);
			else
				printf("id %p", (void *)blk->lf_id);
			printf(", %s, start %jd, end %jd",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (intmax_t)blk->lf_start,
			    (intmax_t)blk->lf_end);
			if (!TAILQ_EMPTY(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */