xref: /freebsd/sys/kern/kern_lockf.c (revision 6990ffd8a95caaba6858ad44ff1b3157d1efba8f)
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD$
 */

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

#include <machine/limits.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>


static int	lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

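/*
 * NOLOCKF terminates lf_next chains; SELF and OTHERS select, in
 * lf_findoverlap(), whether to match only the requester's own locks
 * or only locks owned by someone else.
 */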
#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
static int	 lf_clearlock __P((struct lockf *));
static int	 lf_findoverlap __P((struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *
	 lf_getblock __P((struct lockf *));
static int	 lf_getlock __P((struct lockf *, struct flock *));
static int	 lf_setlock __P((struct lockf *));
static void	 lf_split __P((struct lockf *, struct lockf *));
static void	 lf_wakelock __P((struct lockf *));

/*
 * Advisory record locking support
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t  a_id;
		int  a_op;
		struct flock *a_fl;
		int  a_flags;
	} */ *ap;
	struct lockf **head;
	u_quad_t size;
{
	register struct flock *fl = ap->a_fl;
	register struct lockf *lock;
	off_t start, end, oadd;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len < 0) {
		if (start == 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	} else if (fl->l_len == 0)
		end = -1;
	else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
	}
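	/*
	 * From here on the request is described by the byte range
	 * [start, end], where end == -1 means "to end of file".
	 */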
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
/*	lock->lf_inode = ip; */	/* XXX JH */
	lock->lf_type = fl->l_type;
	lock->lf_head = head;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation.
	 */
	switch(ap->a_op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority at which to sleep if we have to wait for the
	 * lock; PCATCH makes the sleep interruptible by signals.
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			struct thread *td;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			/* XXXKSE this is not complete under threads */
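			/*
			 * Follow the chain of waiters: a sleeping
			 * thread's td_wchan points at its own blocked
			 * lockf request, whose lf_next in turn points at
			 * the lock blocking it.  If that chain leads back
			 * to us within maxlockdepth steps, granting this
			 * lock would complete a cycle, i.e. a deadlock.
			 */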
			wproc = (struct proc *)block->lf_id;
			mtx_lock_spin(&sched_lock);
			FOREACH_THREAD_IN_PROC(wproc, td) {
				while (td->td_wchan &&
				    (td->td_wmesg == lockstr) &&
				    (i++ < maxlockdepth)) {
					waitblock = (struct lockf *)td->td_wchan;
					/* Get the owner of the blocking lock */
					waitblock = waitblock->lf_next;
					if ((waitblock->lf_flags & F_POSIX) == 0)
						break;
					wproc = (struct proc *)waitblock->lf_id;
					if (wproc == (struct proc *)lock->lf_id) {
						mtx_unlock_spin(&sched_lock);
						free(lock, M_LOCKF);
						return (EDEADLK);
					}
				}
			}
			mtx_unlock_spin(&sched_lock);
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);
		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing a lock (in which case we have
		 * already been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
					ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
					ltmp->lf_next = lock;
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
		/*
		 * We've found an overlap; it blocks us only if one of
		 * the two locks is exclusive (two shared locks coexist).
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
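		/*
		 * The tests below rely on the earlier cases having
		 * already been excluded; e.g. the case 5 test alone
		 * would also match ranges that do not overlap at all,
		 * which case 0 has already filtered out.
		 */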
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		           (end == -1 ||
			   (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
			((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
			(end != -1) &&
			((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if we are splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
		wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock %p for ", tag, (void *)lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
	else
		printf("id %p", (void *)lock->lf_id);
	/* XXX no %qd in kernel.  Truncate. */
	printf(" in ino %lu on dev <%d, %d>, %s, start %ld, end %ld",
	    (u_long)lock->lf_inode->i_number,
	    major(lock->lf_inode->i_dev),
	    minor(lock->lf_inode->i_dev),
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", (long)lock->lf_start, (long)lock->lf_end);
	if (!TAILQ_EMPTY(&lock->lf_blkhd))
		printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	printf("%s: Lock list for ino %lu on dev <%d, %d>:\n",
	    tag, (u_long)lock->lf_inode->i_number,
	    major(lock->lf_inode->i_dev),
	    minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock %p for ",(void *)lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %ld",
			    (long)((struct proc *)lf->lf_id)->p_pid);
		else
			printf("id %p", (void *)lf->lf_id);
		/* XXX no %qd in kernel.  Truncate. */
		printf(", %s, start %ld, end %ld",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (long)lf->lf_start, (long)lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			printf("\n\t\tlock request %p for ", (void *)blk);
			if (blk->lf_flags & F_POSIX)
				printf("proc %ld",
				    (long)((struct proc *)blk->lf_id)->p_pid);
			else
				printf("id %p", (void *)blk->lf_id);
			/* XXX no %qd in kernel.  Truncate. */
			printf(", %s, start %ld, end %ld",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (long)blk->lf_start,
			    (long)blk->lf_end);
			if (!TAILQ_EMPTY(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */