xref: /freebsd/sys/kern/kern_lockf.c (revision 6c6c03be2ddb04c54e455122799923deaefa4114)
/*-
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Authors: Doug Rabson <dfr@rabson.org>
 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/taskqueue.h>

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

static int	lockf_debug = 0; /* control debug output */
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

struct owner_edge;
struct owner_vertex;
struct owner_vertex_list;
struct owner_graph;

#define NOLOCKF (struct lockf_entry *)0
#define SELF	0x1
#define OTHERS	0x2
static void	 lf_init(void *);
static int	 lf_hash_owner(caddr_t, struct flock *, int);
static int	 lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
    int);
static struct lockf_entry *
		 lf_alloc_lock(struct lock_owner *);
static void	 lf_free_lock(struct lockf_entry *);
static int	 lf_clearlock(struct lockf *, struct lockf_entry *);
static int	 lf_overlaps(struct lockf_entry *, struct lockf_entry *);
static int	 lf_blocks(struct lockf_entry *, struct lockf_entry *);
static void	 lf_free_edge(struct lockf_edge *);
static struct lockf_edge *
		 lf_alloc_edge(void);
static void	 lf_alloc_vertex(struct lockf_entry *);
static int	 lf_add_edge(struct lockf_entry *, struct lockf_entry *);
static void	 lf_remove_edge(struct lockf_edge *);
static void	 lf_remove_outgoing(struct lockf_entry *);
static void	 lf_remove_incoming(struct lockf_entry *);
static int	 lf_add_outgoing(struct lockf *, struct lockf_entry *);
static int	 lf_add_incoming(struct lockf *, struct lockf_entry *);
static int	 lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
    int);
static struct lockf_entry *
		 lf_getblock(struct lockf *, struct lockf_entry *);
static int	 lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
static void	 lf_insert_lock(struct lockf *, struct lockf_entry *);
static void	 lf_wakeup_lock(struct lockf *, struct lockf_entry *);
static void	 lf_update_dependancies(struct lockf *, struct lockf_entry *,
    int all, struct lockf_entry_list *);
static void	 lf_set_start(struct lockf *, struct lockf_entry *, off_t,
	struct lockf_entry_list*);
static void	 lf_set_end(struct lockf *, struct lockf_entry *, off_t,
	struct lockf_entry_list*);
static int	 lf_setlock(struct lockf *, struct lockf_entry *,
    struct vnode *, void **cookiep);
static int	 lf_cancel(struct lockf *, struct lockf_entry *, void *);
static void	 lf_split(struct lockf *, struct lockf_entry *,
    struct lockf_entry *, struct lockf_entry_list *);
#ifdef LOCKF_DEBUG
static int	 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *path);
static void	 graph_check(struct owner_graph *g, int checkorder);
static void	 graph_print_vertices(struct owner_vertex_list *set);
#endif
static int	 graph_delta_forward(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *delta);
static int	 graph_delta_backward(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *delta);
static int	 graph_add_indices(int *indices, int n,
    struct owner_vertex_list *set);
static int	 graph_assign_indices(struct owner_graph *g, int *indices,
    int nextunused, struct owner_vertex_list *set);
static int	 graph_add_edge(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y);
static void	 graph_remove_edge(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y);
static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
    struct lock_owner *lo);
static void	 graph_free_vertex(struct owner_graph *g,
    struct owner_vertex *v);
static struct owner_graph * graph_init(struct owner_graph *g);
#ifdef LOCKF_DEBUG
static void	 lf_print(char *, struct lockf_entry *);
static void	 lf_printlist(char *, struct lockf_entry *);
static void	 lf_print_owner(struct lock_owner *);
#endif

/*
 * This structure is used to keep track of both local and remote lock
 * owners. The lf_owner field of the struct lockf_entry points back at
 * the lock owner structure. Each possible lock owner (local proc for
 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
 * pair for remote locks) is represented by a unique instance of
 * struct lock_owner.
 *
 * If a lock owner has a lock that blocks some other lock or a lock
 * that is waiting for some other lock, it also has a vertex in the
 * owner_graph below.
 *
 * Locks:
 * (s)		locked by state->ls_lock
 * (S)		locked by lf_lock_states_lock
 * (l)		locked by lf_lock_owners_lock
 * (g)		locked by lf_owner_graph_lock
 * (c)		const until freeing
 */
#define	LOCK_OWNER_HASH_SIZE	256

struct lock_owner {
	LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
	int	lo_refs;	    /* (l) Number of locks referring to this */
	int	lo_flags;	    /* (c) Flags passed to lf_advlock */
	caddr_t	lo_id;		    /* (c) Id value passed to lf_advlock */
	pid_t	lo_pid;		    /* (c) Process Id of the lock owner */
	int	lo_sysid;	    /* (c) System Id of the lock owner */
	struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
};

LIST_HEAD(lock_owner_list, lock_owner);

static struct sx		lf_lock_states_lock;
static struct lockf_list	lf_lock_states; /* (S) */
static struct sx		lf_lock_owners_lock;
static struct lock_owner_list	lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */

/*
 * Structures for deadlock detection.
 *
 * We have two types of directed graph, the first is the set of locks,
 * both active and pending on a vnode. Within this graph, active locks
 * are terminal nodes in the graph (i.e. have no out-going
 * edges). Pending locks have out-going edges to each blocking active
 * lock that prevents the lock from being granted and also to each
 * older pending lock that would block them if it was active. The
 * graph for each vnode is naturally acyclic; new edges are only ever
 * added to or from new nodes (either new pending locks which only add
 * out-going edges or new active locks which only add in-coming edges)
 * therefore they cannot create loops in the lock graph.
 *
 * The second graph is a global graph of lock owners. Each lock owner
 * is a vertex in that graph and an edge is added to the graph
 * whenever an edge is added to a vnode graph, with end points
 * corresponding to the owner of the new pending lock and the owner of the
 * lock upon which it waits. In order to prevent deadlock, we only add
 * an edge to this graph if the new edge would not create a cycle.
 *
 * The lock owner graph is topologically sorted, i.e. if a node has
 * any outgoing edges, then it has an order strictly less than any
 * node to which it has an outgoing edge. We preserve this ordering
 * (and detect cycles) on edge insertion using Algorithm PK from the
 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
 * No. 1.7)
 */
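/*
 * Worked example: if owner A waits on a lock held by owner B (edge
 * A->B) and B waits on a lock held by C (edge B->C), the topological
 * order must satisfy order(A) < order(B) < order(C). Should C then
 * request a lock held by A, the new edge C->A would close the cycle
 * A->B->C->A; graph_add_edge() detects this and fails with EDEADLK,
 * which lf_add_outgoing() propagates so the requester returns an
 * error instead of sleeping forever.
 */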
struct owner_vertex;

struct owner_edge {
	LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
	LIST_ENTRY(owner_edge) e_inlink;  /* (g) link to's in-edge list */
	int		e_refs;		  /* (g) number of times added */
	struct owner_vertex *e_from;	  /* (c) out-going from here */
	struct owner_vertex *e_to;	  /* (c) in-coming to here */
};
LIST_HEAD(owner_edge_list, owner_edge);

struct owner_vertex {
	TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
	uint32_t	v_gen;		  /* (g) workspace for edge insertion */
	int		v_order;	  /* (g) order of vertex in graph */
	struct owner_edge_list v_outedges;/* (g) list of out-edges */
	struct owner_edge_list v_inedges; /* (g) list of in-edges */
	struct lock_owner *v_owner;	  /* (c) corresponding lock owner */
};
TAILQ_HEAD(owner_vertex_list, owner_vertex);

struct owner_graph {
	struct owner_vertex** g_vertices; /* (g) pointers to vertices */
	int		g_size;		  /* (g) number of vertices */
	int		g_space;	  /* (g) space allocated for vertices */
	int		*g_indexbuf;	  /* (g) workspace for loop detection */
	uint32_t	g_gen;		  /* (g) increment when re-ordering */
};

static struct sx		lf_owner_graph_lock;
static struct owner_graph	lf_owner_graph;

/*
 * Initialise various structures and locks.
 */
static void
lf_init(void *dummy)
{
	int i;

	sx_init(&lf_lock_states_lock, "lock states lock");
	LIST_INIT(&lf_lock_states);

	sx_init(&lf_lock_owners_lock, "lock owners lock");
	for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
		LIST_INIT(&lf_lock_owners[i]);

	sx_init(&lf_owner_graph_lock, "owner graph lock");
	graph_init(&lf_owner_graph);
}
SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);

/*
 * Generate a hash value for a lock owner.
 */
static int
lf_hash_owner(caddr_t id, struct flock *fl, int flags)
{
	uint32_t h;

	if (flags & F_REMOTE) {
		h = HASHSTEP(0, fl->l_pid);
		h = HASHSTEP(h, fl->l_sysid);
	} else if (flags & F_FLOCK) {
		h = ((uintptr_t) id) >> 7;
	} else {
		struct proc *p = (struct proc *) id;
		h = HASHSTEP(0, p->p_pid);
		h = HASHSTEP(h, 0);
	}

	return (h % LOCK_OWNER_HASH_SIZE);
}
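
/*
 * For example, a POSIX (fcntl) lock owner is keyed by the local
 * process, so a lock taken by pid 1234 hashes from that pid; an
 * flock() owner is the struct file passed as 'id', hashed from its
 * pointer value (shifted to discard alignment bits); and a remote
 * owner (F_REMOTE, e.g. from a remote lock manager) hashes from the
 * <l_pid, l_sysid> pair so that owners from different client systems
 * stay distinct even when their pid values collide.
 */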

/*
 * Return true if a lock owner matches the details passed to
 * lf_advlock.
 */
static int
lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
    int flags)
{
	if (flags & F_REMOTE) {
		return lo->lo_pid == fl->l_pid
			&& lo->lo_sysid == fl->l_sysid;
	} else {
		return lo->lo_id == id;
	}
}

static struct lockf_entry *
lf_alloc_lock(struct lock_owner *lo)
{
	struct lockf_entry *lf;

	lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);

#ifdef LOCKF_DEBUG
	if (lockf_debug & 4)
		printf("Allocated lock %p\n", lf);
#endif
	if (lo) {
		sx_xlock(&lf_lock_owners_lock);
		lo->lo_refs++;
		sx_xunlock(&lf_lock_owners_lock);
		lf->lf_owner = lo;
	}

	return (lf);
}

static void
lf_free_lock(struct lockf_entry *lock)
{
	/*
	 * Adjust the lock_owner reference count and
	 * reclaim the entry if this is the last lock
	 * for that owner.
	 */
	struct lock_owner *lo = lock->lf_owner;
	if (lo) {
		KASSERT(LIST_EMPTY(&lock->lf_outedges),
		    ("freeing lock with dependancies"));
		KASSERT(LIST_EMPTY(&lock->lf_inedges),
		    ("freeing lock with dependants"));
		sx_xlock(&lf_lock_owners_lock);
		KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
		lo->lo_refs--;
		if (lo->lo_refs == 0) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 1)
				printf("lf_free_lock: freeing lock owner %p\n",
				    lo);
#endif
			if (lo->lo_vertex) {
				sx_xlock(&lf_owner_graph_lock);
				graph_free_vertex(&lf_owner_graph,
				    lo->lo_vertex);
				sx_xunlock(&lf_owner_graph_lock);
			}
			LIST_REMOVE(lo, lo_link);
			free(lo, M_LOCKF);
#ifdef LOCKF_DEBUG
			if (lockf_debug & 4)
				printf("Freed lock owner %p\n", lo);
#endif
		}
		sx_unlock(&lf_lock_owners_lock);
	}
	if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
		vrele(lock->lf_vnode);
		lock->lf_vnode = NULL;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 4)
		printf("Freed lock %p\n", lock);
#endif
	free(lock, M_LOCKF);
}

/*
 * Advisory record locking support
 */
int
lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
    u_quad_t size)
{
	struct lockf *state, *freestate = NULL;
	struct flock *fl = ap->a_fl;
	struct lockf_entry *lock;
	struct vnode *vp = ap->a_vp;
	caddr_t id = ap->a_id;
	int flags = ap->a_flags;
	int hash;
	struct lock_owner *lo;
	off_t start, end, oadd;
	int error;

	/*
	 * Handle the F_UNLCKSYS case first - no need to mess about
	 * creating a lock owner for this one.
	 */
	if (ap->a_op == F_UNLCKSYS) {
		lf_clearremotesys(fl->l_sysid);
		return (0);
	}

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len < 0) {
		if (start == 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	} else if (fl->l_len == 0) {
		end = OFF_MAX;
	} else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
	}
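	/*
	 * For example, l_whence = SEEK_SET, l_start = 100, l_len = 50
	 * yields the range [100..149]; l_len = -50 instead yields
	 * [50..99] (a negative length counts backwards from l_start);
	 * and l_len = 0 locks to end-of-file as [100..OFF_MAX].
	 */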
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	VI_LOCK(vp);
	if ((*statep) == NULL) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			VI_UNLOCK(vp);
			return (0);
		}
	}
	VI_UNLOCK(vp);

	/*
	 * Map our arguments to an existing lock owner or create one
	 * if this is the first time we have seen this owner.
	 */
	hash = lf_hash_owner(id, fl, flags);
	sx_xlock(&lf_lock_owners_lock);
	LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link)
		if (lf_owner_matches(lo, id, fl, flags))
			break;
	if (!lo) {
		/*
		 * We initialise the lock with a reference
		 * count which matches the new lockf_entry
		 * structure created below.
		 */
		lo = malloc(sizeof(struct lock_owner), M_LOCKF,
		    M_WAITOK|M_ZERO);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 4)
			printf("Allocated lock owner %p\n", lo);
#endif

		lo->lo_refs = 1;
		lo->lo_flags = flags;
		lo->lo_id = id;
		if (flags & F_REMOTE) {
			lo->lo_pid = fl->l_pid;
			lo->lo_sysid = fl->l_sysid;
		} else if (flags & F_FLOCK) {
			lo->lo_pid = -1;
			lo->lo_sysid = 0;
		} else {
			struct proc *p = (struct proc *) id;
			lo->lo_pid = p->p_pid;
			lo->lo_sysid = 0;
		}
		lo->lo_vertex = NULL;

#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			printf("lf_advlockasync: new lock owner %p ", lo);
			lf_print_owner(lo);
			printf("\n");
		}
#endif

		LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link);
	} else {
		/*
		 * We have seen this lock owner before, increase its
		 * reference count to account for the new lockf_entry
		 * structure we create below.
		 */
		lo->lo_refs++;
	}
	sx_xunlock(&lf_lock_owners_lock);

	/*
	 * Create the lockf structure. We initialise the lf_owner
	 * field here instead of in lf_alloc_lock() to avoid paying
	 * the lf_lock_owners_lock tax twice.
	 */
	lock = lf_alloc_lock(NULL);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_owner = lo;
	lock->lf_vnode = vp;
	if (flags & F_REMOTE) {
		/*
		 * For remote locks, the caller may release its ref to
		 * the vnode at any time - we have to ref it here to
		 * prevent it from being recycled unexpectedly.
		 */
		vref(vp);
	}

	/*
	 * XXX The problem is that VTOI is ufs specific, so it will
	 * break LOCKF_DEBUG for all other FS's other than UFS because
	 * it casts the vnode->data ptr to struct inode *.
	 */
/*	lock->lf_inode = VTOI(ap->a_vp); */
	lock->lf_inode = (struct inode *)0;
	lock->lf_type = fl->l_type;
	LIST_INIT(&lock->lf_outedges);
	LIST_INIT(&lock->lf_inedges);
	lock->lf_async_task = ap->a_task;
	lock->lf_flags = ap->a_flags;

	/*
	 * Do the requested operation. First find our state structure
	 * and create a new one if necessary - the caller's *statep
	 * variable and the state's ls_threads count are protected by
	 * the vnode interlock.
	 */
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		VI_UNLOCK(vp);
		lf_free_lock(lock);
		return (ENOENT);
	}

	/*
	 * Allocate a state structure if necessary.
	 */
	state = *statep;
	if (state == NULL) {
		struct lockf *ls;

		VI_UNLOCK(vp);

		ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
		sx_init(&ls->ls_lock, "ls_lock");
		LIST_INIT(&ls->ls_active);
		LIST_INIT(&ls->ls_pending);
		ls->ls_threads = 1;

		sx_xlock(&lf_lock_states_lock);
		LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
		sx_xunlock(&lf_lock_states_lock);

		/*
		 * Cope if we lost a race with some other thread while
		 * trying to allocate memory.
		 */
		VI_LOCK(vp);
		if (vp->v_iflag & VI_DOOMED) {
			VI_UNLOCK(vp);
			sx_xlock(&lf_lock_states_lock);
			LIST_REMOVE(ls, ls_link);
			sx_xunlock(&lf_lock_states_lock);
			sx_destroy(&ls->ls_lock);
			free(ls, M_LOCKF);
			lf_free_lock(lock);
			return (ENOENT);
		}
		if ((*statep) == NULL) {
			state = *statep = ls;
			VI_UNLOCK(vp);
		} else {
			state = *statep;
			state->ls_threads++;
			VI_UNLOCK(vp);

			sx_xlock(&lf_lock_states_lock);
			LIST_REMOVE(ls, ls_link);
			sx_xunlock(&lf_lock_states_lock);
			sx_destroy(&ls->ls_lock);
			free(ls, M_LOCKF);
		}
	} else {
		state->ls_threads++;
		VI_UNLOCK(vp);
	}

	sx_xlock(&state->ls_lock);
	switch(ap->a_op) {
	case F_SETLK:
		error = lf_setlock(state, lock, vp, ap->a_cookiep);
		break;

	case F_UNLCK:
		error = lf_clearlock(state, lock);
		lf_free_lock(lock);
		break;

	case F_GETLK:
		error = lf_getlock(state, lock, fl);
		lf_free_lock(lock);
		break;

	case F_CANCEL:
		if (ap->a_cookiep)
			error = lf_cancel(state, lock, *ap->a_cookiep);
		else
			error = EINVAL;
		lf_free_lock(lock);
		break;

	default:
		lf_free_lock(lock);
		error = EINVAL;
		break;
	}

#ifdef INVARIANTS
	/*
	 * Check for some can't happen stuff. In this case, the active
	 * lock list becoming disordered or containing mutually
	 * blocking locks. We also check the pending list for locks
	 * which should be active (i.e. have no out-going edges).
	 */
	LIST_FOREACH(lock, &state->ls_active, lf_link) {
		struct lockf_entry *lf;
		if (LIST_NEXT(lock, lf_link))
			KASSERT((lock->lf_start
				<= LIST_NEXT(lock, lf_link)->lf_start),
			    ("locks disordered"));
		LIST_FOREACH(lf, &state->ls_active, lf_link) {
			if (lock == lf)
				break;
			KASSERT(!lf_blocks(lock, lf),
			    ("two conflicting active locks"));
			if (lock->lf_owner == lf->lf_owner)
				KASSERT(!lf_overlaps(lock, lf),
				    ("two overlapping locks from same owner"));
		}
	}
	LIST_FOREACH(lock, &state->ls_pending, lf_link) {
		KASSERT(!LIST_EMPTY(&lock->lf_outedges),
		    ("pending lock which should be active"));
	}
#endif
	sx_xunlock(&state->ls_lock);

	/*
	 * If we have removed the last active lock on the vnode and
	 * this is the last thread that was in-progress, we can free
	 * the state structure. We update the caller's pointer inside
	 * the vnode interlock but call free outside.
	 *
	 * XXX alternatively, keep the state structure around until
	 * the filesystem recycles - requires a callback from the
	 * filesystem.
	 */
	VI_LOCK(vp);

	state->ls_threads--;
	wakeup(state);
	if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
		KASSERT(LIST_EMPTY(&state->ls_pending),
		    ("freeing state with pending locks"));
		freestate = state;
		*statep = NULL;
	}

	VI_UNLOCK(vp);

	if (freestate) {
		sx_xlock(&lf_lock_states_lock);
		LIST_REMOVE(freestate, ls_link);
		sx_xunlock(&lf_lock_states_lock);
		sx_destroy(&freestate->ls_lock);
		free(freestate, M_LOCKF);
	}
	return (error);
}

int
lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
{
	struct vop_advlockasync_args a;

	a.a_vp = ap->a_vp;
	a.a_id = ap->a_id;
	a.a_op = ap->a_op;
	a.a_fl = ap->a_fl;
	a.a_flags = ap->a_flags;
	a.a_task = NULL;
	a.a_cookiep = NULL;

	return (lf_advlockasync(&a, statep, size));
}

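/*
 * A filesystem normally wires its VOP_ADVLOCK implementation straight
 * through to lf_advlock(), handing over a pointer to its per-vnode
 * lock state and the current file size. A minimal sketch (the node
 * type and field names here are illustrative only):
 *
 *	static int
 *	examplefs_advlock(struct vop_advlock_args *ap)
 *	{
 *		struct examplefs_node *np = VTOEXAMPLE(ap->a_vp);
 *
 *		return (lf_advlock(ap, &np->n_lockf, np->n_size));
 *	}
 */
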
void
lf_purgelocks(struct vnode *vp, struct lockf **statep)
{
	struct lockf *state;
	struct lockf_entry *lock, *nlock;

	/*
	 * For this to work correctly, the caller must ensure that no
	 * other threads enter the locking system for this vnode,
	 * e.g. by checking VI_DOOMED. We wake up any threads that are
	 * sleeping waiting for locks on this vnode and then free all
	 * the remaining locks.
	 */
	VI_LOCK(vp);
	state = *statep;
	if (state) {
		state->ls_threads++;
		VI_UNLOCK(vp);

		sx_xlock(&state->ls_lock);
		sx_xlock(&lf_owner_graph_lock);
		LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
			LIST_REMOVE(lock, lf_link);
			lf_remove_outgoing(lock);
			lf_remove_incoming(lock);

			/*
			 * If it's an async lock, we can just free it
			 * here, otherwise we let the sleeping thread
			 * free it.
			 */
			if (lock->lf_async_task) {
				lf_free_lock(lock);
			} else {
				lock->lf_flags |= F_INTR;
				wakeup(lock);
			}
		}
		sx_xunlock(&lf_owner_graph_lock);
		sx_xunlock(&state->ls_lock);

		/*
		 * Wait for all other threads, sleeping and otherwise,
		 * to leave.
		 */
		VI_LOCK(vp);
		while (state->ls_threads > 1)
			msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
		*statep = NULL;
		VI_UNLOCK(vp);

		/*
		 * We can just free all the active locks since they
		 * will have no dependancies (we removed them all
		 * above). We don't need to bother locking since we
		 * are the last thread using this state structure.
		 */
		LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
			LIST_REMOVE(lock, lf_link);
			lf_free_lock(lock);
		}
		sx_xlock(&lf_lock_states_lock);
		LIST_REMOVE(state, ls_link);
		sx_xunlock(&lf_lock_states_lock);
		sx_destroy(&state->ls_lock);
		free(state, M_LOCKF);
	} else {
		VI_UNLOCK(vp);
	}
}

/*
 * Return non-zero if locks 'x' and 'y' overlap.
 */
static int
lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
{

	return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
}

/*
 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
 */
static int
lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
{

	return x->lf_owner != y->lf_owner
		&& (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
		&& lf_overlaps(x, y);
}
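
/*
 * For example, shared (F_RDLCK) locks on [0..10] and [5..15] from
 * different owners never block each other; turning either one into an
 * F_WRLCK would make them conflict. Locks from the same owner never
 * block each other regardless of type.
 */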

/*
 * Allocate a lock edge from the free list
 */
static struct lockf_edge *
lf_alloc_edge(void)
{

	return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
}

/*
 * Free a lock edge.
 */
static void
lf_free_edge(struct lockf_edge *e)
{

	free(e, M_LOCKF);
}


/*
 * Ensure that the lock's owner has a corresponding vertex in the
 * owner graph.
 */
static void
lf_alloc_vertex(struct lockf_entry *lock)
{
	struct owner_graph *g = &lf_owner_graph;

	if (!lock->lf_owner->lo_vertex)
		lock->lf_owner->lo_vertex =
			graph_alloc_vertex(g, lock->lf_owner);
}

/*
 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
 * the new edge would cause a cycle in the owner graph.
 */
static int
lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
{
	struct owner_graph *g = &lf_owner_graph;
	struct lockf_edge *e;
	int error;

#ifdef INVARIANTS
	LIST_FOREACH(e, &x->lf_outedges, le_outlink)
		KASSERT(e->le_to != y, ("adding lock edge twice"));
#endif

	/*
	 * Make sure the two owners have entries in the owner graph.
	 */
	lf_alloc_vertex(x);
	lf_alloc_vertex(y);

	error = graph_add_edge(g, x->lf_owner->lo_vertex,
	    y->lf_owner->lo_vertex);
	if (error)
		return (error);

	e = lf_alloc_edge();
	LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
	LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
	e->le_from = x;
	e->le_to = y;

	return (0);
}

/*
 * Remove an edge from the lock graph.
 */
static void
lf_remove_edge(struct lockf_edge *e)
{
	struct owner_graph *g = &lf_owner_graph;
	struct lockf_entry *x = e->le_from;
	struct lockf_entry *y = e->le_to;

	graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
	LIST_REMOVE(e, le_outlink);
	LIST_REMOVE(e, le_inlink);
	e->le_from = NULL;
	e->le_to = NULL;
	lf_free_edge(e);
}

/*
 * Remove all out-going edges from lock x.
 */
static void
lf_remove_outgoing(struct lockf_entry *x)
{
	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
		lf_remove_edge(e);
	}
}

/*
 * Remove all in-coming edges from lock x.
 */
static void
lf_remove_incoming(struct lockf_entry *x)
{
	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
		lf_remove_edge(e);
	}
}

/*
 * Walk the list of locks for the file and create an out-going edge
 * from lock to each blocking lock.
 */
static int
lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;
	int error;

	LIST_FOREACH(overlap, &state->ls_active, lf_link) {
		/*
		 * We may assume that the active list is sorted by
		 * lf_start.
		 */
		if (overlap->lf_start > lock->lf_end)
			break;
		if (!lf_blocks(lock, overlap))
			continue;

		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(lock, overlap);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_outgoing(lock);
			return (error);
		}
	}

	/*
	 * We also need to add edges to sleeping locks that block
	 * us. This ensures that lf_wakeup_lock cannot grant two
	 * mutually blocking locks simultaneously and also enforces a
	 * 'first come, first served' fairness model. Note that this
	 * only happens if we are blocked by at least one active lock
	 * due to the call to lf_getblock in lf_setlock below.
	 */
	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
		if (!lf_blocks(lock, overlap))
			continue;
		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(lock, overlap);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_outgoing(lock);
			return (error);
		}
	}

	return (0);
}

/*
 * Walk the list of pending locks for the file and create an in-coming
 * edge from lock to each blocking lock.
 */
static int
lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;
	int error;

	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
		if (!lf_blocks(lock, overlap))
			continue;

		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(overlap, lock);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_incoming(lock);
			return (error);
		}
	}
	return (0);
}

/*
 * Insert lock into the active list, keeping list entries ordered by
 * increasing values of lf_start.
 */
static void
lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *lf, *lfprev;

	if (LIST_EMPTY(&state->ls_active)) {
		LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
		return;
	}

	lfprev = NULL;
	LIST_FOREACH(lf, &state->ls_active, lf_link) {
		if (lf->lf_start > lock->lf_start) {
			LIST_INSERT_BEFORE(lf, lock, lf_link);
			return;
		}
		lfprev = lf;
	}
	LIST_INSERT_AFTER(lfprev, lock, lf_link);
}

/*
 * Wake up a sleeping lock and remove it from the pending list now
 * that all its dependancies have been resolved. The caller should
 * arrange for the lock to be added to the active list, adjusting any
 * existing locks for the same owner as needed.
 */
static void
lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
{

	/*
	 * Remove from ls_pending list and wake up the caller
	 * or start the async notification, as appropriate.
	 */
	LIST_REMOVE(wakelock, lf_link);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_wakeup_lock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
	if (wakelock->lf_async_task) {
		taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
	} else {
		wakeup(wakelock);
	}
}

/*
 * Re-check all dependant locks and remove edges to locks that we no
 * longer block. If 'all' is non-zero, the lock has been removed and
 * we must remove all the dependancies, otherwise it has simply been
 * reduced but remains active. Any pending locks which have been
 * unblocked are added to 'granted'.
 */
static void
lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
	struct lockf_entry_list *granted)
{
	struct lockf_edge *e, *ne;
	struct lockf_entry *deplock;

	LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
		deplock = e->le_from;
		if (all || !lf_blocks(lock, deplock)) {
			sx_xlock(&lf_owner_graph_lock);
			lf_remove_edge(e);
			sx_xunlock(&lf_owner_graph_lock);
			if (LIST_EMPTY(&deplock->lf_outedges)) {
				lf_wakeup_lock(state, deplock);
				LIST_INSERT_HEAD(granted, deplock, lf_link);
			}
		}
	}
}

/*
 * Set the start of an existing active lock, updating dependancies and
 * adding any newly woken locks to 'granted'.
 */
static void
lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
	struct lockf_entry_list *granted)
{

	KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
	lock->lf_start = new_start;
	LIST_REMOVE(lock, lf_link);
	lf_insert_lock(state, lock);
	lf_update_dependancies(state, lock, FALSE, granted);
}

/*
 * Set the end of an existing active lock, updating dependancies and
 * adding any newly woken locks to 'granted'.
 */
static void
lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
	struct lockf_entry_list *granted)
{

	KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
	lock->lf_end = new_end;
	lf_update_dependancies(state, lock, FALSE, granted);
}

/*
 * Add a lock to the active list, updating or removing any current
 * locks owned by the same owner and processing any pending locks that
 * become unblocked as a result. This code is also used for unlock
 * since the logic for updating existing locks is identical.
 *
 * As a result of processing the new lock, we may unblock existing
 * pending locks as a result of downgrading/unlocking. We simply
 * activate the newly granted locks by looping.
 *
 * Since the new lock already has its dependancies set up, we always
 * add it to the list (unless it's an unlock request). This may
 * fragment the lock list in some pathological cases but it's probably
 * not a real problem.
 */
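/*
 * For example, if owner A holds a write lock on [0..9] and owner B is
 * pending on a read lock for [5..5], then when A downgrades to a read
 * lock over [0..9] the new lock arrives here with its edges already
 * set up: case 1 removes A's old entry, lf_update_dependancies()
 * notices that B is no longer blocked and appends B's lock to
 * 'granted', and the outer loop then activates it in turn.
 */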
static void
lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap, *lf;
	struct lockf_entry_list granted;
	int ovcase;

	LIST_INIT(&granted);
	LIST_INSERT_HEAD(&granted, lock, lf_link);

	while (!LIST_EMPTY(&granted)) {
		lock = LIST_FIRST(&granted);
		LIST_REMOVE(lock, lf_link);

		/*
		 * Skip over locks owned by other processes.  Handle
		 * any locks that overlap and are owned by ourselves.
		 */
		overlap = LIST_FIRST(&state->ls_active);
		for (;;) {
			ovcase = lf_findoverlap(&overlap, lock, SELF);

#ifdef LOCKF_DEBUG
			if (ovcase && (lockf_debug & 2)) {
				printf("lf_setlock: overlap %d", ovcase);
				lf_print("", overlap);
			}
#endif
			/*
			 * Six cases:
			 *	0) no overlap
			 *	1) overlap == lock
			 *	2) overlap contains lock
			 *	3) lock contains overlap
			 *	4) overlap starts before lock
			 *	5) overlap ends after lock
			 */
			switch (ovcase) {
			case 0: /* no overlap */
				break;

			case 1: /* overlap == lock */
				/*
				 * We have already setup the
				 * dependants for the new lock, taking
				 * into account a possible downgrade
				 * or unlock. Remove the old lock.
				 */
				LIST_REMOVE(overlap, lf_link);
				lf_update_dependancies(state, overlap, TRUE,
					&granted);
				lf_free_lock(overlap);
				break;

			case 2: /* overlap contains lock */
				/*
				 * Just split the existing lock.
				 */
				lf_split(state, overlap, lock, &granted);
				break;

			case 3: /* lock contains overlap */
				/*
				 * Delete the overlap and advance to
				 * the next entry in the list.
				 */
				lf = LIST_NEXT(overlap, lf_link);
				LIST_REMOVE(overlap, lf_link);
				lf_update_dependancies(state, overlap, TRUE,
					&granted);
				lf_free_lock(overlap);
				overlap = lf;
				continue;

			case 4: /* overlap starts before lock */
				/*
				 * Just update the overlap end and
				 * move on.
				 */
				lf_set_end(state, overlap, lock->lf_start - 1,
				    &granted);
				overlap = LIST_NEXT(overlap, lf_link);
				continue;

			case 5: /* overlap ends after lock */
				/*
				 * Change the start of overlap and
				 * re-insert.
				 */
				lf_set_start(state, overlap, lock->lf_end + 1,
				    &granted);
				break;
			}
			break;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			if (lock->lf_type != F_UNLCK)
				lf_print("lf_activate_lock: activated", lock);
			else
				lf_print("lf_activate_lock: unlocked", lock);
			lf_printlist("lf_activate_lock", lock);
		}
#endif /* LOCKF_DEBUG */
		if (lock->lf_type != F_UNLCK)
			lf_insert_lock(state, lock);
	}
}

/*
 * Cancel a pending lock request, either as a result of a signal or a
 * cancel request for an async lock.
 */
static void
lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry_list granted;

	/*
	 * Note it is theoretically possible that cancelling this lock
	 * may allow some other pending lock to become
	 * active. Consider this case:
	 *
	 * Owner	Action		Result		Dependancies
	 *
	 * A:		lock [0..0]	succeeds
	 * B:		lock [2..2]	succeeds
	 * C:		lock [1..2]	blocked		C->B
	 * D:		lock [0..1]	blocked		C->B,D->A,D->C
	 * A:		unlock [0..0]			C->B,D->C
	 * C:		cancel [1..2]
	 */

	LIST_REMOVE(lock, lf_link);

	/*
	 * Removing out-going edges is simple.
	 */
	sx_xlock(&lf_owner_graph_lock);
	lf_remove_outgoing(lock);
	sx_xunlock(&lf_owner_graph_lock);

	/*
	 * Removing in-coming edges may allow some other lock to
	 * become active - we use lf_update_dependancies to figure
	 * this out.
	 */
	LIST_INIT(&granted);
	lf_update_dependancies(state, lock, TRUE, &granted);
	lf_free_lock(lock);

	/*
	 * Feed any newly active locks to lf_activate_lock.
	 */
	while (!LIST_EMPTY(&granted)) {
		lock = LIST_FIRST(&granted);
		LIST_REMOVE(lock, lf_link);
		lf_activate_lock(state, lock);
	}
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
    void **cookiep)
{
	struct lockf_entry *block;
	static char lockstr[] = "lockf";
	int priority, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	if (!(lock->lf_flags & F_NOINTR))
		priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(state, lock))) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0
		    && lock->lf_async_task == NULL) {
			lf_free_lock(lock);
			error = EAGAIN;
			goto out;
		}

		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			lf_activate_lock(state, lock);
			lock->lf_type = F_WRLCK;
		}

		/*
		 * We are blocked. Create edges to each blocking lock,
		 * checking for deadlock using the owner graph. For
		 * simplicity, we run deadlock detection for all
		 * locks, posix and otherwise.
		 */
		sx_xlock(&lf_owner_graph_lock);
		error = lf_add_outgoing(state, lock);
		sx_xunlock(&lf_owner_graph_lock);

		if (error) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 1)
				lf_print("lf_setlock: deadlock", lock);
#endif
			lf_free_lock(lock);
			goto out;
		}

		/*
		 * We have added edges to everything that blocks
		 * us. Sleep until they all go away.
		 */
		LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			struct lockf_edge *e;
			LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
				lf_print("lf_setlock: blocking on", e->le_to);
				lf_printlist("lf_setlock", e->le_to);
			}
		}
#endif /* LOCKF_DEBUG */

		if ((lock->lf_flags & F_WAIT) == 0) {
			/*
			 * The caller requested async notification -
			 * this callback happens when the blocking
			 * lock is released, allowing the caller to
			 * make another attempt to take the lock.
			 */
			*cookiep = (void *) lock;
			error = EINPROGRESS;
			goto out;
		}

		error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must
		 * remove our lock graph edges) and/or by another
		 * process releasing a lock (in which case our edges
		 * have already been removed and we have been moved to
		 * the active list). We may also have been woken by
		 * lf_purgelocks which we report to the caller as
		 * EINTR. In that case, lf_purgelocks will have
		 * removed our lock graph edges.
		 *
		 * Note that it is possible to receive a signal after
		 * we were successfully woken (and moved to the active
		 * list) but before we resumed execution. In this
		 * case, our lf_outedges list will be clear. We
		 * pretend there was no error.
		 *
		 * Note also, if we have been sleeping long enough, we
		 * may now have incoming edges from some newer lock
		 * which is waiting behind us in the queue.
		 */
		if (lock->lf_flags & F_INTR) {
			error = EINTR;
			lf_free_lock(lock);
			goto out;
		}
		if (LIST_EMPTY(&lock->lf_outedges)) {
			error = 0;
		} else {
			lf_cancel_lock(state, lock);
			goto out;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: granted", lock);
		}
#endif
		goto out;
	}
	/*
	 * It looks like we are going to grant the lock. First add
	 * edges from any currently pending lock that the new lock
	 * would block.
	 */
	sx_xlock(&lf_owner_graph_lock);
	error = lf_add_incoming(state, lock);
	sx_xunlock(&lf_owner_graph_lock);
	if (error) {
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1)
			lf_print("lf_setlock: deadlock", lock);
#endif
		lf_free_lock(lock);
		goto out;
	}

	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 */
	lf_activate_lock(state, lock);
	error = 0;
out:
	return (error);
}
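
/*
 * To summarise the async protocol: a caller that supplies a_task and
 * clears F_WAIT receives EINPROGRESS along with a cookie in *cookiep;
 * when the blocking locks go away, lf_wakeup_lock() enqueues the task
 * rather than calling wakeup(), and the caller retries the lock. A
 * request that is no longer wanted can be withdrawn by calling back
 * in with F_CANCEL and the saved cookie, which is handled by
 * lf_cancel() below.
 */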

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
{
	struct lockf_entry *overlap;

	overlap = LIST_FIRST(&state->ls_active);

	if (overlap == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */

	lf_activate_lock(state, unlock);

	return (0);
}

/*
 * Check whether there is a blocking lock, and if so return its
 * details in '*fl'.
 */
static int
lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
{
	struct lockf_entry *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(state, lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == OFF_MAX)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_owner->lo_pid;
		fl->l_sysid = block->lf_owner->lo_sysid;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Cancel an async lock request.
 */
static int
lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
{
	struct lockf_entry *reallock;

	/*
	 * We need to match this request with an existing lock
	 * request.
	 */
	LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
		if ((void *) reallock == cookie) {
			/*
			 * Double-check that this lock looks right
			 * (maybe use a rolling ID for the cancel
			 * cookie instead?)
			 */
			if (!(reallock->lf_vnode == lock->lf_vnode
				&& reallock->lf_start == lock->lf_start
				&& reallock->lf_end == lock->lf_end)) {
				return (ENOENT);
			}

			/*
			 * Make sure this lock was async and then just
			 * remove it from its wait lists.
			 */
			if (!reallock->lf_async_task) {
				return (ENOENT);
			}

			/*
			 * Note that since any other thread must take
			 * state->ls_lock before it can possibly
			 * trigger the async callback, we are safe
			 * from a race with lf_wakeup_lock, i.e. we
			 * can free the lock (actually our caller does
			 * this).
			 */
			lf_cancel_lock(state, reallock);
			return (0);
		}
	}

	/*
	 * We didn't find a matching lock - not much we can do here.
	 */
	return (ENOENT);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf_entry *
lf_getblock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;

	LIST_FOREACH(overlap, &state->ls_active, lf_link) {
		/*
		 * We may assume that the active list is sorted by
		 * lf_start.
		 */
		if (overlap->lf_start > lock->lf_end)
			break;
		if (!lf_blocks(lock, overlap))
			continue;
		return (overlap);
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to find an overlapping lock (if
 * any) and return a classification of that overlap.
 *
 * Arguments:
 *	*overlap	The place in the lock list to start looking
 *	lock		The lock which is being tested
 *	type		Pass 'SELF' to test only locks with the same
 *			owner as lock, or 'OTHERS' to test only locks
 *			with a different owner
 *
 * Returns one of six values:
 *	0) no overlap
 *	1) overlap == lock
 *	2) overlap contains lock
 *	3) lock contains overlap
 *	4) overlap starts before lock
 *	5) overlap ends after lock
 *
 * If there is an overlapping lock, '*overlap' is set to point at the
 * overlapping lock.
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
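/*
 * Pictorially, with the lock being tested drawn as xxxx and the
 * candidate overlap as yyyy, the six cases look like:
 *
 *	0) no overlap:			....xxxx....	yy..........
 *	1) overlap == lock:		....xxxx....	....yyyy....
 *	2) overlap contains lock:	....xxxx....	...yyyyyy...
 *	3) lock contains overlap:	....xxxx....	.....yy.....
 *	4) overlap starts before lock:	....xxxx....	..yyyy......
 *	5) overlap ends after lock:	....xxxx....	......yyyy..
 */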
static int
lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
{
	struct lockf_entry *lf;
	off_t start, end;
	int res;

	if ((*overlap) == NOLOCKF) {
		return (0);
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	res = 0;
	while (*overlap) {
		lf = *overlap;
		if (lf->lf_start > end)
			break;
		if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
		    ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
			*overlap = LIST_NEXT(lf, lf_link);
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if (start > lf->lf_end) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			*overlap = LIST_NEXT(lf, lf_link);
			continue;
		}
		if (lf->lf_start == start && lf->lf_end == end) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			res = 1;
			break;
		}
		if (lf->lf_start <= start && lf->lf_end >= end) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			res = 2;
			break;
		}
		if (start <= lf->lf_start && end >= lf->lf_end) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			res = 3;
			break;
		}
		if (lf->lf_start < start && lf->lf_end >= start) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			res = 4;
			break;
		}
		if (lf->lf_start > start && lf->lf_end > end) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			res = 5;
			break;
		}
		panic("lf_findoverlap: default");
	}
	return (res);
}

/*
 * Split the existing 'lock1', based on the extent of the lock
 * described by 'lock2'. The existing lock should cover 'lock2'
 * entirely.
 *
 * Any pending locks which have been unblocked are added to
 * 'granted'.
 */
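/*
 * For example, splitting lock1 = [0..99] around lock2 = [40..59]
 * leaves lock1 as [0..39] and creates a new entry covering [60..99].
 * If lock2 touches either end of lock1 (say lock2 = [0..39]), no new
 * entry is needed and lock1 is simply trimmed to [40..99].
 */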
static void
lf_split(struct lockf *state, struct lockf_entry *lock1,
    struct lockf_entry *lock2, struct lockf_entry_list *granted)
{
	struct lockf_entry *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if we don't need to split at all.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lf_set_start(state, lock1, lock2->lf_end + 1, granted);
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lf_set_end(state, lock1, lock2->lf_start - 1, granted);
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.
	 */
	splitlock = lf_alloc_lock(lock1->lf_owner);
	memcpy(splitlock, lock1, sizeof *splitlock);
	if (splitlock->lf_flags & F_REMOTE)
		vref(splitlock->lf_vnode);

	/*
	 * This cannot cause a deadlock since any edges we would add
	 * to splitlock already exist in lock1. We must be sure to add
	 * necessary dependancies to splitlock before we reduce lock1;
	 * otherwise we may accidentally grant a pending lock that
	 * was blocked by the tail end of lock1.
	 */
	splitlock->lf_start = lock2->lf_end + 1;
	LIST_INIT(&splitlock->lf_outedges);
	LIST_INIT(&splitlock->lf_inedges);
	sx_xlock(&lf_owner_graph_lock);
	lf_add_incoming(state, splitlock);
	sx_xunlock(&lf_owner_graph_lock);

	lf_set_end(state, lock1, lock2->lf_start - 1, granted);

	/*
	 * OK, now link it in
	 */
	lf_insert_lock(state, splitlock);
}

struct lockdesc {
	STAILQ_ENTRY(lockdesc) link;
	struct vnode *vp;
	struct flock fl;
};
STAILQ_HEAD(lockdesclist, lockdesc);

int
lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
{
	struct lockf *ls;
	struct lockf_entry *lf;
	struct lockdesc *ldesc;
	struct lockdesclist locks;
	int error;

	/*
	 * In order to keep the locking simple, we iterate over the
	 * active lock lists to build a list of locks that need
	 * releasing. We then call the iterator for each one in turn.
	 *
	 * We take an extra reference to the vnode for the duration to
	 * make sure it doesn't go away before we are finished.
	 */
	STAILQ_INIT(&locks);
	sx_xlock(&lf_lock_states_lock);
	LIST_FOREACH(ls, &lf_lock_states, ls_link) {
		sx_xlock(&ls->ls_lock);
		LIST_FOREACH(lf, &ls->ls_active, lf_link) {
			if (lf->lf_owner->lo_sysid != sysid)
				continue;

			ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
			    M_WAITOK);
			ldesc->vp = lf->lf_vnode;
			vref(ldesc->vp);
			ldesc->fl.l_start = lf->lf_start;
			if (lf->lf_end == OFF_MAX)
				ldesc->fl.l_len = 0;
			else
				ldesc->fl.l_len =
					lf->lf_end - lf->lf_start + 1;
			ldesc->fl.l_whence = SEEK_SET;
			ldesc->fl.l_type = F_UNLCK;
			ldesc->fl.l_pid = lf->lf_owner->lo_pid;
			ldesc->fl.l_sysid = sysid;
			STAILQ_INSERT_TAIL(&locks, ldesc, link);
		}
		sx_xunlock(&ls->ls_lock);
	}
	sx_xunlock(&lf_lock_states_lock);

	/*
	 * Call the iterator function for each lock in turn. If the
	 * iterator returns an error code, just free the rest of the
	 * lockdesc structures.
	 */
	error = 0;
	while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
		STAILQ_REMOVE_HEAD(&locks, link);
		if (!error)
			error = fn(ldesc->vp, &ldesc->fl, arg);
		vrele(ldesc->vp);
		free(ldesc, M_LOCKF);
	}

	return (error);
}
1889 
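/*
 * A minimal sketch of an lf_iterator callback for use with
 * lf_iteratelocks_sysid() or lf_iteratelocks_vnode() (hypothetical;
 * "count_iterator" is not part of this file). A non-zero return from
 * the callback stops further invocations, but the loop above still
 * releases the vnode references and frees the remaining lockdesc
 * structures:
 *
 *	static int
 *	count_iterator(struct vnode *vp, struct flock *fl, void *arg)
 *	{
 *		int *count = arg;
 *
 *		(*count)++;
 *		return (0);
 *	}
 *
 *	int n = 0;
 *	lf_iteratelocks_sysid(sysid, count_iterator, &n);
 */
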
1890 int
1891 lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
1892 {
1893 	struct lockf *ls;
1894 	struct lockf_entry *lf;
1895 	struct lockdesc *ldesc;
1896 	struct lockdesclist locks;
1897 	int error;
1898 
1899 	/*
1900 	 * In order to keep the locking simple, we iterate over the
1901 	 * active lock lists to build a list of locks that need
1902 	 * releasing. We then call the iterator for each one in turn.
1903 	 *
1904 	 * We take an extra reference to the vnode for the duration to
1905 	 * make sure it doesn't go away before we are finished.
1906 	 */
1907 	STAILQ_INIT(&locks);
1908 	ls = vp->v_lockf;
1909 	if (!ls)
1910 		return (0);
1911 
1912 	sx_xlock(&ls->ls_lock);
1913 	LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1914 		ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1915 		    M_WAITOK);
1916 		ldesc->vp = lf->lf_vnode;
1917 		vref(ldesc->vp);
1918 		ldesc->fl.l_start = lf->lf_start;
1919 		if (lf->lf_end == OFF_MAX)
1920 			ldesc->fl.l_len = 0;
1921 		else
1922 			ldesc->fl.l_len =
1923 				lf->lf_end - lf->lf_start + 1;
1924 		ldesc->fl.l_whence = SEEK_SET;
1925 		ldesc->fl.l_type = F_UNLCK;
1926 		ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1927 		ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
1928 		STAILQ_INSERT_TAIL(&locks, ldesc, link);
1929 	}
1930 	sx_xunlock(&ls->ls_lock);
1931 
1932 	/*
1933 	 * Call the iterator function for each lock in turn. If the
1934 	 * iterator returns an error code, just free the rest of the
1935 	 * lockdesc structures.
1936 	 */
1937 	error = 0;
1938 	while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1939 		STAILQ_REMOVE_HEAD(&locks, link);
1940 		if (!error)
1941 			error = fn(ldesc->vp, &ldesc->fl, arg);
1942 		vrele(ldesc->vp);
1943 		free(ldesc, M_LOCKF);
1944 	}
1945 
1946 	return (error);
1947 }
1948 
1949 static int
1950 lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
1951 {
1952 
1953 	VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
1954 	return (0);
1955 }
1956 
1957 void
1958 lf_clearremotesys(int sysid)
1959 {
1960 
1961 	KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
1962 	lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
1963 }
1964 
1965 int
1966 lf_countlocks(int sysid)
1967 {
1968 	int i;
1969 	struct lock_owner *lo;
1970 	int count;
1971 
1972 	count = 0;
1973 	sx_xlock(&lf_lock_owners_lock);
1974 	for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
1975 		LIST_FOREACH(lo, &lf_lock_owners[i], lo_link)
1976 			if (lo->lo_sysid == sysid)
1977 				count += lo->lo_refs;
1978 	sx_xunlock(&lf_lock_owners_lock);
1979 
1980 	return (count);
1981 }
1982 
1983 #ifdef LOCKF_DEBUG
1984 
1985 /*
1986  * Return non-zero if y is reachable from x using a brute force
1987  * search. If reachable and path is non-null, return the route taken
1988  * in path.
1989  */
1990 static int
1991 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
1992     struct owner_vertex_list *path)
1993 {
1994 	struct owner_edge *e;
1995 
1996 	if (x == y) {
1997 		if (path)
1998 			TAILQ_INSERT_HEAD(path, x, v_link);
1999 		return (1);
2000 	}
2001 
2002 	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2003 		if (graph_reaches(e->e_to, y, path)) {
2004 			if (path)
2005 				TAILQ_INSERT_HEAD(path, x, v_link);
2006 			return (1);
2007 		}
2008 	}
2009 	return (0);
2010 }
2011 
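/*
 * Note (editorial): graph_reaches() recurses once per out-edge with
 * no visited set, so it may revisit vertices; that is acceptable
 * here since it is only compiled under LOCKF_DEBUG for consistency
 * checks and deadlock-path reporting on small graphs.
 */
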
2012 /*
2013  * Perform consistency checks on the graph. Make sure the values of
2014  * v_order are correct. If checkorder is non-zero, check that no
2015  * vertex can reach any other vertex with a smaller order.
2016  */
2017 static void
2018 graph_check(struct owner_graph *g, int checkorder)
2019 {
2020 	int i, j;
2021 
2022 	for (i = 0; i < g->g_size; i++) {
2023 		if (!g->g_vertices[i]->v_owner)
2024 			continue;
2025 		KASSERT(g->g_vertices[i]->v_order == i,
2026 		    ("lock graph vertices disordered"));
2027 		if (checkorder) {
2028 			for (j = 0; j < i; j++) {
2029 				if (!g->g_vertices[j]->v_owner)
2030 					continue;
2031 				KASSERT(!graph_reaches(g->g_vertices[i],
2032 					g->g_vertices[j], NULL),
2033 				    ("lock graph vertices disordered"));
2034 			}
2035 		}
2036 	}
2037 }
2038 
2039 static void
2040 graph_print_vertices(struct owner_vertex_list *set)
2041 {
2042 	struct owner_vertex *v;
2043 
2044 	printf("{ ");
2045 	TAILQ_FOREACH(v, set, v_link) {
2046 		printf("%d:", v->v_order);
2047 		lf_print_owner(v->v_owner);
2048 		if (TAILQ_NEXT(v, v_link))
2049 			printf(", ");
2050 	}
2051 	printf(" }\n");
2052 }
2053 
2054 #endif
2055 
2056 /*
2057  * Calculate the subset of vertices v from the affected region [y..x]
2058  * where v is reachable from y. Return -1 if a loop was detected
2059  * (i.e. x is reachable from y), otherwise the number of vertices in
2060  * this subset.
2061  */
2062 static int
2063 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
2064     struct owner_vertex *y, struct owner_vertex_list *delta)
2065 {
2066 	uint32_t gen;
2067 	struct owner_vertex *v;
2068 	struct owner_edge *e;
2069 	int n;
2070 
2071 	/*
2072 	 * We start with a set containing just y. Then for each vertex
2073 	 * v in the set so far unprocessed, we add each vertex that v
2074 	 * has an out-edge to and that is within the affected region
2075 	 * [y..x]. If we see the vertex x on our travels, stop
2076 	 * immediately.
2077 	 */
2078 	TAILQ_INIT(delta);
2079 	TAILQ_INSERT_TAIL(delta, y, v_link);
2080 	v = y;
2081 	n = 1;
2082 	gen = g->g_gen;
2083 	while (v) {
2084 		LIST_FOREACH(e, &v->v_outedges, e_outlink) {
2085 			if (e->e_to == x)
2086 				return (-1);
2087 			if (e->e_to->v_order < x->v_order
2088 			    && e->e_to->v_gen != gen) {
2089 				e->e_to->v_gen = gen;
2090 				TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
2091 				n++;
2092 			}
2093 		}
2094 		v = TAILQ_NEXT(v, v_link);
2095 	}
2096 
2097 	return (n);
2098 }
2099 
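/*
 * Note (editorial): in graph_delta_forward() the delta list doubles
 * as the BFS work queue: newly discovered vertices are appended with
 * TAILQ_INSERT_TAIL() and the TAILQ_NEXT() walk visits them in turn,
 * so no separate queue is needed. For example, with edges y->a and
 * a->b where only a lies within [y..x], the list grows to y, a and
 * the walk stops with n = 2.
 */
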
2100 /*
2101  * Calculate the subset of vertices v from the affected region [y..x]
2102  * where v reaches x. Return the number of vertices in this subset.
2103  */
2104 static int
2105 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
2106     struct owner_vertex *y, struct owner_vertex_list *delta)
2107 {
2108 	uint32_t gen;
2109 	struct owner_vertex *v;
2110 	struct owner_edge *e;
2111 	int n;
2112 
2113 	/*
2114 	 * We start with a set containing just x. Then for each vertex
2115 	 * v in the set so far unprocessed, we add each vertex that v
2116 	 * has an in-edge from and that is within the affected region
2117 	 * [y..x].
2118 	 */
2119 	TAILQ_INIT(delta);
2120 	TAILQ_INSERT_TAIL(delta, x, v_link);
2121 	v = x;
2122 	n = 1;
2123 	gen = g->g_gen;
2124 	while (v) {
2125 		LIST_FOREACH(e, &v->v_inedges, e_inlink) {
2126 			if (e->e_from->v_order > y->v_order
2127 			    && e->e_from->v_gen != gen) {
2128 				e->e_from->v_gen = gen;
2129 				TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
2130 				n++;
2131 			}
2132 		}
2133 		v = TAILQ_PREV(v, owner_vertex_list, v_link);
2134 	}
2135 
2136 	return (n);
2137 }
2138 
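/*
 * Note (editorial): graph_delta_backward() mirrors
 * graph_delta_forward(), but new vertices are inserted at the head
 * and the walk advances with TAILQ_PREV(), so the list again serves
 * as its own work queue while ending up ordered from deepest
 * ancestor to x.
 */
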
2139 static int
2140 graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
2141 {
2142 	struct owner_vertex *v;
2143 	int i, j;
2144 
2145 	TAILQ_FOREACH(v, set, v_link) {
2146 		for (i = n;
2147 		     i > 0 && indices[i - 1] > v->v_order; i--)
2148 			;
2149 		for (j = n - 1; j >= i; j--)
2150 			indices[j + 1] = indices[j];
2151 		indices[i] = v->v_order;
2152 		n++;
2153 	}
2154 
2155 	return (n);
2156 }
2157 
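/*
 * Worked example for graph_add_indices() (editorial): starting from
 * indices = { 2, 7 } with n = 2, adding a set whose members have
 * v_order 5 and 1 produces { 1, 2, 5, 7 } and returns 4; the
 * insertion sort keeps the index buffer in ascending order as each
 * v_order is merged in.
 */
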
2158 static int
2159 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
2160     struct owner_vertex_list *set)
2161 {
2162 	struct owner_vertex *v, *vlowest;
2163 
2164 	while (!TAILQ_EMPTY(set)) {
2165 		vlowest = NULL;
2166 		TAILQ_FOREACH(v, set, v_link) {
2167 			if (!vlowest || v->v_order < vlowest->v_order)
2168 				vlowest = v;
2169 		}
2170 		TAILQ_REMOVE(set, vlowest, v_link);
2171 		vlowest->v_order = indices[nextunused];
2172 		g->g_vertices[vlowest->v_order] = vlowest;
2173 		nextunused++;
2174 	}
2175 
2176 	return (nextunused);
2177 }
2178 
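/*
 * Example (editorial): given indices = { 1, 2, 5, 7 },
 * graph_assign_indices() hands out the next unused index to the
 * lowest-ordered vertex still in the set, so calling it on deltaB
 * first and then deltaF (as graph_add_edge() does) preserves the
 * relative order within each delta while placing all of deltaB
 * before all of deltaF.
 */
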
2179 static int
2180 graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
2181     struct owner_vertex *y)
2182 {
2183 	struct owner_edge *e;
2184 	struct owner_vertex_list deltaF, deltaB;
2185 	int nF, nB, n, vi, i;
2186 	int *indices;
2187 
2188 	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2189 
2190 	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2191 		if (e->e_to == y) {
2192 			e->e_refs++;
2193 			return (0);
2194 		}
2195 	}
2196 
2197 #ifdef LOCKF_DEBUG
2198 	if (lockf_debug & 8) {
2199 		printf("adding edge %d:", x->v_order);
2200 		lf_print_owner(x->v_owner);
2201 		printf(" -> %d:", y->v_order);
2202 		lf_print_owner(y->v_owner);
2203 		printf("\n");
2204 	}
2205 #endif
2206 	if (y->v_order < x->v_order) {
2207 		/*
2208 		 * The new edge violates the order. First find the set
2209 		 * of affected vertices reachable from y (deltaF) and
2210 		 * the set of affected vertices that reach x
2211 		 * (deltaB), using the graph generation number to
2212 		 * detect whether we have visited a given vertex
2213 		 * already. We re-order the graph so that each vertex
2214 		 * in deltaB appears before each vertex in deltaF.
2215 		 *
2216 		 * If x is a member of deltaF, then the new edge would
2217 		 * create a cycle. Otherwise, we may assume that
2218 		 * deltaF and deltaB are disjoint.
2219 		 */
2220 		g->g_gen++;
2221 		if (g->g_gen == 0) {
2222 			/*
2223 			 * Generation wrap.
2224 			 */
2225 			for (vi = 0; vi < g->g_size; vi++) {
2226 				g->g_vertices[vi]->v_gen = 0;
2227 			}
2228 			g->g_gen++;
2229 		}
2230 		nF = graph_delta_forward(g, x, y, &deltaF);
2231 		if (nF < 0) {
2232 #ifdef LOCKF_DEBUG
2233 			if (lockf_debug & 8) {
2234 				struct owner_vertex_list path;
2235 				printf("deadlock: ");
2236 				TAILQ_INIT(&path);
2237 				graph_reaches(y, x, &path);
2238 				graph_print_vertices(&path);
2239 			}
2240 #endif
2241 			return (EDEADLK);
2242 		}
2243 
2244 #ifdef LOCKF_DEBUG
2245 		if (lockf_debug & 8) {
2246 			printf("re-ordering graph vertices\n");
2247 			printf("deltaF = ");
2248 			graph_print_vertices(&deltaF);
2249 		}
2250 #endif
2251 
2252 		nB = graph_delta_backward(g, x, y, &deltaB);
2253 
2254 #ifdef LOCKF_DEBUG
2255 		if (lockf_debug & 8) {
2256 			printf("deltaB = ");
2257 			graph_print_vertices(&deltaB);
2258 		}
2259 #endif
2260 
2261 		/*
2262 		 * We first build a set of vertex indices (vertex
2263 		 * order values) that we may use, then we re-assign
2264 		 * orders first to those vertices in deltaB, then to
2265 		 * deltaF. Note that the contents of deltaF and deltaB
2266 		 * may be partially disordered - we perform an
2267 		 * insertion sort while building our index set.
2268 		 */
2269 		indices = g->g_indexbuf;
2270 		n = graph_add_indices(indices, 0, &deltaF);
2271 		graph_add_indices(indices, n, &deltaB);
2272 
2273 		/*
2274 		 * We must also be sure to maintain the relative
2275 		 * ordering of deltaF and deltaB when re-assigning
2276 		 * vertices. We do this by iteratively removing the
2277 		 * lowest ordered element from the set and assigning
2278 		 * it the next value from our new ordering.
2279 		 */
2280 		i = graph_assign_indices(g, indices, 0, &deltaB);
2281 		graph_assign_indices(g, indices, i, &deltaF);
2282 
2283 #ifdef LOCKF_DEBUG
2284 		if (lockf_debug & 8) {
2285 			struct owner_vertex_list set;
2286 			TAILQ_INIT(&set);
2287 			for (i = 0; i < nB + nF; i++)
2288 				TAILQ_INSERT_TAIL(&set,
2289 				    g->g_vertices[indices[i]], v_link);
2290 			printf("new ordering = ");
2291 			graph_print_vertices(&set);
2292 		}
2293 #endif
2294 	}
2295 
2296 	KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
2297 
2298 #ifdef LOCKF_DEBUG
2299 	if (lockf_debug & 8) {
2300 		graph_check(g, TRUE);
2301 	}
2302 #endif
2303 
2304 	e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
2305 
2306 	LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
2307 	LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
2308 	e->e_refs = 1;
2309 	e->e_from = x;
2310 	e->e_to = y;
2311 
2312 	return (0);
2313 }
2314 
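/*
 * Worked example for graph_add_edge() (editorial sketch): suppose
 * six vertices are ordered 0..5 and we add the edge 4 -> 2. Since
 * 2 < 4 the order is violated. With deltaF = { 2 } and
 * deltaB = { 4 }, the merged index set is { 2, 4 }; re-assignment
 * gives vertex 4 order 2 and vertex 2 order 4, after which the new
 * edge runs from order 2 to order 4 as required. Had 4 been
 * reachable from 2, graph_delta_forward() would have returned -1 and
 * the edge would have been rejected with EDEADLK.
 */
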
2315 /*
2316  * Remove an edge x->y from the graph.
2317  */
2318 static void
2319 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
2320     struct owner_vertex *y)
2321 {
2322 	struct owner_edge *e;
2323 
2324 	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2325 
2326 	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2327 		if (e->e_to == y)
2328 			break;
2329 	}
2330 	KASSERT(e, ("Removing non-existent edge from deadlock graph"));
2331 
2332 	e->e_refs--;
2333 	if (e->e_refs == 0) {
2334 #ifdef LOCKF_DEBUG
2335 		if (lockf_debug & 8) {
2336 			printf("removing edge %d:", x->v_order);
2337 			lf_print_owner(x->v_owner);
2338 			printf(" -> %d:", y->v_order);
2339 			lf_print_owner(y->v_owner);
2340 			printf("\n");
2341 		}
2342 #endif
2343 		LIST_REMOVE(e, e_outlink);
2344 		LIST_REMOVE(e, e_inlink);
2345 		free(e, M_LOCKF);
2346 	}
2347 }
2348 
2349 /*
2350  * Allocate a new vertex for a lock owner, growing the graph's
2351  * vertex array and index buffer if necessary.
2352  */
2353 static struct owner_vertex *
2354 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
2355 {
2356 	struct owner_vertex *v;
2357 
2358 	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2359 
2360 	v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
2361 	if (g->g_size == g->g_space) {
2362 		g->g_vertices = realloc(g->g_vertices,
2363 		    2 * g->g_space * sizeof(struct owner_vertex *),
2364 		    M_LOCKF, M_WAITOK);
2365 		free(g->g_indexbuf, M_LOCKF);
2366 		g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
2367 		    M_LOCKF, M_WAITOK);
2368 		g->g_space = 2 * g->g_space;
2369 	}
2370 	v->v_order = g->g_size;
2371 	v->v_gen = g->g_gen;
2372 	g->g_vertices[g->g_size] = v;
2373 	g->g_size++;
2374 
2375 	LIST_INIT(&v->v_outedges);
2376 	LIST_INIT(&v->v_inedges);
2377 	v->v_owner = lo;
2378 
2379 	return (v);
2380 }
2381 
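/*
 * Note (editorial): graph_alloc_vertex() grows the vertex array and
 * index buffer by doubling (10, 20, 40, ...), giving amortized
 * constant reallocation cost per vertex. The index buffer contents
 * need not survive the resize, hence the free()/malloc() pair
 * instead of realloc().
 */
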
2382 static void
2383 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
2384 {
2385 	struct owner_vertex *w;
2386 	int i;
2387 
2388 	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2389 
2390 	KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
2391 	KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
2392 
2393 	/*
2394 	 * Remove from the graph's array and close up the gap,
2395 	 * renumbering the other vertices.
2396 	 */
2397 	for (i = v->v_order + 1; i < g->g_size; i++) {
2398 		w = g->g_vertices[i];
2399 		w->v_order--;
2400 		g->g_vertices[i - 1] = w;
2401 	}
2402 	g->g_size--;
2403 
2404 	free(v, M_LOCKF);
2405 }
2406 
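/*
 * Example (editorial): graph_free_vertex() on the vertex with
 * v_order 2 in a six-vertex graph shifts vertices 3..5 down one slot
 * each and decrements their v_order, keeping the array dense and
 * preserving the invariant v_order == array index that graph_check()
 * asserts.
 */
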
2407 static struct owner_graph *
2408 graph_init(struct owner_graph *g)
2409 {
2410 
2411 	g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
2412 	    M_LOCKF, M_WAITOK);
2413 	g->g_size = 0;
2414 	g->g_space = 10;
2415 	g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
2416 	g->g_gen = 0;
2417 
2418 	return (g);
2419 }
2420 
2421 #ifdef LOCKF_DEBUG
2422 /*
2423  * Print a description of a lock owner.
2424  */
2425 static void
2426 lf_print_owner(struct lock_owner *lo)
2427 {
2428 
2429 	if (lo->lo_flags & F_REMOTE) {
2430 		printf("remote pid %d, system %d",
2431 		    lo->lo_pid, lo->lo_sysid);
2432 	} else if (lo->lo_flags & F_FLOCK) {
2433 		printf("file %p", lo->lo_id);
2434 	} else {
2435 		printf("local pid %d", lo->lo_pid);
2436 	}
2437 }
2438 
2439 /*
2440  * Print out a lock.
2441  */
2442 static void
2443 lf_print(char *tag, struct lockf_entry *lock)
2444 {
2445 
2446 	printf("%s: lock %p for ", tag, (void *)lock);
2447 	lf_print_owner(lock->lf_owner);
2448 	if (lock->lf_inode != NULL)
2449 		printf(" in ino %ju on dev <%s>,",
2450 		    (uintmax_t)lock->lf_inode->i_number,
2451 		    devtoname(lock->lf_inode->i_dev));
2452 	printf(" %s, start %jd, end ",
2453 	    lock->lf_type == F_RDLCK ? "shared" :
2454 	    lock->lf_type == F_WRLCK ? "exclusive" :
2455 	    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
2456 	    (intmax_t)lock->lf_start);
2457 	if (lock->lf_end == OFF_MAX)
2458 		printf("EOF");
2459 	else
2460 		printf("%jd", (intmax_t)lock->lf_end);
2461 	if (!LIST_EMPTY(&lock->lf_outedges))
2462 		printf(" block %p\n",
2463 		    (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
2464 	else
2465 		printf("\n");
2466 }
2467 
2468 static void
2469 lf_printlist(char *tag, struct lockf_entry *lock)
2470 {
2471 	struct lockf_entry *lf, *blk;
2472 	struct lockf_edge *e;
2473 
2474 	if (lock->lf_inode == NULL)
2475 		return;
2476 
2477 	printf("%s: Lock list for ino %ju on dev <%s>:\n",
2478 	    tag, (uintmax_t)lock->lf_inode->i_number,
2479 	    devtoname(lock->lf_inode->i_dev));
2480 	LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
2481 		printf("\tlock %p for ", (void *)lf);
2482 		lf_print_owner(lf->lf_owner);
2483 		printf(", %s, start %jd, end %jd",
2484 		    lf->lf_type == F_RDLCK ? "shared" :
2485 		    lf->lf_type == F_WRLCK ? "exclusive" :
2486 		    lf->lf_type == F_UNLCK ? "unlock" :
2487 		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
2488 		LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
2489 			blk = e->le_to;
2490 			printf("\n\t\tlock request %p for ", (void *)blk);
2491 			lf_print_owner(blk->lf_owner);
2492 			printf(", %s, start %jd, end %jd",
2493 			    blk->lf_type == F_RDLCK ? "shared" :
2494 			    blk->lf_type == F_WRLCK ? "exclusive" :
2495 			    blk->lf_type == F_UNLCK ? "unlock" :
2496 			    "unknown", (intmax_t)blk->lf_start,
2497 			    (intmax_t)blk->lf_end);
2498 			if (!LIST_EMPTY(&blk->lf_inedges))
2499 				panic("lf_printlist: bad list");
2500 		}
2501 		printf("\n");
2502 	}
2503 }
2504 #endif /* LOCKF_DEBUG */
2505