xref: /freebsd/sys/kern/kern_lockf.c (revision 10f0bcab61ef441cb5af32fb706688d8cbd55dc0)
1 /*-
2  * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
3  * Authors: Doug Rabson <dfr@rabson.org>
4  * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 /*-
28  * Copyright (c) 1982, 1986, 1989, 1993
29  *	The Regents of the University of California.  All rights reserved.
30  *
31  * This code is derived from software contributed to Berkeley by
32  * Scooter Morris at Genentech Inc.
33  *
34  * Redistribution and use in source and binary forms, with or without
35  * modification, are permitted provided that the following conditions
36  * are met:
37  * 1. Redistributions of source code must retain the above copyright
38  *    notice, this list of conditions and the following disclaimer.
39  * 2. Redistributions in binary form must reproduce the above copyright
40  *    notice, this list of conditions and the following disclaimer in the
41  *    documentation and/or other materials provided with the distribution.
42  * 4. Neither the name of the University nor the names of its contributors
43  *    may be used to endorse or promote products derived from this software
44  *    without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56  * SUCH DAMAGE.
57  *
58  *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
59  */
60 
61 #include <sys/cdefs.h>
62 __FBSDID("$FreeBSD$");
63 
64 #include "opt_debug_lockf.h"
65 
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/hash.h>
69 #include <sys/kernel.h>
70 #include <sys/limits.h>
71 #include <sys/lock.h>
72 #include <sys/mount.h>
73 #include <sys/mutex.h>
74 #include <sys/proc.h>
75 #include <sys/sx.h>
76 #include <sys/unistd.h>
77 #include <sys/vnode.h>
78 #include <sys/malloc.h>
79 #include <sys/fcntl.h>
80 #include <sys/lockf.h>
81 #include <sys/taskqueue.h>
82 
83 #ifdef LOCKF_DEBUG
84 #include <sys/sysctl.h>
85 
86 #include <ufs/ufs/quota.h>
87 #include <ufs/ufs/inode.h>
88 
89 static int	lockf_debug = 0; /* control debug output */
90 SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
91 #endif
92 
93 MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
94 
95 struct owner_edge;
96 struct owner_vertex;
97 struct owner_vertex_list;
98 struct owner_graph;
99 
100 #define NOLOCKF (struct lockf_entry *)0
101 #define SELF	0x1
102 #define OTHERS	0x2
103 static void	 lf_init(void *);
104 static int	 lf_hash_owner(caddr_t, struct flock *, int);
105 static int	 lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
106     int);
107 static struct lockf_entry *
108 		 lf_alloc_lock(struct lock_owner *);
109 static void	 lf_free_lock(struct lockf_entry *);
110 static int	 lf_clearlock(struct lockf *, struct lockf_entry *);
111 static int	 lf_overlaps(struct lockf_entry *, struct lockf_entry *);
112 static int	 lf_blocks(struct lockf_entry *, struct lockf_entry *);
113 static void	 lf_free_edge(struct lockf_edge *);
114 static struct lockf_edge *
115 		 lf_alloc_edge(void);
116 static void	 lf_alloc_vertex(struct lockf_entry *);
117 static int	 lf_add_edge(struct lockf_entry *, struct lockf_entry *);
118 static void	 lf_remove_edge(struct lockf_edge *);
119 static void	 lf_remove_outgoing(struct lockf_entry *);
120 static void	 lf_remove_incoming(struct lockf_entry *);
121 static int	 lf_add_outgoing(struct lockf *, struct lockf_entry *);
122 static int	 lf_add_incoming(struct lockf *, struct lockf_entry *);
123 static int	 lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
124     int);
125 static struct lockf_entry *
126 		 lf_getblock(struct lockf *, struct lockf_entry *);
127 static int	 lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
128 static void	 lf_insert_lock(struct lockf *, struct lockf_entry *);
129 static void	 lf_wakeup_lock(struct lockf *, struct lockf_entry *);
130 static void	 lf_update_dependancies(struct lockf *, struct lockf_entry *,
131     int all, struct lockf_entry_list *);
132 static void	 lf_set_start(struct lockf *, struct lockf_entry *, off_t,
133 	struct lockf_entry_list*);
134 static void	 lf_set_end(struct lockf *, struct lockf_entry *, off_t,
135 	struct lockf_entry_list*);
136 static int	 lf_setlock(struct lockf *, struct lockf_entry *,
137     struct vnode *, void **cookiep);
138 static int	 lf_cancel(struct lockf *, struct lockf_entry *, void *);
139 static void	 lf_split(struct lockf *, struct lockf_entry *,
140     struct lockf_entry *, struct lockf_entry_list *);
141 #ifdef LOCKF_DEBUG
142 static int	 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
143     struct owner_vertex_list *path);
144 static void	 graph_check(struct owner_graph *g, int checkorder);
145 static void	 graph_print_vertices(struct owner_vertex_list *set);
146 #endif
147 static int	 graph_delta_forward(struct owner_graph *g,
148     struct owner_vertex *x, struct owner_vertex *y,
149     struct owner_vertex_list *delta);
150 static int	 graph_delta_backward(struct owner_graph *g,
151     struct owner_vertex *x, struct owner_vertex *y,
152     struct owner_vertex_list *delta);
153 static int	 graph_add_indices(int *indices, int n,
154     struct owner_vertex_list *set);
155 static int	 graph_assign_indices(struct owner_graph *g, int *indices,
156     int nextunused, struct owner_vertex_list *set);
157 static int	 graph_add_edge(struct owner_graph *g,
158     struct owner_vertex *x, struct owner_vertex *y);
159 static void	 graph_remove_edge(struct owner_graph *g,
160     struct owner_vertex *x, struct owner_vertex *y);
161 static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
162     struct lock_owner *lo);
163 static void	 graph_free_vertex(struct owner_graph *g,
164     struct owner_vertex *v);
165 static struct owner_graph * graph_init(struct owner_graph *g);
166 #ifdef LOCKF_DEBUG
167 static void	 lf_print(char *, struct lockf_entry *);
168 static void	 lf_printlist(char *, struct lockf_entry *);
169 static void	 lf_print_owner(struct lock_owner *);
170 #endif
171 
172 /*
173  * This structure is used to keep track of both local and remote lock
174  * owners. The lf_owner field of the struct lockf_entry points back at
175  * the lock owner structure. Each possible lock owner (local proc for
176  * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
177  * pair for remote locks) is represented by a unique instance of
178  * struct lock_owner.
179  *
180  * If a lock owner has a lock that blocks some other lock or a lock
181  * that is waiting for some other lock, it also has a vertex in the
182  * owner_graph below.
183  *
184  * Locks:
185  * (s)		locked by state->ls_lock
186  * (S)		locked by lf_lock_states_lock
187  * (l)		locked by lf_lock_owners_lock
188  * (g)		locked by lf_owner_graph_lock
189  * (c)		const until freeing
190  */
191 #define	LOCK_OWNER_HASH_SIZE	256
192 
193 struct lock_owner {
194 	LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
195 	int	lo_refs;	    /* (l) Number of locks referring to this */
196 	int	lo_flags;	    /* (c) Flags passed to lf_advlock */
197 	caddr_t	lo_id;		    /* (c) Id value passed to lf_advlock */
198 	pid_t	lo_pid;		    /* (c) Process Id of the lock owner */
199 	int	lo_sysid;	    /* (c) System Id of the lock owner */
200 	struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
201 };
202 
203 LIST_HEAD(lock_owner_list, lock_owner);
204 
205 static struct sx		lf_lock_states_lock;
206 static struct lockf_list	lf_lock_states; /* (S) */
207 static struct sx		lf_lock_owners_lock;
208 static struct lock_owner_list	lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */
209 
210 /*
211  * Structures for deadlock detection.
212  *
213  * We have two types of directed graph. The first is the set of locks,
214  * both active and pending on a vnode. Within this graph, active locks
215  * are terminal nodes in the graph (i.e. have no out-going
216  * edges). Pending locks have out-going edges to each blocking active
217  * lock that prevents the lock from being granted and also to each
218  * older pending lock that would block them if it was active. The
219  * graph for each vnode is naturally acyclic; new edges are only ever
220  * added to or from new nodes (either new pending locks which only add
221  * out-going edges or new active locks which only add in-coming edges)
222  * therefore they cannot create loops in the lock graph.
223  *
224  * The second graph is a global graph of lock owners. Each lock owner
225  * is a vertex in that graph and an edge is added to the graph
226  * whenever an edge is added to a vnode graph, with end points
227  * corresponding to the owner of the new pending lock and the owner of the
228  * lock upon which it waits. In order to prevent deadlock, we only add
229  * an edge to this graph if the new edge would not create a cycle.
230  *
231  * The lock owner graph is topologically sorted, i.e. if a node has
232  * any outgoing edges, then it has an order strictly less than any
233  * node to which it has an outgoing edge. We preserve this ordering
234  * (and detect cycles) on edge insertion using Algorithm PK from the
235  * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
236  * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
237  * No. 1.7)
238  */
239 struct owner_vertex;
240 
241 struct owner_edge {
242 	LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
243 	LIST_ENTRY(owner_edge) e_inlink;  /* (g) link to's in-edge list */
244 	int		e_refs;		  /* (g) number of times added */
245 	struct owner_vertex *e_from;	  /* (c) out-going from here */
246 	struct owner_vertex *e_to;	  /* (c) in-coming to here */
247 };
248 LIST_HEAD(owner_edge_list, owner_edge);
249 
250 struct owner_vertex {
251 	TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
252 	uint32_t	v_gen;		  /* (g) workspace for edge insertion */
253 	int		v_order;	  /* (g) order of vertex in graph */
254 	struct owner_edge_list v_outedges;/* (g) list of out-edges */
255 	struct owner_edge_list v_inedges; /* (g) list of in-edges */
256 	struct lock_owner *v_owner;	  /* (c) corresponding lock owner */
257 };
258 TAILQ_HEAD(owner_vertex_list, owner_vertex);
259 
260 struct owner_graph {
261 	struct owner_vertex** g_vertices; /* (g) pointers to vertices */
262 	int		g_size;		  /* (g) number of vertices */
263 	int		g_space;	  /* (g) space allocated for vertices */
264 	int		*g_indexbuf;	  /* (g) workspace for loop detection */
265 	uint32_t	g_gen;		  /* (g) increment when re-ordering */
266 };
267 
268 static struct sx		lf_owner_graph_lock;
269 static struct owner_graph	lf_owner_graph;
270 
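/*
 * Illustrative sketch (not part of the kernel build): the owner graph
 * above only accepts an edge if it would not close a cycle, which is
 * what makes deadlock detection possible.  The stand-alone code below
 * models that idea with a plain adjacency matrix and a recursive
 * reachability check rather than the incremental Algorithm PK used by
 * graph_add_edge(); the toy_* names are hypothetical and exist only
 * for this example.
 */
#if 0	/* example only - never compiled into the kernel */
#include <assert.h>
#include <string.h>

#define TOY_MAX_VERTICES 8

struct toy_graph {
	int adj[TOY_MAX_VERTICES][TOY_MAX_VERTICES]; /* adj[x][y]: edge x->y */
	int size;
};

/* Return non-zero if 'to' is reachable from 'from' by following edges. */
static int
toy_reaches(struct toy_graph *g, int from, int to)
{
	int i;

	if (from == to)
		return (1);
	for (i = 0; i < g->size; i++)
		if (g->adj[from][i] && toy_reaches(g, i, to))
			return (1);
	return (0);
}

/*
 * Add an edge x->y, meaning "owner x waits for owner y".  Refuse the
 * edge (the analogue of returning EDEADLK) if y can already reach x,
 * since x->y would then complete a cycle of owners all waiting on
 * each other.  Because rejected edges are never added, the graph stays
 * acyclic and the recursion above always terminates.
 */
static int
toy_graph_add_edge(struct toy_graph *g, int x, int y)
{
	if (toy_reaches(g, y, x))
		return (-1);		/* would deadlock */
	g->adj[x][y] = 1;
	return (0);
}

static void
toy_graph_demo(void)
{
	struct toy_graph g;

	memset(&g, 0, sizeof(g));
	g.size = 3;
	assert(toy_graph_add_edge(&g, 0, 1) == 0);	/* A waits for B */
	assert(toy_graph_add_edge(&g, 1, 2) == 0);	/* B waits for C */
	assert(toy_graph_add_edge(&g, 2, 0) == -1);	/* C->A would deadlock */
}
#endif
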
271 /*
272  * Initialise various structures and locks.
273  */
274 static void
275 lf_init(void *dummy)
276 {
277 	int i;
278 
279 	sx_init(&lf_lock_states_lock, "lock states lock");
280 	LIST_INIT(&lf_lock_states);
281 
282 	sx_init(&lf_lock_owners_lock, "lock owners lock");
283 	for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
284 		LIST_INIT(&lf_lock_owners[i]);
285 
286 	sx_init(&lf_owner_graph_lock, "owner graph lock");
287 	graph_init(&lf_owner_graph);
288 }
289 SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);
290 
291 /*
292  * Generate a hash value for a lock owner.
293  */
294 static int
295 lf_hash_owner(caddr_t id, struct flock *fl, int flags)
296 {
297 	uint32_t h;
298 
299 	if (flags & F_REMOTE) {
300 		h = HASHSTEP(0, fl->l_pid);
301 		h = HASHSTEP(h, fl->l_sysid);
302 	} else if (flags & F_FLOCK) {
303 		h = ((uintptr_t) id) >> 7;
304 	} else {
305 		struct proc *p = (struct proc *) id;
306 		h = HASHSTEP(0, p->p_pid);
307 		h = HASHSTEP(h, 0);
308 	}
309 
310 	return (h % LOCK_OWNER_HASH_SIZE);
311 }
312 
313 /*
314  * Return true if a lock owner matches the details passed to
315  * lf_advlock.
316  */
317 static int
318 lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
319     int flags)
320 {
321 	if (flags & F_REMOTE) {
322 		return lo->lo_pid == fl->l_pid
323 			&& lo->lo_sysid == fl->l_sysid;
324 	} else {
325 		return lo->lo_id == id;
326 	}
327 }
328 
329 static struct lockf_entry *
330 lf_alloc_lock(struct lock_owner *lo)
331 {
332 	struct lockf_entry *lf;
333 
334 	lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);
335 
336 #ifdef LOCKF_DEBUG
337 	if (lockf_debug & 4)
338 		printf("Allocated lock %p\n", lf);
339 #endif
340 	if (lo) {
341 		sx_xlock(&lf_lock_owners_lock);
342 		lo->lo_refs++;
343 		sx_xunlock(&lf_lock_owners_lock);
344 		lf->lf_owner = lo;
345 	}
346 
347 	return (lf);
348 }
349 
350 static void
351 lf_free_lock(struct lockf_entry *lock)
352 {
353 	/*
354 	 * Adjust the lock_owner reference count and
355 	 * reclaim the entry if this is the last lock
356 	 * for that owner.
357 	 */
358 	struct lock_owner *lo = lock->lf_owner;
359 	if (lo) {
360 		KASSERT(LIST_EMPTY(&lock->lf_outedges),
361 		    ("freeing lock with dependancies"));
362 		KASSERT(LIST_EMPTY(&lock->lf_inedges),
363 		    ("freeing lock with dependants"));
364 		sx_xlock(&lf_lock_owners_lock);
365 		KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
366 		lo->lo_refs--;
367 		if (lo->lo_refs == 0) {
368 #ifdef LOCKF_DEBUG
369 			if (lockf_debug & 1)
370 				printf("lf_free_lock: freeing lock owner %p\n",
371 				    lo);
372 #endif
373 			if (lo->lo_vertex) {
374 				sx_xlock(&lf_owner_graph_lock);
375 				graph_free_vertex(&lf_owner_graph,
376 				    lo->lo_vertex);
377 				sx_xunlock(&lf_owner_graph_lock);
378 			}
379 			LIST_REMOVE(lo, lo_link);
380 			free(lo, M_LOCKF);
381 #ifdef LOCKF_DEBUG
382 			if (lockf_debug & 4)
383 				printf("Freed lock owner %p\n", lo);
384 #endif
385 		}
386 		sx_unlock(&lf_lock_owners_lock);
387 	}
388 	if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
389 		vrele(lock->lf_vnode);
390 		lock->lf_vnode = NULL;
391 	}
392 #ifdef LOCKF_DEBUG
393 	if (lockf_debug & 4)
394 		printf("Freed lock %p\n", lock);
395 #endif
396 	free(lock, M_LOCKF);
397 }
398 
399 /*
400  * Advisory record locking support
401  */
402 int
403 lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
404     u_quad_t size)
405 {
406 	struct lockf *state, *freestate = NULL;
407 	struct flock *fl = ap->a_fl;
408 	struct lockf_entry *lock;
409 	struct vnode *vp = ap->a_vp;
410 	caddr_t id = ap->a_id;
411 	int flags = ap->a_flags;
412 	int hash;
413 	struct lock_owner *lo;
414 	off_t start, end, oadd;
415 	int error;
416 
417 	/*
418 	 * Handle the F_UNLCKSYS case first - no need to mess about
419 	 * creating a lock owner for this one.
420 	 */
421 	if (ap->a_op == F_UNLCKSYS) {
422 		lf_clearremotesys(fl->l_sysid);
423 		return (0);
424 	}
425 
426 	/*
427 	 * Convert the flock structure into a start and end.
428 	 */
429 	switch (fl->l_whence) {
430 
431 	case SEEK_SET:
432 	case SEEK_CUR:
433 		/*
434 		 * Caller is responsible for adding any necessary offset
435 		 * when SEEK_CUR is used.
436 		 */
437 		start = fl->l_start;
438 		break;
439 
440 	case SEEK_END:
441 		if (size > OFF_MAX ||
442 		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
443 			return (EOVERFLOW);
444 		start = size + fl->l_start;
445 		break;
446 
447 	default:
448 		return (EINVAL);
449 	}
450 	if (start < 0)
451 		return (EINVAL);
452 	if (fl->l_len < 0) {
453 		if (start == 0)
454 			return (EINVAL);
455 		end = start - 1;
456 		start += fl->l_len;
457 		if (start < 0)
458 			return (EINVAL);
459 	} else if (fl->l_len == 0) {
460 		end = OFF_MAX;
461 	} else {
462 		oadd = fl->l_len - 1;
463 		if (oadd > OFF_MAX - start)
464 			return (EOVERFLOW);
465 		end = start + oadd;
466 	}
467 	/*
468 	 * Avoid the common case of unlocking when inode has no locks.
469 	 */
470 	if ((*statep) == NULL || LIST_EMPTY(&(*statep)->ls_active)) {
471 		if (ap->a_op != F_SETLK) {
472 			fl->l_type = F_UNLCK;
473 			return (0);
474 		}
475 	}
476 
477 	/*
478 	 * Map our arguments to an existing lock owner or create one
479 	 * if this is the first time we have seen this owner.
480 	 */
481 	hash = lf_hash_owner(id, fl, flags);
482 	sx_xlock(&lf_lock_owners_lock);
483 	LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link)
484 		if (lf_owner_matches(lo, id, fl, flags))
485 			break;
486 	if (!lo) {
487 		/*
488 		 * We initialise the lock owner with a reference
489 		 * count which matches the new lockf_entry
490 		 * structure created below.
491 		 */
492 		lo = malloc(sizeof(struct lock_owner), M_LOCKF,
493 		    M_WAITOK|M_ZERO);
494 #ifdef LOCKF_DEBUG
495 		if (lockf_debug & 4)
496 			printf("Allocated lock owner %p\n", lo);
497 #endif
498 
499 		lo->lo_refs = 1;
500 		lo->lo_flags = flags;
501 		lo->lo_id = id;
502 		if (flags & F_REMOTE) {
503 			lo->lo_pid = fl->l_pid;
504 			lo->lo_sysid = fl->l_sysid;
505 		} else if (flags & F_FLOCK) {
506 			lo->lo_pid = -1;
507 			lo->lo_sysid = 0;
508 		} else {
509 			struct proc *p = (struct proc *) id;
510 			lo->lo_pid = p->p_pid;
511 			lo->lo_sysid = 0;
512 		}
513 		lo->lo_vertex = NULL;
514 
515 #ifdef LOCKF_DEBUG
516 		if (lockf_debug & 1) {
517 			printf("lf_advlockasync: new lock owner %p ", lo);
518 			lf_print_owner(lo);
519 			printf("\n");
520 		}
521 #endif
522 
523 		LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link);
524 	} else {
525 		/*
526 		 * We have seen this lock owner before, increase its
527 		 * reference count to account for the new lockf_entry
528 		 * structure we create below.
529 		 */
530 		lo->lo_refs++;
531 	}
532 	sx_xunlock(&lf_lock_owners_lock);
533 
534 	/*
535 	 * Create the lockf structure. We initialise the lf_owner
536 	 * field here instead of in lf_alloc_lock() to avoid paying
537 	 * the lf_lock_owners_lock tax twice.
538 	 */
539 	lock = lf_alloc_lock(NULL);
540 	lock->lf_start = start;
541 	lock->lf_end = end;
542 	lock->lf_owner = lo;
543 	lock->lf_vnode = vp;
544 	if (flags & F_REMOTE) {
545 		/*
546 		 * For remote locks, the caller may release its ref to
547 		 * the vnode at any time - we have to ref it here to
548 		 * prevent it from being recycled unexpectedly.
549 		 */
550 		vref(vp);
551 	}
552 
553 	/*
554 	 * XXX The problem is that VTOI is ufs specific, so it will
555 	 * break LOCKF_DEBUG for all other FS's other than UFS because
556 	 * it casts the vnode->data ptr to struct inode *.
557 	 */
558 /*	lock->lf_inode = VTOI(ap->a_vp); */
559 	lock->lf_inode = (struct inode *)0;
560 	lock->lf_type = fl->l_type;
561 	LIST_INIT(&lock->lf_outedges);
562 	LIST_INIT(&lock->lf_inedges);
563 	lock->lf_async_task = ap->a_task;
564 	lock->lf_flags = ap->a_flags;
565 
566 	/*
567 	 * Do the requested operation. First find our state structure
568 	 * and create a new one if necessary - the caller's *statep
569 	 * variable and the state's ls_threads count is protected by
570 	 * the vnode interlock.
571 	 */
572 	VI_LOCK(vp);
573 
574 	/*
575 	 * Allocate a state structure if necessary.
576 	 */
577 	state = *statep;
578 	if (state == NULL) {
579 		struct lockf *ls;
580 
581 		VI_UNLOCK(vp);
582 
583 		ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
584 		sx_init(&ls->ls_lock, "ls_lock");
585 		LIST_INIT(&ls->ls_active);
586 		LIST_INIT(&ls->ls_pending);
587 
588 		sx_xlock(&lf_lock_states_lock);
589 		LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
590 		sx_xunlock(&lf_lock_states_lock);
591 
592 		/*
593 		 * Cope if we lost a race with some other thread while
594 		 * trying to allocate memory.
595 		 */
596 		VI_LOCK(vp);
597 		if ((*statep) == NULL) {
598 			(*statep) = ls;
599 		} else {
600 			sx_xlock(&lf_lock_states_lock);
601 			LIST_REMOVE(ls, ls_link);
602 			sx_xunlock(&lf_lock_states_lock);
603 			sx_destroy(&ls->ls_lock);
604 			free(ls, M_LOCKF);
605 		}
606 	}
607 	state = *statep;
608 	state->ls_threads++;
609 
610 	VI_UNLOCK(vp);
611 
612 	sx_xlock(&state->ls_lock);
613 	switch(ap->a_op) {
614 	case F_SETLK:
615 		error = lf_setlock(state, lock, vp, ap->a_cookiep);
616 		break;
617 
618 	case F_UNLCK:
619 		error = lf_clearlock(state, lock);
620 		lf_free_lock(lock);
621 		break;
622 
623 	case F_GETLK:
624 		error = lf_getlock(state, lock, fl);
625 		lf_free_lock(lock);
626 		break;
627 
628 	case F_CANCEL:
629 		if (ap->a_cookiep)
630 			error = lf_cancel(state, lock, *ap->a_cookiep);
631 		else
632 			error = EINVAL;
633 		lf_free_lock(lock);
634 		break;
635 
636 	default:
637 		lf_free_lock(lock);
638 		error = EINVAL;
639 		break;
640 	}
641 
642 #ifdef INVARIANTS
643 	/*
644 	 * Check for some can't happen stuff. In this case, the active
645 	 * lock list becoming disordered or containing mutually
646 	 * blocking locks. We also check the pending list for locks
647 	 * which should be active (i.e. have no out-going edges).
648 	 */
649 	LIST_FOREACH(lock, &state->ls_active, lf_link) {
650 		struct lockf_entry *lf;
651 		if (LIST_NEXT(lock, lf_link))
652 			KASSERT((lock->lf_start
653 				<= LIST_NEXT(lock, lf_link)->lf_start),
654 			    ("locks disordered"));
655 		LIST_FOREACH(lf, &state->ls_active, lf_link) {
656 			if (lock == lf)
657 				break;
658 			KASSERT(!lf_blocks(lock, lf),
659 			    ("two conflicting active locks"));
660 			if (lock->lf_owner == lf->lf_owner)
661 				KASSERT(!lf_overlaps(lock, lf),
662 				    ("two overlapping locks from same owner"));
663 		}
664 	}
665 	LIST_FOREACH(lock, &state->ls_pending, lf_link) {
666 		KASSERT(!LIST_EMPTY(&lock->lf_outedges),
667 		    ("pending lock which should be active"));
668 	}
669 #endif
670 	sx_xunlock(&state->ls_lock);
671 
672 	/*
673 	 * If we have removed the last active lock on the vnode and
674 	 * this is the last thread that was in-progress, we can free
675 	 * the state structure. We update the caller's pointer inside
676 	 * the vnode interlock but call free outside.
677 	 *
678 	 * XXX alternatively, keep the state structure around until
679 	 * the filesystem recycles - requires a callback from the
680 	 * filesystem.
681 	 */
682 	VI_LOCK(vp);
683 
684 	state->ls_threads--;
685 	if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
686 		KASSERT(LIST_EMPTY(&state->ls_pending),
687 		    ("freeing state with pending locks"));
688 		freestate = state;
689 		*statep = NULL;
690 	}
691 
692 	VI_UNLOCK(vp);
693 
694 	if (freestate) {
695 		sx_xlock(&lf_lock_states_lock);
696 		LIST_REMOVE(freestate, ls_link);
697 		sx_xunlock(&lf_lock_states_lock);
698 		sx_destroy(&freestate->ls_lock);
699 		free(freestate, M_LOCKF);
700 	}
701 	return (error);
702 }
703 
704 int
705 lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
706 {
707 	struct vop_advlockasync_args a;
708 
709 	a.a_vp = ap->a_vp;
710 	a.a_id = ap->a_id;
711 	a.a_op = ap->a_op;
712 	a.a_fl = ap->a_fl;
713 	a.a_flags = ap->a_flags;
714 	a.a_task = NULL;
715 	a.a_cookiep = NULL;
716 
717 	return (lf_advlockasync(&a, statep, size));
718 }
719 
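/*
 * Illustrative sketch (not part of the kernel build): lf_advlock() and
 * lf_advlockasync() above are what ultimately service fcntl(2)
 * byte-range lock requests from userland.  The stand-alone program
 * below shows the struct flock fields (l_whence/l_start/l_len) whose
 * conversion to a [start, end] range happens near the top of
 * lf_advlockasync(); "/tmp/lockdemo" is just an example path.
 */
#if 0	/* example only - never compiled into the kernel */
#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct flock fl;
	int fd;

	fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		exit(1);
	}

	/* Write-lock bytes 100..199; l_len == 0 would mean "to EOF". */
	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 100;
	fl.l_len = 100;

	/* F_SETLKW sleeps until the range is free (the F_WAIT path). */
	if (fcntl(fd, F_SETLKW, &fl) < 0) {
		perror("fcntl(F_SETLKW)");
		exit(1);
	}

	/* Release the same range. */
	fl.l_type = F_UNLCK;
	if (fcntl(fd, F_SETLK, &fl) < 0)
		perror("fcntl(F_UNLCK)");

	close(fd);
	return (0);
}
#endif
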
720 /*
721  * Return non-zero if locks 'x' and 'y' overlap.
722  */
723 static int
724 lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
725 {
726 
727 	return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
728 }
729 
730 /*
731  * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
732  */
733 static int
734 lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
735 {
736 
737 	return x->lf_owner != y->lf_owner
738 		&& (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
739 		&& lf_overlaps(x, y);
740 }
741 
742 /*
743  * Allocate a lock edge from the free list
744  */
745 static struct lockf_edge *
746 lf_alloc_edge(void)
747 {
748 
749 	return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
750 }
751 
752 /*
753  * Free a lock edge.
754  */
755 static void
756 lf_free_edge(struct lockf_edge *e)
757 {
758 
759 	free(e, M_LOCKF);
760 }
761 
762 
763 /*
764  * Ensure that the lock's owner has a corresponding vertex in the
765  * owner graph.
766  */
767 static void
768 lf_alloc_vertex(struct lockf_entry *lock)
769 {
770 	struct owner_graph *g = &lf_owner_graph;
771 
772 	if (!lock->lf_owner->lo_vertex)
773 		lock->lf_owner->lo_vertex =
774 			graph_alloc_vertex(g, lock->lf_owner);
775 }
776 
777 /*
778  * Attempt to record an edge from lock x to lock y. Return EDEADLK if
779  * the new edge would cause a cycle in the owner graph.
780  */
781 static int
782 lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
783 {
784 	struct owner_graph *g = &lf_owner_graph;
785 	struct lockf_edge *e;
786 	int error;
787 
788 #ifdef INVARIANTS
789 	LIST_FOREACH(e, &x->lf_outedges, le_outlink)
790 		KASSERT(e->le_to != y, ("adding lock edge twice"));
791 #endif
792 
793 	/*
794 	 * Make sure the two owners have entries in the owner graph.
795 	 */
796 	lf_alloc_vertex(x);
797 	lf_alloc_vertex(y);
798 
799 	error = graph_add_edge(g, x->lf_owner->lo_vertex,
800 	    y->lf_owner->lo_vertex);
801 	if (error)
802 		return (error);
803 
804 	e = lf_alloc_edge();
805 	LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
806 	LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
807 	e->le_from = x;
808 	e->le_to = y;
809 
810 	return (0);
811 }
812 
813 /*
814  * Remove an edge from the lock graph.
815  */
816 static void
817 lf_remove_edge(struct lockf_edge *e)
818 {
819 	struct owner_graph *g = &lf_owner_graph;
820 	struct lockf_entry *x = e->le_from;
821 	struct lockf_entry *y = e->le_to;
822 
823 	graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
824 	LIST_REMOVE(e, le_outlink);
825 	LIST_REMOVE(e, le_inlink);
826 	e->le_from = NULL;
827 	e->le_to = NULL;
828 	lf_free_edge(e);
829 }
830 
831 /*
832  * Remove all out-going edges from lock x.
833  */
834 static void
835 lf_remove_outgoing(struct lockf_entry *x)
836 {
837 	struct lockf_edge *e;
838 
839 	while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
840 		lf_remove_edge(e);
841 	}
842 }
843 
844 /*
845  * Remove all in-coming edges from lock x.
846  */
847 static void
848 lf_remove_incoming(struct lockf_entry *x)
849 {
850 	struct lockf_edge *e;
851 
852 	while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
853 		lf_remove_edge(e);
854 	}
855 }
856 
857 /*
858  * Walk the list of locks for the file and create an out-going edge
859  * from lock to each blocking lock.
860  */
861 static int
862 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
863 {
864 	struct lockf_entry *overlap;
865 	int error;
866 
867 	LIST_FOREACH(overlap, &state->ls_active, lf_link) {
868 		/*
869 		 * We may assume that the active list is sorted by
870 		 * lf_start.
871 		 */
872 		if (overlap->lf_start > lock->lf_end)
873 			break;
874 		if (!lf_blocks(lock, overlap))
875 			continue;
876 
877 		/*
878 		 * We've found a blocking lock. Add the corresponding
879 		 * edge to the graphs and see if it would cause a
880 		 * deadlock.
881 		 */
882 		error = lf_add_edge(lock, overlap);
883 
884 		/*
885 		 * The only error that lf_add_edge returns is EDEADLK.
886 		 * Remove any edges we added and return the error.
887 		 */
888 		if (error) {
889 			lf_remove_outgoing(lock);
890 			return (error);
891 		}
892 	}
893 
894 	/*
895 	 * We also need to add edges to sleeping locks that block
896 	 * us. This ensures that lf_wakeup_lock cannot grant two
897 	 * mutually blocking locks simultaneously and also enforces a
898 	 * 'first come, first served' fairness model. Note that this
899 	 * only happens if we are blocked by at least one active lock
900 	 * due to the call to lf_getblock in lf_setlock below.
901 	 */
902 	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
903 		if (!lf_blocks(lock, overlap))
904 			continue;
905 		/*
906 		 * We've found a blocking lock. Add the corresponding
907 		 * edge to the graphs and see if it would cause a
908 		 * deadlock.
909 		 */
910 		error = lf_add_edge(lock, overlap);
911 
912 		/*
913 		 * The only error that lf_add_edge returns is EDEADLK.
914 		 * Remove any edges we added and return the error.
915 		 */
916 		if (error) {
917 			lf_remove_outgoing(lock);
918 			return (error);
919 		}
920 	}
921 
922 	return (0);
923 }
924 
925 /*
926  * Walk the list of pending locks for the file and create an in-coming
927  * edge to 'lock' from each pending lock that it blocks.
928  */
929 static int
930 lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
931 {
932 	struct lockf_entry *overlap;
933 	int error;
934 
935 	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
936 		if (!lf_blocks(lock, overlap))
937 			continue;
938 
939 		/*
940 		 * We've found a blocking lock. Add the corresponding
941 		 * edge to the graphs and see if it would cause a
942 		 * deadlock.
943 		 */
944 		error = lf_add_edge(overlap, lock);
945 
946 		/*
947 		 * The only error that lf_add_edge returns is EDEADLK.
948 		 * Remove any edges we added and return the error.
949 		 */
950 		if (error) {
951 			lf_remove_incoming(lock);
952 			return (error);
953 		}
954 	}
955 	return (0);
956 }
957 
958 /*
959  * Insert lock into the active list, keeping list entries ordered by
960  * increasing values of lf_start.
961  */
962 static void
963 lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
964 {
965 	struct lockf_entry *lf, *lfprev;
966 
967 	if (LIST_EMPTY(&state->ls_active)) {
968 		LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
969 		return;
970 	}
971 
972 	lfprev = NULL;
973 	LIST_FOREACH(lf, &state->ls_active, lf_link) {
974 		if (lf->lf_start > lock->lf_start) {
975 			LIST_INSERT_BEFORE(lf, lock, lf_link);
976 			return;
977 		}
978 		lfprev = lf;
979 	}
980 	LIST_INSERT_AFTER(lfprev, lock, lf_link);
981 }
982 
983 /*
984  * Wake up a sleeping lock and remove it from the pending list now
985  * that all its dependencies have been resolved. The caller should
986  * arrange for the lock to be added to the active list, adjusting any
987  * existing locks for the same owner as needed.
988  */
989 static void
990 lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
991 {
992 
993 	/*
994 	 * Remove from ls_pending list and wake up the caller
995 	 * or start the async notification, as appropriate.
996 	 */
997 	LIST_REMOVE(wakelock, lf_link);
998 #ifdef LOCKF_DEBUG
999 	if (lockf_debug & 1)
1000 		lf_print("lf_wakeup_lock: awakening", wakelock);
1001 #endif /* LOCKF_DEBUG */
1002 	if (wakelock->lf_async_task) {
1003 		taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
1004 	} else {
1005 		wakeup(wakelock);
1006 	}
1007 }
1008 
1009 /*
1010  * Re-check all dependent locks and remove edges to locks that we no
1011  * longer block. If 'all' is non-zero, the lock has been removed and
1012  * we must remove all the dependencies, otherwise it has simply been
1013  * reduced but remains active. Any pending locks which have been
1014  * unblocked are added to 'granted'.
1015  */
1016 static void
1017 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
1018 	struct lockf_entry_list *granted)
1019 {
1020 	struct lockf_edge *e, *ne;
1021 	struct lockf_entry *deplock;
1022 
1023 	LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
1024 		deplock = e->le_from;
1025 		if (all || !lf_blocks(lock, deplock)) {
1026 			sx_xlock(&lf_owner_graph_lock);
1027 			lf_remove_edge(e);
1028 			sx_xunlock(&lf_owner_graph_lock);
1029 			if (LIST_EMPTY(&deplock->lf_outedges)) {
1030 				lf_wakeup_lock(state, deplock);
1031 				LIST_INSERT_HEAD(granted, deplock, lf_link);
1032 			}
1033 		}
1034 	}
1035 }
1036 
1037 /*
1038  * Set the start of an existing active lock, updating dependencies and
1039  * adding any newly woken locks to 'granted'.
1040  */
1041 static void
1042 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
1043 	struct lockf_entry_list *granted)
1044 {
1045 
1046 	KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
1047 	lock->lf_start = new_start;
1048 	LIST_REMOVE(lock, lf_link);
1049 	lf_insert_lock(state, lock);
1050 	lf_update_dependancies(state, lock, FALSE, granted);
1051 }
1052 
1053 /*
1054  * Set the end of an existing active lock, updating dependencies and
1055  * adding any newly woken locks to 'granted'.
1056  */
1057 static void
1058 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
1059 	struct lockf_entry_list *granted)
1060 {
1061 
1062 	KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
1063 	lock->lf_end = new_end;
1064 	lf_update_dependancies(state, lock, FALSE, granted);
1065 }
1066 
1067 /*
1068  * Add a lock to the active list, updating or removing any current
1069  * locks owned by the same owner and processing any pending locks that
1070  * become unblocked as a result. This code is also used for unlock
1071  * since the logic for updating existing locks is identical.
1072  *
1073  * As a result of processing the new lock, we may unblock existing
1074  * pending locks as a result of downgrading/unlocking. We simply
1075  * activate the newly granted locks by looping.
1076  *
1077  * Since the new lock already has its dependencies set up, we always
1078  * add it to the list (unless it's an unlock request). This may
1079  * fragment the lock list in some pathological cases but it's probably
1080  * not a real problem.
1081  */
1082 static void
1083 lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
1084 {
1085 	struct lockf_entry *overlap, *lf;
1086 	struct lockf_entry_list granted;
1087 	int ovcase;
1088 
1089 	LIST_INIT(&granted);
1090 	LIST_INSERT_HEAD(&granted, lock, lf_link);
1091 
1092 	while (!LIST_EMPTY(&granted)) {
1093 		lock = LIST_FIRST(&granted);
1094 		LIST_REMOVE(lock, lf_link);
1095 
1096 		/*
1097 		 * Skip over locks owned by other processes.  Handle
1098 		 * any locks that overlap and are owned by ourselves.
1099 		 */
1100 		overlap = LIST_FIRST(&state->ls_active);
1101 		for (;;) {
1102 			ovcase = lf_findoverlap(&overlap, lock, SELF);
1103 
1104 #ifdef LOCKF_DEBUG
1105 			if (ovcase && (lockf_debug & 2)) {
1106 				printf("lf_setlock: overlap %d", ovcase);
1107 				lf_print("", overlap);
1108 			}
1109 #endif
1110 			/*
1111 			 * Six cases:
1112 			 *	0) no overlap
1113 			 *	1) overlap == lock
1114 			 *	2) overlap contains lock
1115 			 *	3) lock contains overlap
1116 			 *	4) overlap starts before lock
1117 			 *	5) overlap ends after lock
1118 			 */
1119 			switch (ovcase) {
1120 			case 0: /* no overlap */
1121 				break;
1122 
1123 			case 1: /* overlap == lock */
1124 				/*
1125 				 * We have already set up the
1126 				 * dependents for the new lock, taking
1127 				 * into account a possible downgrade
1128 				 * or unlock. Remove the old lock.
1129 				 */
1130 				LIST_REMOVE(overlap, lf_link);
1131 				lf_update_dependancies(state, overlap, TRUE,
1132 					&granted);
1133 				lf_free_lock(overlap);
1134 				break;
1135 
1136 			case 2: /* overlap contains lock */
1137 				/*
1138 				 * Just split the existing lock.
1139 				 */
1140 				lf_split(state, overlap, lock, &granted);
1141 				break;
1142 
1143 			case 3: /* lock contains overlap */
1144 				/*
1145 				 * Delete the overlap and advance to
1146 				 * the next entry in the list.
1147 				 */
1148 				lf = LIST_NEXT(overlap, lf_link);
1149 				LIST_REMOVE(overlap, lf_link);
1150 				lf_update_dependancies(state, overlap, TRUE,
1151 					&granted);
1152 				lf_free_lock(overlap);
1153 				overlap = lf;
1154 				continue;
1155 
1156 			case 4: /* overlap starts before lock */
1157 				/*
1158 				 * Just update the overlap end and
1159 				 * move on.
1160 				 */
1161 				lf_set_end(state, overlap, lock->lf_start - 1,
1162 				    &granted);
1163 				overlap = LIST_NEXT(overlap, lf_link);
1164 				continue;
1165 
1166 			case 5: /* overlap ends after lock */
1167 				/*
1168 				 * Change the start of overlap and
1169 				 * re-insert.
1170 				 */
1171 				lf_set_start(state, overlap, lock->lf_end + 1,
1172 				    &granted);
1173 				break;
1174 			}
1175 			break;
1176 		}
1177 #ifdef LOCKF_DEBUG
1178 		if (lockf_debug & 1) {
1179 			if (lock->lf_type != F_UNLCK)
1180 				lf_print("lf_activate_lock: activated", lock);
1181 			else
1182 				lf_print("lf_activate_lock: unlocked", lock);
1183 			lf_printlist("lf_activate_lock", lock);
1184 		}
1185 #endif /* LOCKF_DEBUG */
1186 		if (lock->lf_type != F_UNLCK)
1187 			lf_insert_lock(state, lock);
1188 	}
1189 }
1190 
1191 /*
1192  * Cancel a pending lock request, either as a result of a signal or a
1193  * cancel request for an async lock.
1194  */
1195 static void
1196 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
1197 {
1198 	struct lockf_entry_list granted;
1199 
1200 	/*
1201 	 * Note it is theoretically possible that cancelling this lock
1202 	 * may allow some other pending lock to become
1203 	 * active. Consider this case:
1204 	 *
1205 	 * Owner	Action		Result		Dependencies
1206 	 *
1207 	 * A:		lock [0..0]	succeeds
1208 	 * B:		lock [2..2]	succeeds
1209 	 * C:		lock [1..2]	blocked		C->B
1210 	 * D:		lock [0..1]	blocked		C->B,D->A,D->C
1211 	 * A:		unlock [0..0]			C->B,D->C
1212 	 * C:		cancel [1..2]
1213 	 */
1214 
1215 	LIST_REMOVE(lock, lf_link);
1216 
1217 	/*
1218 	 * Removing out-going edges is simple.
1219 	 */
1220 	sx_xlock(&lf_owner_graph_lock);
1221 	lf_remove_outgoing(lock);
1222 	sx_xunlock(&lf_owner_graph_lock);
1223 
1224 	/*
1225 	 * Removing in-coming edges may allow some other lock to
1226 	 * become active - we use lf_update_dependancies to figure
1227 	 * this out.
1228 	 */
1229 	LIST_INIT(&granted);
1230 	lf_update_dependancies(state, lock, TRUE, &granted);
1231 	lf_free_lock(lock);
1232 
1233 	/*
1234 	 * Feed any newly active locks to lf_activate_lock.
1235 	 */
1236 	while (!LIST_EMPTY(&granted)) {
1237 		lock = LIST_FIRST(&granted);
1238 		LIST_REMOVE(lock, lf_link);
1239 		lf_activate_lock(state, lock);
1240 	}
1241 }
1242 
1243 /*
1244  * Set a byte-range lock.
1245  */
1246 static int
1247 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
1248     void **cookiep)
1249 {
1250 	struct lockf_entry *block;
1251 	static char lockstr[] = "lockf";
1252 	int priority, error;
1253 
1254 #ifdef LOCKF_DEBUG
1255 	if (lockf_debug & 1)
1256 		lf_print("lf_setlock", lock);
1257 #endif /* LOCKF_DEBUG */
1258 
1259 	/*
1260 	 * Set the priority
1261 	 */
1262 	priority = PLOCK;
1263 	if (lock->lf_type == F_WRLCK)
1264 		priority += 4;
1265 	priority |= PCATCH;
1266 	/*
1267 	 * Scan lock list for this file looking for locks that would block us.
1268 	 */
1269 	while ((block = lf_getblock(state, lock))) {
1270 		/*
1271 		 * Free the structure and return if nonblocking.
1272 		 */
1273 		if ((lock->lf_flags & F_WAIT) == 0
1274 		    && lock->lf_async_task == NULL) {
1275 			lf_free_lock(lock);
1276 			error = EAGAIN;
1277 			goto out;
1278 		}
1279 
1280 		/*
1281 		 * We are blocked. Create edges to each blocking lock,
1282 		 * checking for deadlock using the owner graph. For
1283 		 * simplicity, we run deadlock detection for all
1284 		 * locks, posix and otherwise.
1285 		 */
1286 		sx_xlock(&lf_owner_graph_lock);
1287 		error = lf_add_outgoing(state, lock);
1288 		sx_xunlock(&lf_owner_graph_lock);
1289 
1290 		if (error) {
1291 #ifdef LOCKF_DEBUG
1292 			if (lockf_debug & 1)
1293 				lf_print("lf_setlock: deadlock", lock);
1294 #endif
1295 			lf_free_lock(lock);
1296 			goto out;
1297 		}
1298 
1299 		/*
1300 		 * For flock type locks, we must first remove
1301 		 * any shared locks that we hold before we sleep
1302 		 * waiting for an exclusive lock.
1303 		 */
1304 		if ((lock->lf_flags & F_FLOCK) &&
1305 		    lock->lf_type == F_WRLCK) {
1306 			lock->lf_type = F_UNLCK;
1307 			lf_activate_lock(state, lock);
1308 			lock->lf_type = F_WRLCK;
1309 		}
1310 		/*
1311 		 * We have added edges to everything that blocks
1312 		 * us. Sleep until they all go away.
1313 		 */
1314 		LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
1315 #ifdef LOCKF_DEBUG
1316 		if (lockf_debug & 1) {
1317 			struct lockf_edge *e;
1318 			LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
1319 				lf_print("lf_setlock: blocking on", e->le_to);
1320 				lf_printlist("lf_setlock", e->le_to);
1321 			}
1322 		}
1323 #endif /* LOCKF_DEBUG */
1324 
1325 		if ((lock->lf_flags & F_WAIT) == 0) {
1326 			/*
1327 			 * The caller requested async notification -
1328 			 * this callback happens when the blocking
1329 			 * lock is released, allowing the caller to
1330 			 * make another attempt to take the lock.
1331 			 */
1332 			*cookiep = (void *) lock;
1333 			error = EINPROGRESS;
1334 			goto out;
1335 		}
1336 
1337 		error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
1338 		/*
1339 		 * We may have been awakened by a signal and/or by a
1340 		 * debugger continuing us (in which cases we must
1341 		 * remove our lock graph edges) and/or by another
1342 		 * process releasing a lock (in which case our edges
1343 		 * have already been removed and we have been moved to
1344 		 * the active list).
1345 		 *
1346 		 * Note that it is possible to receive a signal after
1347 		 * we were successfully woken (and moved to the active
1348 		 * list) but before we resumed execution. In this
1349 		 * case, our lf_outedges list will be clear. We
1350 		 * pretend there was no error.
1351 		 *
1352 		 * Note also, if we have been sleeping long enough, we
1353 		 * may now have incoming edges from some newer lock
1354 		 * which is waiting behind us in the queue.
1355 		 */
1356 		if (LIST_EMPTY(&lock->lf_outedges)) {
1357 			error = 0;
1358 		} else {
1359 			lf_cancel_lock(state, lock);
1360 			goto out;
1361 		}
1362 #ifdef LOCKF_DEBUG
1363 		if (lockf_debug & 1) {
1364 			lf_print("lf_setlock: granted", lock);
1365 		}
1366 #endif
1367 		goto out;
1368 	}
1369 	/*
1370 	 * It looks like we are going to grant the lock. First add
1371 	 * edges from any currently pending lock that the new lock
1372 	 * would block.
1373 	 */
1374 	sx_xlock(&lf_owner_graph_lock);
1375 	error = lf_add_incoming(state, lock);
1376 	sx_xunlock(&lf_owner_graph_lock);
1377 	if (error) {
1378 #ifdef LOCKF_DEBUG
1379 		if (lockf_debug & 1)
1380 			lf_print("lf_setlock: deadlock", lock);
1381 #endif
1382 		lf_free_lock(lock);
1383 		goto out;
1384 	}
1385 
1386 	/*
1387 	 * No blocks!!  Add the lock.  Note that we will
1388 	 * downgrade or upgrade any overlapping locks this
1389 	 * process already owns.
1390 	 */
1391 	lf_activate_lock(state, lock);
1392 	error = 0;
1393 out:
1394 	return (error);
1395 }
1396 
1397 /*
1398  * Remove a byte-range lock on an inode.
1399  *
1400  * Generally, find the lock (or an overlap to that lock)
1401  * and remove it (or shrink it), then wakeup anyone we can.
1402  */
1403 static int
1404 lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
1405 {
1406 	struct lockf_entry *overlap;
1407 
1408 	overlap = LIST_FIRST(&state->ls_active);
1409 
1410 	if (overlap == NOLOCKF)
1411 		return (0);
1412 #ifdef LOCKF_DEBUG
1413 	if (unlock->lf_type != F_UNLCK)
1414 		panic("lf_clearlock: bad type");
1415 	if (lockf_debug & 1)
1416 		lf_print("lf_clearlock", unlock);
1417 #endif /* LOCKF_DEBUG */
1418 
1419 	lf_activate_lock(state, unlock);
1420 
1421 	return (0);
1422 }
1423 
1424 /*
1425  * Check whether there is a blocking lock, and if so return its
1426  * details in '*fl'.
1427  */
1428 static int
1429 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
1430 {
1431 	struct lockf_entry *block;
1432 
1433 #ifdef LOCKF_DEBUG
1434 	if (lockf_debug & 1)
1435 		lf_print("lf_getlock", lock);
1436 #endif /* LOCKF_DEBUG */
1437 
1438 	if ((block = lf_getblock(state, lock))) {
1439 		fl->l_type = block->lf_type;
1440 		fl->l_whence = SEEK_SET;
1441 		fl->l_start = block->lf_start;
1442 		if (block->lf_end == OFF_MAX)
1443 			fl->l_len = 0;
1444 		else
1445 			fl->l_len = block->lf_end - block->lf_start + 1;
1446 		fl->l_pid = block->lf_owner->lo_pid;
1447 		fl->l_sysid = block->lf_owner->lo_sysid;
1448 	} else {
1449 		fl->l_type = F_UNLCK;
1450 	}
1451 	return (0);
1452 }
1453 
1454 /*
1455  * Cancel an async lock request.
1456  */
1457 static int
1458 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
1459 {
1460 	struct lockf_entry *reallock;
1461 
1462 	/*
1463 	 * We need to match this request with an existing lock
1464 	 * request.
1465 	 */
1466 	LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
1467 		if ((void *) reallock == cookie) {
1468 			/*
1469 			 * Double-check that this lock looks right
1470 			 * (maybe use a rolling ID for the cancel
1471 			 * cookie instead?)
1472 			 */
1473 			if (!(reallock->lf_vnode == lock->lf_vnode
1474 				&& reallock->lf_start == lock->lf_start
1475 				&& reallock->lf_end == lock->lf_end)) {
1476 				return (ENOENT);
1477 			}
1478 
1479 			/*
1480 			 * Make sure this lock was async and then just
1481 			 * remove it from its wait lists.
1482 			 */
1483 			if (!reallock->lf_async_task) {
1484 				return (ENOENT);
1485 			}
1486 
1487 			/*
1488 			 * Note that since any other thread must take
1489 			 * state->ls_lock before it can possibly
1490 			 * trigger the async callback, we are safe
1491 			 * from a race with lf_wakeup_lock, i.e. we
1492 			 * can free the lock (actually our caller does
1493 			 * this).
1494 			 */
1495 			lf_cancel_lock(state, reallock);
1496 			return (0);
1497 		}
1498 	}
1499 
1500 	/*
1501 	 * We didn't find a matching lock - not much we can do here.
1502 	 */
1503 	return (ENOENT);
1504 }
1505 
1506 /*
1507  * Walk the list of locks for an inode and
1508  * return the first blocking lock.
1509  */
1510 static struct lockf_entry *
1511 lf_getblock(struct lockf *state, struct lockf_entry *lock)
1512 {
1513 	struct lockf_entry *overlap;
1514 
1515 	LIST_FOREACH(overlap, &state->ls_active, lf_link) {
1516 		/*
1517 		 * We may assume that the active list is sorted by
1518 		 * lf_start.
1519 		 */
1520 		if (overlap->lf_start > lock->lf_end)
1521 			break;
1522 		if (!lf_blocks(lock, overlap))
1523 			continue;
1524 		return (overlap);
1525 	}
1526 	return (NOLOCKF);
1527 }
1528 
1529 /*
1530  * Walk the list of locks for an inode to find an overlapping lock (if
1531  * any) and return a classification of that overlap.
1532  *
1533  * Arguments:
1534  *	*overlap	The place in the lock list to start looking
1535  *	lock		The lock which is being tested
1536  *	type		Pass 'SELF' to test only locks with the same
1537  *			owner as lock, or 'OTHERS' to test only locks
1538  *			with a different owner
1539  *
1540  * Returns one of six values:
1541  *	0) no overlap
1542  *	1) overlap == lock
1543  *	2) overlap contains lock
1544  *	3) lock contains overlap
1545  *	4) overlap starts before lock
1546  *	5) overlap ends after lock
1547  *
1548  * If there is an overlapping lock, '*overlap' is set to point at the
1549  * overlapping lock.
1550  *
1551  * NOTE: this returns only the FIRST overlapping lock.  There
1552  *	 may be more than one.
1553  */
1554 static int
1555 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
1556 {
1557 	struct lockf_entry *lf;
1558 	off_t start, end;
1559 	int res;
1560 
1561 	if ((*overlap) == NOLOCKF) {
1562 		return (0);
1563 	}
1564 #ifdef LOCKF_DEBUG
1565 	if (lockf_debug & 2)
1566 		lf_print("lf_findoverlap: looking for overlap in", lock);
1567 #endif /* LOCKF_DEBUG */
1568 	start = lock->lf_start;
1569 	end = lock->lf_end;
1570 	res = 0;
1571 	while (*overlap) {
1572 		lf = *overlap;
1573 		if (lf->lf_start > end)
1574 			break;
1575 		if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
1576 		    ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
1577 			*overlap = LIST_NEXT(lf, lf_link);
1578 			continue;
1579 		}
1580 #ifdef LOCKF_DEBUG
1581 		if (lockf_debug & 2)
1582 			lf_print("\tchecking", lf);
1583 #endif /* LOCKF_DEBUG */
1584 		/*
1585 		 * OK, check for overlap
1586 		 *
1587 		 * Six cases:
1588 		 *	0) no overlap
1589 		 *	1) overlap == lock
1590 		 *	2) overlap contains lock
1591 		 *	3) lock contains overlap
1592 		 *	4) overlap starts before lock
1593 		 *	5) overlap ends after lock
1594 		 */
1595 		if (start > lf->lf_end) {
1596 			/* Case 0 */
1597 #ifdef LOCKF_DEBUG
1598 			if (lockf_debug & 2)
1599 				printf("no overlap\n");
1600 #endif /* LOCKF_DEBUG */
1601 			*overlap = LIST_NEXT(lf, lf_link);
1602 			continue;
1603 		}
1604 		if (lf->lf_start == start && lf->lf_end == end) {
1605 			/* Case 1 */
1606 #ifdef LOCKF_DEBUG
1607 			if (lockf_debug & 2)
1608 				printf("overlap == lock\n");
1609 #endif /* LOCKF_DEBUG */
1610 			res = 1;
1611 			break;
1612 		}
1613 		if (lf->lf_start <= start && lf->lf_end >= end) {
1614 			/* Case 2 */
1615 #ifdef LOCKF_DEBUG
1616 			if (lockf_debug & 2)
1617 				printf("overlap contains lock\n");
1618 #endif /* LOCKF_DEBUG */
1619 			res = 2;
1620 			break;
1621 		}
1622 		if (start <= lf->lf_start && end >= lf->lf_end) {
1623 			/* Case 3 */
1624 #ifdef LOCKF_DEBUG
1625 			if (lockf_debug & 2)
1626 				printf("lock contains overlap\n");
1627 #endif /* LOCKF_DEBUG */
1628 			res = 3;
1629 			break;
1630 		}
1631 		if (lf->lf_start < start && lf->lf_end >= start) {
1632 			/* Case 4 */
1633 #ifdef LOCKF_DEBUG
1634 			if (lockf_debug & 2)
1635 				printf("overlap starts before lock\n");
1636 #endif /* LOCKF_DEBUG */
1637 			res = 4;
1638 			break;
1639 		}
1640 		if (lf->lf_start > start && lf->lf_end > end) {
1641 			/* Case 5 */
1642 #ifdef LOCKF_DEBUG
1643 			if (lockf_debug & 2)
1644 				printf("overlap ends after lock\n");
1645 #endif /* LOCKF_DEBUG */
1646 			res = 5;
1647 			break;
1648 		}
1649 		panic("lf_findoverlap: default");
1650 	}
1651 	return (res);
1652 }
1653 
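/*
 * Illustrative sketch (not part of the kernel build): the six return
 * values of lf_findoverlap() above classify how an existing lock
 * [os..oe] relates to the lock being tested [ls..le].  The stand-alone
 * helper below applies equivalent tests in the same order;
 * toy_classify is a hypothetical name used only for this example.
 */
#if 0	/* example only - never compiled into the kernel */
#include <sys/types.h>
#include <assert.h>

static int
toy_classify(off_t os, off_t oe, off_t ls, off_t le)
{
	if (oe < ls || os > le)
		return (0);	/* no overlap */
	if (os == ls && oe == le)
		return (1);	/* overlap == lock */
	if (os <= ls && oe >= le)
		return (2);	/* overlap contains lock */
	if (ls <= os && le >= oe)
		return (3);	/* lock contains overlap */
	if (os < ls && oe >= ls)
		return (4);	/* overlap starts before lock */
	return (5);		/* overlap ends after lock */
}

static void
toy_classify_demo(void)
{
	assert(toy_classify(0, 9, 20, 29) == 0);
	assert(toy_classify(20, 29, 20, 29) == 1);
	assert(toy_classify(10, 39, 20, 29) == 2);
	assert(toy_classify(22, 27, 20, 29) == 3);
	assert(toy_classify(10, 24, 20, 29) == 4);
	assert(toy_classify(25, 39, 20, 29) == 5);
}
#endif
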
1654 /*
1655  * Split the existing 'lock1', based on the extent of the lock
1656  * described by 'lock2'. The existing lock should cover 'lock2'
1657  * entirely.
1658  *
1659  * Any pending locks which have been unblocked are added to
1660  * 'granted'.
1661  */
1662 static void
1663 lf_split(struct lockf *state, struct lockf_entry *lock1,
1664     struct lockf_entry *lock2, struct lockf_entry_list *granted)
1665 {
1666 	struct lockf_entry *splitlock;
1667 
1668 #ifdef LOCKF_DEBUG
1669 	if (lockf_debug & 2) {
1670 		lf_print("lf_split", lock1);
1671 		lf_print("splitting from", lock2);
1672 	}
1673 #endif /* LOCKF_DEBUG */
1674 	/*
1675 	 * Check to see if we don't need to split at all.
1676 	 * Check to see if we can avoid splitting at all.
1677 	if (lock1->lf_start == lock2->lf_start) {
1678 		lf_set_start(state, lock1, lock2->lf_end + 1, granted);
1679 		return;
1680 	}
1681 	if (lock1->lf_end == lock2->lf_end) {
1682 		lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1683 		return;
1684 	}
1685 	/*
1686 	 * Make a new lock consisting of the last part of
1687 	 * the encompassing lock.
1688 	 */
1689 	splitlock = lf_alloc_lock(lock1->lf_owner);
1690 	memcpy(splitlock, lock1, sizeof *splitlock);
1691 	if (splitlock->lf_flags & F_REMOTE)
1692 		vref(splitlock->lf_vnode);
1693 
1694 	/*
1695 	 * This cannot cause a deadlock since any edges we would add
1696 	 * to splitlock already exist in lock1. We must be sure to add
1697 	 * necessary dependencies to splitlock before we reduce lock1
1698 	 * otherwise we may accidentally grant a pending lock that
1699 	 * was blocked by the tail end of lock1.
1700 	 */
1701 	splitlock->lf_start = lock2->lf_end + 1;
1702 	LIST_INIT(&splitlock->lf_outedges);
1703 	LIST_INIT(&splitlock->lf_inedges);
1704 	sx_xlock(&lf_owner_graph_lock);
1705 	lf_add_incoming(state, splitlock);
1706 	sx_xunlock(&lf_owner_graph_lock);
1707 
1708 	lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1709 
1710 	/*
1711 	 * OK, now link it in
1712 	 */
1713 	lf_insert_lock(state, splitlock);
1714 }
1715 
1716 struct clearlock {
1717 	STAILQ_ENTRY(clearlock) link;
1718 	struct vnode *vp;
1719 	struct flock fl;
1720 };
1721 STAILQ_HEAD(clearlocklist, clearlock);
1722 
1723 void
1724 lf_clearremotesys(int sysid)
1725 {
1726 	struct lockf *ls;
1727 	struct lockf_entry *lf;
1728 	struct clearlock *cl;
1729 	struct clearlocklist locks;
1730 
1731 	KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
1732 
1733 	/*
1734 	 * In order to keep the locking simple, we iterate over the
1735 	 * active lock lists to build a list of locks that need
1736 	 * releasing. We then call VOP_ADVLOCK for each one in turn.
1737 	 *
1738 	 * We take an extra reference to the vnode for the duration to
1739 	 * make sure it doesn't go away before we are finished.
1740 	 */
1741 	STAILQ_INIT(&locks);
1742 	sx_xlock(&lf_lock_states_lock);
1743 	LIST_FOREACH(ls, &lf_lock_states, ls_link) {
1744 		sx_xlock(&ls->ls_lock);
1745 		LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1746 			if (lf->lf_owner->lo_sysid != sysid)
1747 				continue;
1748 
1749 			cl = malloc(sizeof(struct clearlock), M_LOCKF,
1750 			    M_WAITOK);
1751 			cl->vp = lf->lf_vnode;
1752 			vref(cl->vp);
1753 			cl->fl.l_start = lf->lf_start;
1754 			if (lf->lf_end == OFF_MAX)
1755 				cl->fl.l_len = 0;
1756 			else
1757 				cl->fl.l_len =
1758 					lf->lf_end - lf->lf_start + 1;
1759 			cl->fl.l_whence = SEEK_SET;
1760 			cl->fl.l_type = F_UNLCK;
1761 			cl->fl.l_pid = lf->lf_owner->lo_pid;
1762 			cl->fl.l_sysid = sysid;
1763 			STAILQ_INSERT_TAIL(&locks, cl, link);
1764 		}
1765 		sx_xunlock(&ls->ls_lock);
1766 	}
1767 	sx_xunlock(&lf_lock_states_lock);
1768 
1769 	while ((cl = STAILQ_FIRST(&locks)) != NULL) {
1770 		STAILQ_REMOVE_HEAD(&locks, link);
1771 		VOP_ADVLOCK(cl->vp, 0, F_UNLCK, &cl->fl, F_REMOTE);
1772 		vrele(cl->vp);
1773 		free(cl, M_LOCKF);
1774 	}
1775 }
1776 
1777 int
1778 lf_countlocks(int sysid)
1779 {
1780 	int i;
1781 	struct lock_owner *lo;
1782 	int count;
1783 
1784 	count = 0;
1785 	sx_xlock(&lf_lock_owners_lock);
1786 	for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
1787 		LIST_FOREACH(lo, &lf_lock_owners[i], lo_link)
1788 			if (lo->lo_sysid == sysid)
1789 				count += lo->lo_refs;
1790 	sx_xunlock(&lf_lock_owners_lock);
1791 
1792 	return (count);
1793 }
1794 
1795 #ifdef LOCKF_DEBUG
1796 
1797 /*
1798  * Return non-zero if y is reachable from x using a brute force
1799  * search. If reachable and path is non-null, return the route taken
1800  * in path.
1801  */
1802 static int
1803 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
1804     struct owner_vertex_list *path)
1805 {
1806 	struct owner_edge *e;
1807 
1808 	if (x == y) {
1809 		if (path)
1810 			TAILQ_INSERT_HEAD(path, x, v_link);
1811 		return 1;
1812 	}
1813 
1814 	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
1815 		if (graph_reaches(e->e_to, y, path)) {
1816 			if (path)
1817 				TAILQ_INSERT_HEAD(path, x, v_link);
1818 			return 1;
1819 		}
1820 	}
1821 	return 0;
1822 }
1823 
1824 /*
1825  * Perform consistency checks on the graph. Make sure the values of
1826  * v_order are correct. If checkorder is non-zero, check no vertex can
1827  * reach any other vertex with a smaller order.
1828  */
1829 static void
1830 graph_check(struct owner_graph *g, int checkorder)
1831 {
1832 	int i, j;
1833 
1834 	for (i = 0; i < g->g_size; i++) {
1835 		if (!g->g_vertices[i]->v_owner)
1836 			continue;
1837 		KASSERT(g->g_vertices[i]->v_order == i,
1838 		    ("lock graph vertices disordered"));
1839 		if (checkorder) {
1840 			for (j = 0; j < i; j++) {
1841 				if (!g->g_vertices[j]->v_owner)
1842 					continue;
1843 				KASSERT(!graph_reaches(g->g_vertices[i],
1844 					g->g_vertices[j], NULL),
1845 				    ("lock graph vertices disordered"));
1846 			}
1847 		}
1848 	}
1849 }
1850 
1851 static void
1852 graph_print_vertices(struct owner_vertex_list *set)
1853 {
1854 	struct owner_vertex *v;
1855 
1856 	printf("{ ");
1857 	TAILQ_FOREACH(v, set, v_link) {
1858 		printf("%d:", v->v_order);
1859 		lf_print_owner(v->v_owner);
1860 		if (TAILQ_NEXT(v, v_link))
1861 			printf(", ");
1862 	}
1863 	printf(" }\n");
1864 }
1865 
#endif /* LOCKF_DEBUG */
1867 
1868 /*
1869  * Calculate the sub-set of vertices v from the affected region [y..x]
 * where v is reachable from y. Return -1 if a loop was detected
 * (i.e. x is reachable from y), otherwise return the number of
 * vertices in this subset.
1873  */
1874 static int
1875 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
1876     struct owner_vertex *y, struct owner_vertex_list *delta)
1877 {
1878 	uint32_t gen;
1879 	struct owner_vertex *v;
1880 	struct owner_edge *e;
1881 	int n;
1882 
1883 	/*
	 * We start with a set containing just y. Then, for each vertex
	 * v in the set that has not yet been processed, we add every
	 * vertex that v has an out-edge to and that lies within the
	 * affected region [y..x]. If we encounter the vertex x, we
	 * stop immediately.
1889 	 */
1890 	TAILQ_INIT(delta);
1891 	TAILQ_INSERT_TAIL(delta, y, v_link);
1892 	v = y;
1893 	n = 1;
1894 	gen = g->g_gen;
1895 	while (v) {
1896 		LIST_FOREACH(e, &v->v_outedges, e_outlink) {
1897 			if (e->e_to == x)
				return (-1);
1899 			if (e->e_to->v_order < x->v_order
1900 			    && e->e_to->v_gen != gen) {
1901 				e->e_to->v_gen = gen;
1902 				TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
1903 				n++;
1904 			}
1905 		}
1906 		v = TAILQ_NEXT(v, v_link);
1907 	}
1908 
1909 	return (n);
1910 }
1911 
1912 /*
1913  * Calculate the sub-set of vertices v from the affected region [y..x]
1914  * where v reaches x. Return the number of vertices in this subset.
1915  */
1916 static int
1917 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
1918     struct owner_vertex *y, struct owner_vertex_list *delta)
1919 {
1920 	uint32_t gen;
1921 	struct owner_vertex *v;
1922 	struct owner_edge *e;
1923 	int n;
1924 
1925 	/*
	 * We start with a set containing just x. Then, for each vertex
	 * v in the set that has not yet been processed, we add every
	 * vertex that v has an in-edge from and that lies within the
	 * affected region [y..x].
1930 	 */
1931 	TAILQ_INIT(delta);
1932 	TAILQ_INSERT_TAIL(delta, x, v_link);
1933 	v = x;
1934 	n = 1;
1935 	gen = g->g_gen;
1936 	while (v) {
1937 		LIST_FOREACH(e, &v->v_inedges, e_inlink) {
1938 			if (e->e_from->v_order > y->v_order
1939 			    && e->e_from->v_gen != gen) {
1940 				e->e_from->v_gen = gen;
1941 				TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
1942 				n++;
1943 			}
1944 		}
1945 		v = TAILQ_PREV(v, owner_vertex_list, v_link);
1946 	}
1947 
1948 	return (n);
1949 }
1950 
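/*
 * Insert the v_order value of each vertex in 'set' into the sorted
 * array 'indices', which already contains 'n' entries. A simple
 * insertion sort keeps the array sorted; the new entry count is
 * returned.
 */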
1951 static int
1952 graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
1953 {
1954 	struct owner_vertex *v;
1955 	int i, j;
1956 
1957 	TAILQ_FOREACH(v, set, v_link) {
1958 		for (i = n;
1959 		     i > 0 && indices[i - 1] > v->v_order; i--)
1960 			;
1961 		for (j = n - 1; j >= i; j--)
1962 			indices[j + 1] = indices[j];
1963 		indices[i] = v->v_order;
1964 		n++;
1965 	}
1966 
1967 	return (n);
1968 }
1969 
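/*
 * Remove vertices from 'set' in increasing order of their current
 * v_order values and assign each the next unused value from
 * 'indices', updating the graph's vertex array to match. Return the
 * index of the next unused entry.
 */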
1970 static int
1971 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
1972     struct owner_vertex_list *set)
1973 {
1974 	struct owner_vertex *v, *vlowest;
1975 
1976 	while (!TAILQ_EMPTY(set)) {
1977 		vlowest = NULL;
1978 		TAILQ_FOREACH(v, set, v_link) {
1979 			if (!vlowest || v->v_order < vlowest->v_order)
1980 				vlowest = v;
1981 		}
1982 		TAILQ_REMOVE(set, vlowest, v_link);
1983 		vlowest->v_order = indices[nextunused];
1984 		g->g_vertices[vlowest->v_order] = vlowest;
1985 		nextunused++;
1986 	}
1987 
1988 	return (nextunused);
1989 }
1990 
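/*
 * Add an edge x->y to the owner graph (or bump the reference count of
 * an existing edge), re-ordering vertices where necessary so that the
 * topological order of the graph is preserved. Return EDEADLK if the
 * new edge would create a cycle.
 */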
1991 static int
1992 graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
1993     struct owner_vertex *y)
1994 {
1995 	struct owner_edge *e;
1996 	struct owner_vertex_list deltaF, deltaB;
1997 	int nF, nB, n, vi, i;
1998 	int *indices;
1999 
2000 	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2001 
2002 	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2003 		if (e->e_to == y) {
2004 			e->e_refs++;
2005 			return (0);
2006 		}
2007 	}
2008 
2009 #ifdef LOCKF_DEBUG
2010 	if (lockf_debug & 8) {
2011 		printf("adding edge %d:", x->v_order);
2012 		lf_print_owner(x->v_owner);
2013 		printf(" -> %d:", y->v_order);
2014 		lf_print_owner(y->v_owner);
2015 		printf("\n");
2016 	}
2017 #endif
2018 	if (y->v_order < x->v_order) {
2019 		/*
2020 		 * The new edge violates the order. First find the set
2021 		 * of affected vertices reachable from y (deltaF) and
		 * the set of affected vertices that reach x
2023 		 * (deltaB), using the graph generation number to
2024 		 * detect whether we have visited a given vertex
2025 		 * already. We re-order the graph so that each vertex
2026 		 * in deltaB appears before each vertex in deltaF.
2027 		 *
2028 		 * If x is a member of deltaF, then the new edge would
2029 		 * create a cycle. Otherwise, we may assume that
2030 		 * deltaF and deltaB are disjoint.
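		 *
		 * For example, with vertices ordered A(0) B(1) C(2) D(3)
		 * and an existing edge B->C, adding the edge D->B gives
		 * deltaF = {B, C} and deltaB = {D}. Re-using the order
		 * values {1, 2, 3} then yields D(1) B(2) C(3), which
		 * restores a valid topological order.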
2031 		 */
2032 		g->g_gen++;
2033 		if (g->g_gen == 0) {
2034 			/*
2035 			 * Generation wrap.
2036 			 */
2037 			for (vi = 0; vi < g->g_size; vi++) {
2038 				g->g_vertices[vi]->v_gen = 0;
2039 			}
2040 			g->g_gen++;
2041 		}
2042 		nF = graph_delta_forward(g, x, y, &deltaF);
2043 		if (nF < 0) {
2044 #ifdef LOCKF_DEBUG
2045 			if (lockf_debug & 8) {
2046 				struct owner_vertex_list path;
2047 				printf("deadlock: ");
2048 				TAILQ_INIT(&path);
2049 				graph_reaches(y, x, &path);
2050 				graph_print_vertices(&path);
2051 			}
2052 #endif
2053 			return (EDEADLK);
2054 		}
2055 
2056 #ifdef LOCKF_DEBUG
2057 		if (lockf_debug & 8) {
2058 			printf("re-ordering graph vertices\n");
2059 			printf("deltaF = ");
2060 			graph_print_vertices(&deltaF);
2061 		}
2062 #endif
2063 
2064 		nB = graph_delta_backward(g, x, y, &deltaB);
2065 
2066 #ifdef LOCKF_DEBUG
2067 		if (lockf_debug & 8) {
2068 			printf("deltaB = ");
2069 			graph_print_vertices(&deltaB);
2070 		}
2071 #endif
2072 
2073 		/*
2074 		 * We first build a set of vertex indices (vertex
2075 		 * order values) that we may use, then we re-assign
2076 		 * orders first to those vertices in deltaB, then to
2077 		 * deltaF. Note that the contents of deltaF and deltaB
2078 		 * may be partially disordered - we perform an
2079 		 * insertion sort while building our index set.
2080 		 */
2081 		indices = g->g_indexbuf;
2082 		n = graph_add_indices(indices, 0, &deltaF);
2083 		graph_add_indices(indices, n, &deltaB);
2084 
2085 		/*
2086 		 * We must also be sure to maintain the relative
2087 		 * ordering of deltaF and deltaB when re-assigning
2088 		 * vertices. We do this by iteratively removing the
2089 		 * lowest ordered element from the set and assigning
2090 		 * it the next value from our new ordering.
2091 		 */
2092 		i = graph_assign_indices(g, indices, 0, &deltaB);
2093 		graph_assign_indices(g, indices, i, &deltaF);
2094 
2095 #ifdef LOCKF_DEBUG
2096 		if (lockf_debug & 8) {
2097 			struct owner_vertex_list set;
2098 			TAILQ_INIT(&set);
2099 			for (i = 0; i < nB + nF; i++)
2100 				TAILQ_INSERT_TAIL(&set,
2101 				    g->g_vertices[indices[i]], v_link);
2102 			printf("new ordering = ");
2103 			graph_print_vertices(&set);
2104 		}
2105 #endif
2106 	}
2107 
2108 	KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
2109 
2110 #ifdef LOCKF_DEBUG
2111 	if (lockf_debug & 8) {
2112 		graph_check(g, TRUE);
2113 	}
2114 #endif
2115 
2116 	e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
2117 
2118 	LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
2119 	LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
2120 	e->e_refs = 1;
2121 	e->e_from = x;
2122 	e->e_to = y;
2123 
2124 	return (0);
2125 }
2126 
2127 /*
2128  * Remove an edge x->y from the graph.
2129  */
2130 static void
2131 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
2132     struct owner_vertex *y)
2133 {
2134 	struct owner_edge *e;
2135 
2136 	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2137 
2138 	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2139 		if (e->e_to == y)
2140 			break;
2141 	}
2142 	KASSERT(e, ("Removing non-existent edge from deadlock graph"));
2143 
2144 	e->e_refs--;
2145 	if (e->e_refs == 0) {
2146 #ifdef LOCKF_DEBUG
2147 		if (lockf_debug & 8) {
2148 			printf("removing edge %d:", x->v_order);
2149 			lf_print_owner(x->v_owner);
2150 			printf(" -> %d:", y->v_order);
2151 			lf_print_owner(y->v_owner);
2152 			printf("\n");
2153 		}
2154 #endif
2155 		LIST_REMOVE(e, e_outlink);
2156 		LIST_REMOVE(e, e_inlink);
2157 		free(e, M_LOCKF);
2158 	}
2159 }
2160 
2161 /*
 * Allocate a new vertex for the given lock owner, growing the graph's
 * vertex array and index buffer if necessary.
2164  */
2165 static struct owner_vertex *
2166 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
2167 {
2168 	struct owner_vertex *v;
2169 
2170 	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2171 
2172 	v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
2173 	if (g->g_size == g->g_space) {
2174 		g->g_vertices = realloc(g->g_vertices,
2175 		    2 * g->g_space * sizeof(struct owner_vertex *),
2176 		    M_LOCKF, M_WAITOK);
2177 		free(g->g_indexbuf, M_LOCKF);
2178 		g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
2179 		    M_LOCKF, M_WAITOK);
2180 		g->g_space = 2 * g->g_space;
2181 	}
2182 	v->v_order = g->g_size;
2183 	v->v_gen = g->g_gen;
2184 	g->g_vertices[g->g_size] = v;
2185 	g->g_size++;
2186 
2187 	LIST_INIT(&v->v_outedges);
2188 	LIST_INIT(&v->v_inedges);
2189 	v->v_owner = lo;
2190 
2191 	return (v);
2192 }
2193 
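/*
 * Free a vertex, which must have no remaining edges, closing up the
 * gap it leaves in the graph's vertex array.
 */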
2194 static void
2195 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
2196 {
2197 	struct owner_vertex *w;
2198 	int i;
2199 
2200 	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2201 
2202 	KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
2203 	KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
2204 
2205 	/*
2206 	 * Remove from the graph's array and close up the gap,
2207 	 * renumbering the other vertices.
2208 	 */
2209 	for (i = v->v_order + 1; i < g->g_size; i++) {
2210 		w = g->g_vertices[i];
2211 		w->v_order--;
2212 		g->g_vertices[i - 1] = w;
2213 	}
2214 	g->g_size--;
2215 
2216 	free(v, M_LOCKF);
2217 }
2218 
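/*
 * Initialize an owner graph, allocating space for an initial ten
 * vertices.
 */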
2219 static struct owner_graph *
2220 graph_init(struct owner_graph *g)
2221 {
2222 
2223 	g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
2224 	    M_LOCKF, M_WAITOK);
2225 	g->g_size = 0;
2226 	g->g_space = 10;
2227 	g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
2228 	g->g_gen = 0;
2229 
2230 	return (g);
2231 }
2232 
2233 #ifdef LOCKF_DEBUG
2234 /*
2235  * Print description of a lock owner
2236  */
2237 static void
2238 lf_print_owner(struct lock_owner *lo)
2239 {
2240 
2241 	if (lo->lo_flags & F_REMOTE) {
2242 		printf("remote pid %d, system %d",
2243 		    lo->lo_pid, lo->lo_sysid);
2244 	} else if (lo->lo_flags & F_FLOCK) {
2245 		printf("file %p", lo->lo_id);
2246 	} else {
2247 		printf("local pid %d", lo->lo_pid);
2248 	}
2249 }
2250 
2251 /*
2252  * Print out a lock.
2253  */
2254 static void
2255 lf_print(char *tag, struct lockf_entry *lock)
2256 {
2257 
2258 	printf("%s: lock %p for ", tag, (void *)lock);
2259 	lf_print_owner(lock->lf_owner);
	if (lock->lf_inode != NULL)
2261 		printf(" in ino %ju on dev <%s>,",
2262 		    (uintmax_t)lock->lf_inode->i_number,
2263 		    devtoname(lock->lf_inode->i_dev));
2264 	printf(" %s, start %jd, end ",
2265 	    lock->lf_type == F_RDLCK ? "shared" :
2266 	    lock->lf_type == F_WRLCK ? "exclusive" :
2267 	    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
2268 	    (intmax_t)lock->lf_start);
2269 	if (lock->lf_end == OFF_MAX)
2270 		printf("EOF");
2271 	else
2272 		printf("%jd", (intmax_t)lock->lf_end);
2273 	if (!LIST_EMPTY(&lock->lf_outedges))
2274 		printf(" block %p\n",
2275 		    (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
2276 	else
2277 		printf("\n");
2278 }
2279 
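/*
 * Print the list of active locks for the inode associated with
 * 'lock', together with any lock requests blocked on each of them.
 */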
2280 static void
2281 lf_printlist(char *tag, struct lockf_entry *lock)
2282 {
2283 	struct lockf_entry *lf, *blk;
2284 	struct lockf_edge *e;
2285 
	if (lock->lf_inode == NULL)
2287 		return;
2288 
2289 	printf("%s: Lock list for ino %ju on dev <%s>:\n",
2290 	    tag, (uintmax_t)lock->lf_inode->i_number,
2291 	    devtoname(lock->lf_inode->i_dev));
2292 	LIST_FOREACH(lf, &lock->lf_inode->i_lockf->ls_active, lf_link) {
		printf("\tlock %p for ", (void *)lf);
		lf_print_owner(lf->lf_owner);
2295 		printf(", %s, start %jd, end %jd",
2296 		    lf->lf_type == F_RDLCK ? "shared" :
2297 		    lf->lf_type == F_WRLCK ? "exclusive" :
2298 		    lf->lf_type == F_UNLCK ? "unlock" :
2299 		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
2300 		LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
2301 			blk = e->le_to;
2302 			printf("\n\t\tlock request %p for ", (void *)blk);
2303 			lf_print_owner(blk->lf_owner);
2304 			printf(", %s, start %jd, end %jd",
2305 			    blk->lf_type == F_RDLCK ? "shared" :
2306 			    blk->lf_type == F_WRLCK ? "exclusive" :
2307 			    blk->lf_type == F_UNLCK ? "unlock" :
2308 			    "unknown", (intmax_t)blk->lf_start,
2309 			    (intmax_t)blk->lf_end);
2310 			if (!LIST_EMPTY(&blk->lf_inedges))
2311 				panic("lf_printlist: bad list");
2312 		}
2313 		printf("\n");
2314 	}
2315 }
2316 #endif /* LOCKF_DEBUG */
2317