/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Authors: Doug Rabson <dfr@rabson.org>
 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/cdefs.h>
#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/taskqueue.h>

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>

static int	lockf_debug = 0; /* control debug output */
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

struct owner_edge;
struct owner_vertex;
struct owner_vertex_list;
struct owner_graph;

#define NOLOCKF (struct lockf_entry *)0
#define SELF	0x1
#define OTHERS	0x2
static void	 lf_init(void *);
static int	 lf_hash_owner(caddr_t, struct vnode *, struct flock *, int);
static int	 lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
    int);
static struct lockf_entry *
		 lf_alloc_lock(struct lock_owner *);
static int	 lf_free_lock(struct lockf_entry *);
static int	 lf_clearlock(struct lockf *, struct lockf_entry *);
static int	 lf_overlaps(struct lockf_entry *, struct lockf_entry *);
static int	 lf_blocks(struct lockf_entry *, struct lockf_entry *);
static void	 lf_free_edge(struct lockf_edge *);
static struct lockf_edge *
		 lf_alloc_edge(void);
static void	 lf_alloc_vertex(struct lockf_entry *);
static int	 lf_add_edge(struct lockf_entry *, struct lockf_entry *);
static void	 lf_remove_edge(struct lockf_edge *);
static void	 lf_remove_outgoing(struct lockf_entry *);
static void	 lf_remove_incoming(struct lockf_entry *);
static int	 lf_add_outgoing(struct lockf *, struct lockf_entry *);
static int	 lf_add_incoming(struct lockf *, struct lockf_entry *);
static int	 lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
    int);
static struct lockf_entry *
		 lf_getblock(struct lockf *, struct lockf_entry *);
static int	 lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
static void	 lf_insert_lock(struct lockf *, struct lockf_entry *);
static void	 lf_wakeup_lock(struct lockf *, struct lockf_entry *);
static void	 lf_update_dependancies(struct lockf *, struct lockf_entry *,
    int all, struct lockf_entry_list *);
static void	 lf_set_start(struct lockf *, struct lockf_entry *, off_t,
	struct lockf_entry_list*);
static void	 lf_set_end(struct lockf *, struct lockf_entry *, off_t,
	struct lockf_entry_list*);
static int	 lf_setlock(struct lockf *, struct lockf_entry *,
    struct vnode *, void **cookiep);
static int	 lf_cancel(struct lockf *, struct lockf_entry *, void *);
static void	 lf_split(struct lockf *, struct lockf_entry *,
    struct lockf_entry *, struct lockf_entry_list *);
#ifdef LOCKF_DEBUG
static int	 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *path);
static void	 graph_check(struct owner_graph *g, int checkorder);
static void	 graph_print_vertices(struct owner_vertex_list *set);
#endif
static int	 graph_delta_forward(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *delta);
static int	 graph_delta_backward(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *delta);
static int	 graph_add_indices(int *indices, int n,
    struct owner_vertex_list *set);
static int	 graph_assign_indices(struct owner_graph *g, int *indices,
    int nextunused, struct owner_vertex_list *set);
static int	 graph_add_edge(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y);
static void	 graph_remove_edge(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y);
static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
    struct lock_owner *lo);
static void	 graph_free_vertex(struct owner_graph *g,
    struct owner_vertex *v);
static struct owner_graph * graph_init(struct owner_graph *g);
#ifdef LOCKF_DEBUG
static void	 lf_print(char *, struct lockf_entry *);
static void	 lf_printlist(char *, struct lockf_entry *);
static void	 lf_print_owner(struct lock_owner *);
#endif

/*
 * This structure is used to keep track of both local and remote lock
 * owners. The lf_owner field of the struct lockf_entry points back at
 * the lock owner structure. Each possible lock owner (local proc for
 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
 * pair for remote locks) is represented by a unique instance of
 * struct lock_owner.
 *
 * If a lock owner has a lock that blocks some other lock or a lock
 * that is waiting for some other lock, it also has a vertex in the
 * owner_graph below.
 *
 * Locks:
 * (l)		locked by the per-chain lock in lf_lock_owners
 * (s)		locked by state->ls_lock
 * (S)		locked by lf_lock_states_lock
 * (g)		locked by lf_owner_graph_lock
 * (c)		const until freeing
 */
#define	LOCK_OWNER_HASH_SIZE	256

struct lock_owner {
	LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
	int	lo_refs;	    /* (l) Number of locks referring to this */
	int	lo_flags;	    /* (c) Flags passed to lf_advlock */
	caddr_t	lo_id;		    /* (c) Id value passed to lf_advlock */
	pid_t	lo_pid;		    /* (c) Process Id of the lock owner */
	int	lo_sysid;	    /* (c) System Id of the lock owner */
	int	lo_hash;	    /* (c) Used to lock the appropriate chain */
	struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
};

LIST_HEAD(lock_owner_list, lock_owner);

struct lock_owner_chain {
	struct sx		lock;
	struct lock_owner_list	list;
};

static struct sx		lf_lock_states_lock;
static struct lockf_list	lf_lock_states; /* (S) */
static struct lock_owner_chain	lf_lock_owners[LOCK_OWNER_HASH_SIZE];

/*
 * Structures for deadlock detection.
 *
 * We have two types of directed graph, the first is the set of locks,
 * both active and pending on a vnode. Within this graph, active locks
 * are terminal nodes in the graph (i.e. have no out-going
 * edges). Pending locks have out-going edges to each blocking active
 * lock that prevents the lock from being granted and also to each
 * older pending lock that would block them if it was active. The
 * graph for each vnode is naturally acyclic; new edges are only ever
 * added to or from new nodes (either new pending locks which only add
 * out-going edges or new active locks which only add in-coming edges)
 * therefore they cannot create loops in the lock graph.
 *
 * The second graph is a global graph of lock owners. Each lock owner
 * is a vertex in that graph and an edge is added to the graph
 * whenever an edge is added to a vnode graph, with end points
 * corresponding to owner of the new pending lock and the owner of the
 * lock upon which it waits. In order to prevent deadlock, we only add
 * an edge to this graph if the new edge would not create a cycle.
 *
 * The lock owner graph is topologically sorted, i.e. if a node has
 * any outgoing edges, then it has an order strictly less than any
 * node to which it has an outgoing edge. We preserve this ordering
 * (and detect cycles) on edge insertion using Algorithm PK from the
 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
 * No. 1.7)
 */
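
/*
 * Illustrative sketch (an editorial summary of Algorithm PK, not text
 * from the paper): suppose owners A, B and C currently have
 * topological order A < B < C and a new wait adds the owner edge
 * C -> A. Since order(A) < order(C), the affected region is scanned:
 * the vertices reachable forward from A with order <= order(C), and
 * those reachable backward from C with order >= order(A). If the
 * forward search reaches C, the edge would close a cycle and the lock
 * attempt fails with EDEADLK; otherwise the two sets are re-numbered
 * so that C sorts before A and the edge can be added safely.
 */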
struct owner_vertex;

struct owner_edge {
	LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
	LIST_ENTRY(owner_edge) e_inlink;  /* (g) link to's in-edge list */
	int		e_refs;		  /* (g) number of times added */
	struct owner_vertex *e_from;	  /* (c) out-going from here */
	struct owner_vertex *e_to;	  /* (c) in-coming to here */
};
LIST_HEAD(owner_edge_list, owner_edge);

struct owner_vertex {
	TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
	uint32_t	v_gen;		  /* (g) workspace for edge insertion */
	int		v_order;	  /* (g) order of vertex in graph */
	struct owner_edge_list v_outedges;/* (g) list of out-edges */
	struct owner_edge_list v_inedges; /* (g) list of in-edges */
	struct lock_owner *v_owner;	  /* (c) corresponding lock owner */
};
TAILQ_HEAD(owner_vertex_list, owner_vertex);

struct owner_graph {
	struct owner_vertex** g_vertices; /* (g) pointers to vertices */
	int		g_size;		  /* (g) number of vertices */
	int		g_space;	  /* (g) space allocated for vertices */
	int		*g_indexbuf;	  /* (g) workspace for loop detection */
	uint32_t	g_gen;		  /* (g) increment when re-ordering */
};

static struct sx		lf_owner_graph_lock;
static struct owner_graph	lf_owner_graph;

/*
 * Initialise various structures and locks.
 */
static void
lf_init(void *dummy)
{
	int i;

	sx_init(&lf_lock_states_lock, "lock states lock");
	LIST_INIT(&lf_lock_states);

	for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) {
		sx_init(&lf_lock_owners[i].lock, "lock owners lock");
		LIST_INIT(&lf_lock_owners[i].list);
	}

	sx_init(&lf_owner_graph_lock, "owner graph lock");
	graph_init(&lf_owner_graph);
}
SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);

/*
 * Generate a hash value for a lock owner.
 */
static int
lf_hash_owner(caddr_t id, struct vnode *vp, struct flock *fl, int flags)
{
	uint32_t h;

	if (flags & F_REMOTE) {
		h = HASHSTEP(0, fl->l_pid);
		h = HASHSTEP(h, fl->l_sysid);
	} else if (flags & F_FLOCK) {
		h = ((uintptr_t) id) >> 7;
	} else {
		h = ((uintptr_t) vp) >> 7;
	}

	return (h % LOCK_OWNER_HASH_SIZE);
}

/*
 * Return true if a lock owner matches the details passed to
 * lf_advlock.
 */
static int
lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
    int flags)
{
	if (flags & F_REMOTE) {
		return lo->lo_pid == fl->l_pid
			&& lo->lo_sysid == fl->l_sysid;
	} else {
		return lo->lo_id == id;
	}
}

static struct lockf_entry *
lf_alloc_lock(struct lock_owner *lo)
{
	struct lockf_entry *lf;

	lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);

#ifdef LOCKF_DEBUG
	if (lockf_debug & 4)
		printf("Allocated lock %p\n", lf);
#endif
	if (lo) {
		sx_xlock(&lf_lock_owners[lo->lo_hash].lock);
		lo->lo_refs++;
		sx_xunlock(&lf_lock_owners[lo->lo_hash].lock);
		lf->lf_owner = lo;
	}

	return (lf);
}

static int
lf_free_lock(struct lockf_entry *lock)
{
	struct sx *chainlock;

	KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
	if (--lock->lf_refs > 0)
		return (0);
	/*
	 * Adjust the lock_owner reference count and
	 * reclaim the entry if this is the last lock
	 * for that owner.
	 */
	struct lock_owner *lo = lock->lf_owner;
	if (lo) {
		KASSERT(LIST_EMPTY(&lock->lf_outedges),
		    ("freeing lock with dependencies"));
		KASSERT(LIST_EMPTY(&lock->lf_inedges),
		    ("freeing lock with dependants"));
		chainlock = &lf_lock_owners[lo->lo_hash].lock;
		sx_xlock(chainlock);
		KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
		lo->lo_refs--;
		if (lo->lo_refs == 0) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 1)
				printf("lf_free_lock: freeing lock owner %p\n",
				    lo);
#endif
			if (lo->lo_vertex) {
				sx_xlock(&lf_owner_graph_lock);
				graph_free_vertex(&lf_owner_graph,
				    lo->lo_vertex);
				sx_xunlock(&lf_owner_graph_lock);
			}
			LIST_REMOVE(lo, lo_link);
			free(lo, M_LOCKF);
#ifdef LOCKF_DEBUG
			if (lockf_debug & 4)
				printf("Freed lock owner %p\n", lo);
#endif
		}
		sx_unlock(chainlock);
	}
	if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
		vrele(lock->lf_vnode);
		lock->lf_vnode = NULL;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 4)
		printf("Freed lock %p\n", lock);
#endif
	free(lock, M_LOCKF);
	return (1);
}

/*
 * Advisory record locking support
 */
int
lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
    u_quad_t size)
{
	struct lockf *state;
	struct flock *fl = ap->a_fl;
	struct lockf_entry *lock;
	struct vnode *vp = ap->a_vp;
	caddr_t id = ap->a_id;
	int flags = ap->a_flags;
	int hash;
	struct lock_owner *lo;
	off_t start, end, oadd;
	int error;

	/*
	 * Handle the F_UNLCKSYS case first - no need to mess about
	 * creating a lock owner for this one.
	 */
	if (ap->a_op == F_UNLCKSYS) {
		lf_clearremotesys(fl->l_sysid);
		return (0);
	}

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len < 0) {
		if (start == 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	} else if (fl->l_len == 0) {
		end = OFF_MAX;
	} else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
	}
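
	/*
	 * Worked example (illustration only): with l_whence = SEEK_SET,
	 * l_start = 10 and l_len = 5, the code above locks [10..14];
	 * l_len = -5 locks [5..9], the range ending just before
	 * l_start; and l_len = 0 locks [10..OFF_MAX], i.e. to end of
	 * file.
	 */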

retry_setlock:

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (ap->a_op != F_SETLK && (*statep) == NULL) {
		VI_LOCK(vp);
		if ((*statep) == NULL) {
			fl->l_type = F_UNLCK;
			VI_UNLOCK(vp);
			return (0);
		}
		VI_UNLOCK(vp);
	}

	/*
	 * Map our arguments to an existing lock owner or create one
	 * if this is the first time we have seen this owner.
	 */
	hash = lf_hash_owner(id, vp, fl, flags);
	sx_xlock(&lf_lock_owners[hash].lock);
	LIST_FOREACH(lo, &lf_lock_owners[hash].list, lo_link)
		if (lf_owner_matches(lo, id, fl, flags))
			break;
	if (!lo) {
		/*
		 * We initialise the lock with a reference
		 * count which matches the new lockf_entry
		 * structure created below.
		 */
		lo = malloc(sizeof(struct lock_owner), M_LOCKF,
		    M_WAITOK|M_ZERO);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 4)
			printf("Allocated lock owner %p\n", lo);
#endif

		lo->lo_refs = 1;
		lo->lo_flags = flags;
		lo->lo_id = id;
		lo->lo_hash = hash;
		if (flags & F_REMOTE) {
			lo->lo_pid = fl->l_pid;
			lo->lo_sysid = fl->l_sysid;
		} else if (flags & F_FLOCK) {
			lo->lo_pid = -1;
			lo->lo_sysid = 0;
		} else {
			struct proc *p = (struct proc *) id;
			lo->lo_pid = p->p_pid;
			lo->lo_sysid = 0;
		}
		lo->lo_vertex = NULL;

#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			printf("lf_advlockasync: new lock owner %p ", lo);
			lf_print_owner(lo);
			printf("\n");
		}
#endif

		LIST_INSERT_HEAD(&lf_lock_owners[hash].list, lo, lo_link);
	} else {
		/*
		 * We have seen this lock owner before, increase its
		 * reference count to account for the new lockf_entry
		 * structure we create below.
		 */
		lo->lo_refs++;
	}
	sx_xunlock(&lf_lock_owners[hash].lock);

	/*
	 * Create the lockf structure. We initialise the lf_owner
	 * field here instead of in lf_alloc_lock() to avoid paying
	 * the lf_lock_owners_lock tax twice.
	 */
	lock = lf_alloc_lock(NULL);
	lock->lf_refs = 1;
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_owner = lo;
	lock->lf_vnode = vp;
	if (flags & F_REMOTE) {
		/*
		 * For remote locks, the caller may release its ref to
		 * the vnode at any time - we have to ref it here to
		 * prevent it from being recycled unexpectedly.
		 */
		vref(vp);
	}

	lock->lf_type = fl->l_type;
	LIST_INIT(&lock->lf_outedges);
	LIST_INIT(&lock->lf_inedges);
	lock->lf_async_task = ap->a_task;
	lock->lf_flags = ap->a_flags;

	/*
	 * Do the requested operation. First find our state structure
	 * and create a new one if necessary - the caller's *statep
	 * variable and the state's ls_threads count is protected by
	 * the vnode interlock.
	 */
	VI_LOCK(vp);
	if (VN_IS_DOOMED(vp)) {
		VI_UNLOCK(vp);
		lf_free_lock(lock);
		return (ENOENT);
	}

	/*
	 * Allocate a state structure if necessary.
	 */
	state = *statep;
	if (state == NULL) {
		struct lockf *ls;

		VI_UNLOCK(vp);

		ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
		sx_init(&ls->ls_lock, "ls_lock");
		LIST_INIT(&ls->ls_active);
		LIST_INIT(&ls->ls_pending);
		ls->ls_threads = 1;

		sx_xlock(&lf_lock_states_lock);
		LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
		sx_xunlock(&lf_lock_states_lock);

		/*
		 * Cope if we lost a race with some other thread while
		 * trying to allocate memory.
		 */
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			sx_xlock(&lf_lock_states_lock);
			LIST_REMOVE(ls, ls_link);
			sx_xunlock(&lf_lock_states_lock);
			sx_destroy(&ls->ls_lock);
			free(ls, M_LOCKF);
			lf_free_lock(lock);
			return (ENOENT);
		}
		if ((*statep) == NULL) {
			state = *statep = ls;
			VI_UNLOCK(vp);
		} else {
			state = *statep;
			MPASS(state->ls_threads >= 0);
			state->ls_threads++;
			VI_UNLOCK(vp);

			sx_xlock(&lf_lock_states_lock);
			LIST_REMOVE(ls, ls_link);
			sx_xunlock(&lf_lock_states_lock);
			sx_destroy(&ls->ls_lock);
			free(ls, M_LOCKF);
		}
	} else {
		MPASS(state->ls_threads >= 0);
		state->ls_threads++;
		VI_UNLOCK(vp);
	}

	sx_xlock(&state->ls_lock);
	/*
	 * Recheck the doomed vnode after state->ls_lock is
	 * locked. lf_purgelocks() requires that no new threads add
	 * pending locks when vnode is marked by VIRF_DOOMED flag.
	 */
	if (VN_IS_DOOMED(vp)) {
		VI_LOCK(vp);
		MPASS(state->ls_threads > 0);
		state->ls_threads--;
		wakeup(state);
		VI_UNLOCK(vp);
		sx_xunlock(&state->ls_lock);
		lf_free_lock(lock);
		return (ENOENT);
	}

	switch (ap->a_op) {
	case F_SETLK:
		error = lf_setlock(state, lock, vp, ap->a_cookiep);
		break;

	case F_UNLCK:
		error = lf_clearlock(state, lock);
		lf_free_lock(lock);
		break;

	case F_GETLK:
		error = lf_getlock(state, lock, fl);
		lf_free_lock(lock);
		break;

	case F_CANCEL:
		if (ap->a_cookiep)
			error = lf_cancel(state, lock, *ap->a_cookiep);
		else
			error = EINVAL;
		lf_free_lock(lock);
		break;

	default:
		lf_free_lock(lock);
		error = EINVAL;
		break;
	}

#ifdef DIAGNOSTIC
	/*
	 * Check for some can't happen stuff. In this case, the active
	 * lock list becoming disordered or containing mutually
	 * blocking locks. We also check the pending list for locks
	 * which should be active (i.e. have no out-going edges).
	 */
	LIST_FOREACH(lock, &state->ls_active, lf_link) {
		struct lockf_entry *lf;
		if (LIST_NEXT(lock, lf_link))
			KASSERT((lock->lf_start
				<= LIST_NEXT(lock, lf_link)->lf_start),
			    ("locks disordered"));
		LIST_FOREACH(lf, &state->ls_active, lf_link) {
			if (lock == lf)
				break;
			KASSERT(!lf_blocks(lock, lf),
			    ("two conflicting active locks"));
			if (lock->lf_owner == lf->lf_owner)
				KASSERT(!lf_overlaps(lock, lf),
				    ("two overlapping locks from same owner"));
		}
	}
	LIST_FOREACH(lock, &state->ls_pending, lf_link) {
		KASSERT(!LIST_EMPTY(&lock->lf_outedges),
		    ("pending lock which should be active"));
	}
#endif
	sx_xunlock(&state->ls_lock);

	VI_LOCK(vp);
	MPASS(state->ls_threads > 0);
	state->ls_threads--;
	if (state->ls_threads != 0) {
		wakeup(state);
	}
	VI_UNLOCK(vp);

	if (error == EDOOFUS) {
		KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
		goto retry_setlock;
	}
	return (error);
}

int
lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
{
	struct vop_advlockasync_args a;

	a.a_vp = ap->a_vp;
	a.a_id = ap->a_id;
	a.a_op = ap->a_op;
	a.a_fl = ap->a_fl;
	a.a_flags = ap->a_flags;
	a.a_task = NULL;
	a.a_cookiep = NULL;

	return (lf_advlockasync(&a, statep, size));
}
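
/*
 * Usage sketch (the caller below is an illustration with hypothetical
 * names, not part of this file): a filesystem normally implements its
 * VOP_ADVLOCK by delegating here, passing a pointer to its per-vnode
 * lock state and the current file size so that SEEK_END ranges can be
 * resolved:
 *
 *	static int
 *	xxx_advlock(struct vop_advlock_args *ap)
 *	{
 *		struct xxxnode *np = VTOXXX(ap->a_vp);
 *
 *		return (lf_advlock(ap, &np->n_lockf, np->n_size));
 *	}
 */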

void
lf_purgelocks(struct vnode *vp, struct lockf **statep)
{
	struct lockf *state;
	struct lockf_entry *lock, *nlock;

	/*
	 * For this to work correctly, the caller must ensure that no
	 * other threads enter the locking system for this vnode,
	 * e.g. by checking VIRF_DOOMED. We wake up any threads that are
	 * sleeping waiting for locks on this vnode and then free all
	 * the remaining locks.
	 */
	KASSERT(VN_IS_DOOMED(vp),
	    ("lf_purgelocks: vp %p has not vgone yet", vp));
	state = *statep;
	if (state == NULL) {
		return;
	}
	VI_LOCK(vp);
	*statep = NULL;
	if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
		KASSERT(LIST_EMPTY(&state->ls_pending),
		    ("freeing state with pending locks"));
		VI_UNLOCK(vp);
		goto out_free;
	}
	MPASS(state->ls_threads >= 0);
	state->ls_threads++;
	VI_UNLOCK(vp);

	sx_xlock(&state->ls_lock);
	sx_xlock(&lf_owner_graph_lock);
	LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
		LIST_REMOVE(lock, lf_link);
		lf_remove_outgoing(lock);
		lf_remove_incoming(lock);

		/*
		 * If it's an async lock, we can just free it
		 * here, otherwise we let the sleeping thread
		 * free it.
		 */
		if (lock->lf_async_task) {
			lf_free_lock(lock);
		} else {
			lock->lf_flags |= F_INTR;
			wakeup(lock);
		}
	}
	sx_xunlock(&lf_owner_graph_lock);
	sx_xunlock(&state->ls_lock);

	/*
	 * Wait for all other threads, sleeping and otherwise
	 * to leave.
	 */
	VI_LOCK(vp);
	while (state->ls_threads > 1)
		msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
	VI_UNLOCK(vp);

	/*
	 * We can just free all the active locks since they
	 * will have no dependencies (we removed them all
	 * above). We don't need to bother locking since we
	 * are the last thread using this state structure.
	 */
	KASSERT(LIST_EMPTY(&state->ls_pending),
	    ("lock pending for %p", state));
	LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
		LIST_REMOVE(lock, lf_link);
		lf_free_lock(lock);
	}
out_free:
	sx_xlock(&lf_lock_states_lock);
	LIST_REMOVE(state, ls_link);
	sx_xunlock(&lf_lock_states_lock);
	sx_destroy(&state->ls_lock);
	free(state, M_LOCKF);
}

/*
 * Return non-zero if locks 'x' and 'y' overlap.
 */
static int
lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
{

	return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
}

/*
 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
 */
static int
lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
{

	return x->lf_owner != y->lf_owner
		&& (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
		&& lf_overlaps(x, y);
}
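
/*
 * For illustration: two read locks never block each other whatever
 * their ranges, a write lock blocks any overlapping lock, and a lock
 * never blocks another lock belonging to the same owner - that last
 * property is what lets an owner upgrade or downgrade its own ranges
 * in place.
 */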

/*
 * Allocate a lock edge from the free list
 */
static struct lockf_edge *
lf_alloc_edge(void)
{

	return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
}

/*
 * Free a lock edge.
 */
static void
lf_free_edge(struct lockf_edge *e)
{

	free(e, M_LOCKF);
}

/*
 * Ensure that the lock's owner has a corresponding vertex in the
 * owner graph.
 */
static void
lf_alloc_vertex(struct lockf_entry *lock)
{
	struct owner_graph *g = &lf_owner_graph;

	if (!lock->lf_owner->lo_vertex)
		lock->lf_owner->lo_vertex =
			graph_alloc_vertex(g, lock->lf_owner);
}

/*
 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
 * the new edge would cause a cycle in the owner graph.
 */
static int
lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
{
	struct owner_graph *g = &lf_owner_graph;
	struct lockf_edge *e;
	int error;

#ifdef DIAGNOSTIC
	LIST_FOREACH(e, &x->lf_outedges, le_outlink)
		KASSERT(e->le_to != y, ("adding lock edge twice"));
#endif

	/*
	 * Make sure the two owners have entries in the owner graph.
	 */
	lf_alloc_vertex(x);
	lf_alloc_vertex(y);

	error = graph_add_edge(g, x->lf_owner->lo_vertex,
	    y->lf_owner->lo_vertex);
	if (error)
		return (error);

	e = lf_alloc_edge();
	LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
	LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
	e->le_from = x;
	e->le_to = y;

	return (0);
}

/*
 * Remove an edge from the lock graph.
 */
static void
lf_remove_edge(struct lockf_edge *e)
{
	struct owner_graph *g = &lf_owner_graph;
	struct lockf_entry *x = e->le_from;
	struct lockf_entry *y = e->le_to;

	graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
	LIST_REMOVE(e, le_outlink);
	LIST_REMOVE(e, le_inlink);
	e->le_from = NULL;
	e->le_to = NULL;
	lf_free_edge(e);
}

/*
 * Remove all out-going edges from lock x.
 */
static void
lf_remove_outgoing(struct lockf_entry *x)
{
	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
		lf_remove_edge(e);
	}
}

/*
 * Remove all in-coming edges from lock x.
 */
static void
lf_remove_incoming(struct lockf_entry *x)
{
	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
		lf_remove_edge(e);
	}
}

/*
 * Walk the list of locks for the file and create an out-going edge
 * from lock to each blocking lock.
 */
static int
lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;
	int error;

	LIST_FOREACH(overlap, &state->ls_active, lf_link) {
		/*
		 * We may assume that the active list is sorted by
		 * lf_start.
		 */
		if (overlap->lf_start > lock->lf_end)
			break;
		if (!lf_blocks(lock, overlap))
			continue;

		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(lock, overlap);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_outgoing(lock);
			return (error);
		}
	}

	/*
	 * We also need to add edges to sleeping locks that block
	 * us. This ensures that lf_wakeup_lock cannot grant two
	 * mutually blocking locks simultaneously and also enforces a
	 * 'first come, first served' fairness model. Note that this
	 * only happens if we are blocked by at least one active lock
	 * due to the call to lf_getblock in lf_setlock below.
	 */
	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
		if (!lf_blocks(lock, overlap))
			continue;
		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(lock, overlap);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_outgoing(lock);
			return (error);
		}
	}

	return (0);
}

/*
 * Walk the list of pending locks for the file and create an in-coming
 * edge from lock to each blocking lock.
 */
static int
lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;
	int error;

	sx_assert(&state->ls_lock, SX_XLOCKED);
	if (LIST_EMPTY(&state->ls_pending))
		return (0);

	error = 0;
	sx_xlock(&lf_owner_graph_lock);
	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
		if (!lf_blocks(lock, overlap))
			continue;

		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(overlap, lock);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_incoming(lock);
			break;
		}
	}
	sx_xunlock(&lf_owner_graph_lock);
	return (error);
}

/*
 * Insert lock into the active list, keeping list entries ordered by
 * increasing values of lf_start.
 */
static void
lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *lf, *lfprev;

	if (LIST_EMPTY(&state->ls_active)) {
		LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
		return;
	}

	lfprev = NULL;
	LIST_FOREACH(lf, &state->ls_active, lf_link) {
		if (lf->lf_start > lock->lf_start) {
			LIST_INSERT_BEFORE(lf, lock, lf_link);
			return;
		}
		lfprev = lf;
	}
	LIST_INSERT_AFTER(lfprev, lock, lf_link);
}

/*
 * Wake up a sleeping lock and remove it from the pending list now
 * that all its dependencies have been resolved. The caller should
 * arrange for the lock to be added to the active list, adjusting any
 * existing locks for the same owner as needed.
 */
static void
lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
{

	/*
	 * Remove from ls_pending list and wake up the caller
	 * or start the async notification, as appropriate.
	 */
	LIST_REMOVE(wakelock, lf_link);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_wakeup_lock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
	if (wakelock->lf_async_task) {
		taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
	} else {
		wakeup(wakelock);
	}
}

/*
 * Re-check all dependent locks and remove edges to locks that we no
 * longer block. If 'all' is non-zero, the lock has been removed and
 * we must remove all the dependencies, otherwise it has simply been
 * reduced but remains active. Any pending locks which have been
 * unblocked are added to 'granted'.
 */
static void
lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
	struct lockf_entry_list *granted)
{
	struct lockf_edge *e, *ne;
	struct lockf_entry *deplock;

	LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
		deplock = e->le_from;
		if (all || !lf_blocks(lock, deplock)) {
			sx_xlock(&lf_owner_graph_lock);
			lf_remove_edge(e);
			sx_xunlock(&lf_owner_graph_lock);
			if (LIST_EMPTY(&deplock->lf_outedges)) {
				lf_wakeup_lock(state, deplock);
				LIST_INSERT_HEAD(granted, deplock, lf_link);
			}
		}
	}
}

/*
 * Set the start of an existing active lock, updating dependencies and
 * adding any newly woken locks to 'granted'.
 */
static void
lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
	struct lockf_entry_list *granted)
{

	KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
	lock->lf_start = new_start;
	LIST_REMOVE(lock, lf_link);
	lf_insert_lock(state, lock);
	lf_update_dependancies(state, lock, FALSE, granted);
}

/*
 * Set the end of an existing active lock, updating dependencies and
 * adding any newly woken locks to 'granted'.
 */
static void
lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
	struct lockf_entry_list *granted)
{

	KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
	lock->lf_end = new_end;
	lf_update_dependancies(state, lock, FALSE, granted);
}

/*
 * Add a lock to the active list, updating or removing any current
 * locks owned by the same owner and processing any pending locks that
 * become unblocked as a result. This code is also used for unlock
 * since the logic for updating existing locks is identical.
 *
 * As a result of processing the new lock, we may unblock existing
 * pending locks as a result of downgrading/unlocking. We simply
 * activate the newly granted locks by looping.
 *
 * Since the new lock already has its dependencies set up, we always
 * add it to the list (unless it's an unlock request). This may
 * fragment the lock list in some pathological cases but it's probably
 * not a real problem.
 */
static void
lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap, *lf;
	struct lockf_entry_list granted;
	int ovcase;

	LIST_INIT(&granted);
	LIST_INSERT_HEAD(&granted, lock, lf_link);

	while (!LIST_EMPTY(&granted)) {
		lock = LIST_FIRST(&granted);
		LIST_REMOVE(lock, lf_link);

		/*
		 * Skip over locks owned by other processes.  Handle
		 * any locks that overlap and are owned by ourselves.
		 */
		overlap = LIST_FIRST(&state->ls_active);
		for (;;) {
			ovcase = lf_findoverlap(&overlap, lock, SELF);

#ifdef LOCKF_DEBUG
			if (ovcase && (lockf_debug & 2)) {
				printf("lf_setlock: overlap %d", ovcase);
				lf_print("", overlap);
			}
#endif
			/*
			 * Six cases:
			 *	0) no overlap
			 *	1) overlap == lock
			 *	2) overlap contains lock
			 *	3) lock contains overlap
			 *	4) overlap starts before lock
			 *	5) overlap ends after lock
			 */
			switch (ovcase) {
			case 0: /* no overlap */
				break;

			case 1: /* overlap == lock */
				/*
				 * We have already setup the
				 * dependants for the new lock, taking
				 * into account a possible downgrade
				 * or unlock. Remove the old lock.
				 */
				LIST_REMOVE(overlap, lf_link);
				lf_update_dependancies(state, overlap, TRUE,
					&granted);
				lf_free_lock(overlap);
				break;

			case 2: /* overlap contains lock */
				/*
				 * Just split the existing lock.
				 */
				lf_split(state, overlap, lock, &granted);
				break;

			case 3: /* lock contains overlap */
				/*
				 * Delete the overlap and advance to
				 * the next entry in the list.
				 */
				lf = LIST_NEXT(overlap, lf_link);
				LIST_REMOVE(overlap, lf_link);
				lf_update_dependancies(state, overlap, TRUE,
					&granted);
				lf_free_lock(overlap);
				overlap = lf;
				continue;

			case 4: /* overlap starts before lock */
				/*
				 * Just update the overlap end and
				 * move on.
				 */
				lf_set_end(state, overlap, lock->lf_start - 1,
				    &granted);
				overlap = LIST_NEXT(overlap, lf_link);
				continue;

			case 5: /* overlap ends after lock */
				/*
				 * Change the start of overlap and
				 * re-insert.
				 */
				lf_set_start(state, overlap, lock->lf_end + 1,
				    &granted);
				break;
			}
			break;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			if (lock->lf_type != F_UNLCK)
				lf_print("lf_activate_lock: activated", lock);
			else
				lf_print("lf_activate_lock: unlocked", lock);
			lf_printlist("lf_activate_lock", lock);
		}
#endif /* LOCKF_DEBUG */
		if (lock->lf_type != F_UNLCK)
			lf_insert_lock(state, lock);
	}
}

/*
 * Cancel a pending lock request, either as a result of a signal or a
 * cancel request for an async lock.
 */
static void
lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry_list granted;

	/*
	 * Note it is theoretically possible that cancelling this lock
	 * may allow some other pending lock to become
	 * active. Consider this case:
	 *
	 * Owner	Action		Result		Dependencies
	 *
	 * A:		lock [0..0]	succeeds
	 * B:		lock [2..2]	succeeds
	 * C:		lock [1..2]	blocked		C->B
	 * D:		lock [0..1]	blocked		C->B,D->A,D->C
	 * A:		unlock [0..0]			C->B,D->C
	 * C:		cancel [1..2]
	 *
	 * Cancelling C [1..2] removes the D->C edge; D [0..1] is then
	 * left with no out-going edges and can be granted.
	 */

	LIST_REMOVE(lock, lf_link);

	/*
	 * Removing out-going edges is simple.
	 */
	sx_xlock(&lf_owner_graph_lock);
	lf_remove_outgoing(lock);
	sx_xunlock(&lf_owner_graph_lock);

	/*
	 * Removing in-coming edges may allow some other lock to
	 * become active - we use lf_update_dependancies to figure
	 * this out.
	 */
	LIST_INIT(&granted);
	lf_update_dependancies(state, lock, TRUE, &granted);
	lf_free_lock(lock);

	/*
	 * Feed any newly active locks to lf_activate_lock.
	 */
	while (!LIST_EMPTY(&granted)) {
		lock = LIST_FIRST(&granted);
		LIST_REMOVE(lock, lf_link);
		lf_activate_lock(state, lock);
	}
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
    void **cookiep)
{
	static char lockstr[] = "lockf";
	int error, priority, stops_deferred;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	if (!(lock->lf_flags & F_NOINTR))
		priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	if (lf_getblock(state, lock)) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0
		    && lock->lf_async_task == NULL) {
			lf_free_lock(lock);
			error = EAGAIN;
			goto out;
		}

		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			lf_activate_lock(state, lock);
			lock->lf_type = F_WRLCK;
		}

		/*
		 * We are blocked. Create edges to each blocking lock,
		 * checking for deadlock using the owner graph. For
		 * simplicity, we run deadlock detection for all
		 * locks, posix and otherwise.
		 */
		sx_xlock(&lf_owner_graph_lock);
		error = lf_add_outgoing(state, lock);
		sx_xunlock(&lf_owner_graph_lock);

		if (error) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 1)
				lf_print("lf_setlock: deadlock", lock);
#endif
			lf_free_lock(lock);
			goto out;
		}

		/*
		 * We have added edges to everything that blocks
		 * us. Sleep until they all go away.
		 */
		LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			struct lockf_edge *e;
			LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
				lf_print("lf_setlock: blocking on", e->le_to);
				lf_printlist("lf_setlock", e->le_to);
			}
		}
#endif /* LOCKF_DEBUG */

		if ((lock->lf_flags & F_WAIT) == 0) {
			/*
			 * The caller requested async notification -
			 * this callback happens when the blocking
			 * lock is released, allowing the caller to
			 * make another attempt to take the lock.
			 */
			*cookiep = (void *) lock;
			error = EINPROGRESS;
			goto out;
		}

		lock->lf_refs++;
		stops_deferred = sigdeferstop(SIGDEFERSTOP_ERESTART);
		error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
		sigallowstop(stops_deferred);
		if (lf_free_lock(lock)) {
			error = EDOOFUS;
			goto out;
		}

		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must
		 * remove our lock graph edges) and/or by another
		 * process releasing a lock (in which case our edges
		 * have already been removed and we have been moved to
		 * the active list). We may also have been woken by
		 * lf_purgelocks which we report to the caller as
		 * EINTR. In that case, lf_purgelocks will have
		 * removed our lock graph edges.
		 *
		 * Note that it is possible to receive a signal after
		 * we were successfully woken (and moved to the active
		 * list) but before we resumed execution. In this
		 * case, our lf_outedges list will be clear. We
		 * pretend there was no error.
		 *
		 * Note also, if we have been sleeping long enough, we
		 * may now have incoming edges from some newer lock
		 * which is waiting behind us in the queue.
		 */
		if (lock->lf_flags & F_INTR) {
			error = EINTR;
			lf_free_lock(lock);
			goto out;
		}
		if (LIST_EMPTY(&lock->lf_outedges)) {
			error = 0;
		} else {
			lf_cancel_lock(state, lock);
			goto out;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: granted", lock);
		}
#endif
		goto out;
	}
	/*
	 * It looks like we are going to grant the lock. First add
	 * edges from any currently pending lock that the new lock
	 * would block.
	 */
	error = lf_add_incoming(state, lock);
	if (error) {
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1)
			lf_print("lf_setlock: deadlock", lock);
#endif
		lf_free_lock(lock);
		goto out;
	}

	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 */
	lf_activate_lock(state, lock);
	error = 0;
out:
	return (error);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
{
	struct lockf_entry *overlap;

	overlap = LIST_FIRST(&state->ls_active);

	if (overlap == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */

	lf_activate_lock(state, unlock);

	return (0);
}

/*
 * Check whether there is a blocking lock, and if so return its
 * details in '*fl'.
 */
static int
lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
{
	struct lockf_entry *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(state, lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == OFF_MAX)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_owner->lo_pid;
		fl->l_sysid = block->lf_owner->lo_sysid;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Cancel an async lock request.
 */
static int
lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
{
	struct lockf_entry *reallock;

	/*
	 * We need to match this request with an existing lock
	 * request.
	 */
	LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
		if ((void *) reallock == cookie) {
			/*
			 * Double-check that this lock looks right
			 * (maybe use a rolling ID for the cancel
			 * cookie instead?)
			 */
			if (!(reallock->lf_vnode == lock->lf_vnode
				&& reallock->lf_start == lock->lf_start
				&& reallock->lf_end == lock->lf_end)) {
				return (ENOENT);
			}

			/*
			 * Make sure this lock was async and then just
			 * remove it from its wait lists.
			 */
			if (!reallock->lf_async_task) {
				return (ENOENT);
			}

			/*
			 * Note that since any other thread must take
			 * state->ls_lock before it can possibly
			 * trigger the async callback, we are safe
			 * from a race with lf_wakeup_lock, i.e. we
			 * can free the lock (actually our caller does
			 * this).
			 */
			lf_cancel_lock(state, reallock);
			return (0);
		}
	}

	/*
	 * We didn't find a matching lock - not much we can do here.
	 */
	return (ENOENT);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf_entry *
lf_getblock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;

	LIST_FOREACH(overlap, &state->ls_active, lf_link) {
		/*
		 * We may assume that the active list is sorted by
		 * lf_start.
		 */
		if (overlap->lf_start > lock->lf_end)
			break;
		if (!lf_blocks(lock, overlap))
			continue;
		return (overlap);
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to find an overlapping lock (if
 * any) and return a classification of that overlap.
 *
 * Arguments:
 *	*overlap	The place in the lock list to start looking
 *	lock		The lock which is being tested
 *	type		Pass 'SELF' to test only locks with the same
 *			owner as lock, or 'OTHERS' to test only locks
 *			with a different owner
 *
 * Returns one of six values:
 *	0) no overlap
 *	1) overlap == lock
 *	2) overlap contains lock
 *	3) lock contains overlap
 *	4) overlap starts before lock
 *	5) overlap ends after lock
 *
 * If there is an overlapping lock, '*overlap' is set to point at the
 * overlapping lock.
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
{
	struct lockf_entry *lf;
	off_t start, end;
	int res;

	if ((*overlap) == NOLOCKF) {
		return (0);
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	res = 0;
	while (*overlap) {
		lf = *overlap;
		if (lf->lf_start > end)
			break;
		if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
		    ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
			*overlap = LIST_NEXT(lf, lf_link);
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if (start > lf->lf_end) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			*overlap = LIST_NEXT(lf, lf_link);
			continue;
		}
		if (lf->lf_start == start && lf->lf_end == end) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			res = 1;
			break;
		}
		if (lf->lf_start <= start && lf->lf_end >= end) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			res = 2;
			break;
		}
		if (start <= lf->lf_start && end >= lf->lf_end) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			res = 3;
			break;
		}
		if (lf->lf_start < start && lf->lf_end >= start) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			res = 4;
			break;
		}
		if (lf->lf_start > start && lf->lf_end > end) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			res = 5;
			break;
		}
		panic("lf_findoverlap: default");
	}
	return (res);
}
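
/*
 * Worked example (illustration only): testing a lock over [10..19]
 * against an existing entry yields case 1 for [10..19], case 2 for
 * [5..25], case 3 for [12..15], case 4 for [5..15] and case 5 for
 * [15..25].
 */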

/*
 * Split the existing 'lock1', based on the extent of the lock
 * described by 'lock2'. The existing lock should cover 'lock2'
 * entirely.
 *
 * Any pending locks which have been unblocked are added to
 * 'granted'.
 */
1797 static void
1798 lf_split(struct lockf *state, struct lockf_entry *lock1,
1799     struct lockf_entry *lock2, struct lockf_entry_list *granted)
1800 {
1801 	struct lockf_entry *splitlock;
1802 
1803 #ifdef LOCKF_DEBUG
1804 	if (lockf_debug & 2) {
1805 		lf_print("lf_split", lock1);
1806 		lf_print("splitting from", lock2);
1807 	}
1808 #endif /* LOCKF_DEBUG */
1809 	/*
1810 	 * Check to see if we don't need to split at all.
1811 	 */
1812 	if (lock1->lf_start == lock2->lf_start) {
1813 		lf_set_start(state, lock1, lock2->lf_end + 1, granted);
1814 		return;
1815 	}
1816 	if (lock1->lf_end == lock2->lf_end) {
1817 		lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1818 		return;
1819 	}
1820 	/*
1821 	 * Make a new lock consisting of the last part of
1822 	 * the encompassing lock.
1823 	 */
1824 	splitlock = lf_alloc_lock(lock1->lf_owner);
1825 	memcpy(splitlock, lock1, sizeof *splitlock);
1826 	splitlock->lf_refs = 1;
1827 	if (splitlock->lf_flags & F_REMOTE)
1828 		vref(splitlock->lf_vnode);
1829 
1830 	/*
1831 	 * This cannot cause a deadlock since any edges we would add
1832 	 * to splitlock already exist in lock1. We must be sure to add
1833 	 * necessary dependencies to splitlock before we reduce lock1
1834 	 * otherwise we may accidentally grant a pending lock that
1835 	 * was blocked by the tail end of lock1.
1836 	 */
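	/*
	 * For example (continuing the illustrative offsets above), if
	 * a request for [70..79] is blocked by lock1 [0..99] and we
	 * shrank lock1 to [0..39] before giving splitlock [60..99] its
	 * in-edges, that request could be granted even though its
	 * range remains locked.
	 */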
1837 	splitlock->lf_start = lock2->lf_end + 1;
1838 	LIST_INIT(&splitlock->lf_outedges);
1839 	LIST_INIT(&splitlock->lf_inedges);
1840 	lf_add_incoming(state, splitlock);
1841 
1842 	lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1843 
1844 	/*
1845 	 * OK, now link it in
1846 	 */
1847 	lf_insert_lock(state, splitlock);
1848 }
1849 
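/*
 * A lockdesc records a snapshot of one active lock so that the
 * lf_iteratelocks_*() functions below can call their iterator after
 * dropping the lock-state locks; the vnode is referenced until the
 * descriptor is consumed.
 */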
1850 struct lockdesc {
1851 	STAILQ_ENTRY(lockdesc) link;
1852 	struct vnode *vp;
1853 	struct flock fl;
1854 };
1855 STAILQ_HEAD(lockdesclist, lockdesc);
1856 
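/*
 * Call 'fn' for each active lock with system ID 'sysid', passing a
 * struct flock that describes the lock as an unlock request. If 'fn'
 * returns non-zero, it is not called for the remaining locks and its
 * error is returned; see lf_clearremotesys_iterator() below for a
 * sample iterator.
 */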
1857 int
1858 lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
1859 {
1860 	struct lockf *ls;
1861 	struct lockf_entry *lf;
1862 	struct lockdesc *ldesc;
1863 	struct lockdesclist locks;
1864 	int error;
1865 
1866 	/*
1867 	 * In order to keep the locking simple, we iterate over the
1868 	 * active lock lists to build a list of locks that need
1869 	 * releasing. We then call the iterator for each one in turn.
1870 	 *
1871 	 * We take an extra reference to the vnode for the duration to
1872 	 * make sure it doesn't go away before we are finished.
1873 	 */
1874 	STAILQ_INIT(&locks);
1875 	sx_xlock(&lf_lock_states_lock);
1876 	LIST_FOREACH(ls, &lf_lock_states, ls_link) {
1877 		sx_xlock(&ls->ls_lock);
1878 		LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1879 			if (lf->lf_owner->lo_sysid != sysid)
1880 				continue;
1881 
1882 			ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1883 			    M_WAITOK);
1884 			ldesc->vp = lf->lf_vnode;
1885 			vref(ldesc->vp);
1886 			ldesc->fl.l_start = lf->lf_start;
1887 			if (lf->lf_end == OFF_MAX)
1888 				ldesc->fl.l_len = 0;
1889 			else
1890 				ldesc->fl.l_len =
1891 					lf->lf_end - lf->lf_start + 1;
1892 			ldesc->fl.l_whence = SEEK_SET;
1893 			ldesc->fl.l_type = F_UNLCK;
1894 			ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1895 			ldesc->fl.l_sysid = sysid;
1896 			STAILQ_INSERT_TAIL(&locks, ldesc, link);
1897 		}
1898 		sx_xunlock(&ls->ls_lock);
1899 	}
1900 	sx_xunlock(&lf_lock_states_lock);
1901 
1902 	/*
1903 	 * Call the iterator function for each lock in turn. If the
1904 	 * iterator returns an error code, just free the rest of the
1905 	 * lockdesc structures.
1906 	 */
1907 	error = 0;
1908 	while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1909 		STAILQ_REMOVE_HEAD(&locks, link);
1910 		if (!error)
1911 			error = fn(ldesc->vp, &ldesc->fl, arg);
1912 		vrele(ldesc->vp);
1913 		free(ldesc, M_LOCKF);
1914 	}
1915 
1916 	return (error);
1917 }
1918 
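/*
 * As lf_iteratelocks_sysid() above, but iterate over the active
 * locks of a single vnode.
 */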
1919 int
1920 lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
1921 {
1922 	struct lockf *ls;
1923 	struct lockf_entry *lf;
1924 	struct lockdesc *ldesc;
1925 	struct lockdesclist locks;
1926 	int error;
1927 
1928 	/*
1929 	 * In order to keep the locking simple, we iterate over the
1930 	 * active lock lists to build a list of locks that need
1931 	 * releasing. We then call the iterator for each one in turn.
1932 	 *
1933 	 * We take an extra reference to the vnode for the duration to
1934 	 * make sure it doesn't go away before we are finished.
1935 	 */
1936 	STAILQ_INIT(&locks);
1937 	VI_LOCK(vp);
1938 	ls = vp->v_lockf;
1939 	if (!ls) {
1940 		VI_UNLOCK(vp);
1941 		return (0);
1942 	}
1943 	MPASS(ls->ls_threads >= 0);
1944 	ls->ls_threads++;
1945 	VI_UNLOCK(vp);
1946 
1947 	sx_xlock(&ls->ls_lock);
1948 	LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1949 		ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1950 		    M_WAITOK);
1951 		ldesc->vp = lf->lf_vnode;
1952 		vref(ldesc->vp);
1953 		ldesc->fl.l_start = lf->lf_start;
1954 		if (lf->lf_end == OFF_MAX)
1955 			ldesc->fl.l_len = 0;
1956 		else
1957 			ldesc->fl.l_len =
1958 				lf->lf_end - lf->lf_start + 1;
1959 		ldesc->fl.l_whence = SEEK_SET;
1960 		ldesc->fl.l_type = F_UNLCK;
1961 		ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1962 		ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
1963 		STAILQ_INSERT_TAIL(&locks, ldesc, link);
1964 	}
1965 	sx_xunlock(&ls->ls_lock);
1966 	VI_LOCK(vp);
1967 	MPASS(ls->ls_threads > 0);
1968 	ls->ls_threads--;
1969 	wakeup(ls);
1970 	VI_UNLOCK(vp);
1971 
1972 	/*
1973 	 * Call the iterator function for each lock in turn. If the
1974 	 * iterator returns an error code, just free the rest of the
1975 	 * lockdesc structures.
1976 	 */
1977 	error = 0;
1978 	while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1979 		STAILQ_REMOVE_HEAD(&locks, link);
1980 		if (!error)
1981 			error = fn(ldesc->vp, &ldesc->fl, arg);
1982 		vrele(ldesc->vp);
1983 		free(ldesc, M_LOCKF);
1984 	}
1985 
1986 	return (error);
1987 }
1988 
1989 static int
1990 lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
1991 {
1992 
1993 	VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
1994 	return (0);
1995 }
1996 
1997 void
1998 lf_clearremotesys(int sysid)
1999 {
2000 
2001 	KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
2002 	lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
2003 }
2004 
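/*
 * Count the lock references held by all lock owners with the given
 * system ID.
 */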
2005 int
2006 lf_countlocks(int sysid)
2007 {
2008 	int i;
2009 	struct lock_owner *lo;
2010 	int count;
2011 
2012 	count = 0;
2013 	for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) {
2014 		sx_xlock(&lf_lock_owners[i].lock);
2015 		LIST_FOREACH(lo, &lf_lock_owners[i].list, lo_link)
2016 			if (lo->lo_sysid == sysid)
2017 				count += lo->lo_refs;
2018 		sx_xunlock(&lf_lock_owners[i].lock);
2019 	}
2020 
2021 	return (count);
2022 }
2023 
2024 #ifdef LOCKF_DEBUG
2025 
2026 /*
2027  * Return non-zero if y is reachable from x using a brute force
2028  * search. If reachable and path is non-null, return the route taken
2029  * in path.
2030  */
2031 static int
2032 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
2033     struct owner_vertex_list *path)
2034 {
2035 	struct owner_edge *e;
2036 
2037 	if (x == y) {
2038 		if (path)
2039 			TAILQ_INSERT_HEAD(path, x, v_link);
2040 		return (1);
2041 	}
2042 
2043 	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2044 		if (graph_reaches(e->e_to, y, path)) {
2045 			if (path)
2046 				TAILQ_INSERT_HEAD(path, x, v_link);
2047 			return (1);
2048 		}
2049 	}
2050 	return (0);
2051 }
2052 
2053 /*
2054  * Perform consistency checks on the graph. Make sure the values of
2055  * v_order are correct. If checkorder is non-zero, check that no
2056  * vertex can reach any other vertex with a smaller order.
2057  */
2058 static void
2059 graph_check(struct owner_graph *g, int checkorder)
2060 {
2061 	int i, j;
2062 
2063 	for (i = 0; i < g->g_size; i++) {
2064 		if (!g->g_vertices[i]->v_owner)
2065 			continue;
2066 		KASSERT(g->g_vertices[i]->v_order == i,
2067 		    ("lock graph vertices disordered"));
2068 		if (checkorder) {
2069 			for (j = 0; j < i; j++) {
2070 				if (!g->g_vertices[j]->v_owner)
2071 					continue;
2072 				KASSERT(!graph_reaches(g->g_vertices[i],
2073 					g->g_vertices[j], NULL),
2074 				    ("lock graph vertices disordered"));
2075 			}
2076 		}
2077 	}
2078 }
2079 
2080 static void
2081 graph_print_vertices(struct owner_vertex_list *set)
2082 {
2083 	struct owner_vertex *v;
2084 
2085 	printf("{ ");
2086 	TAILQ_FOREACH(v, set, v_link) {
2087 		printf("%d:", v->v_order);
2088 		lf_print_owner(v->v_owner);
2089 		if (TAILQ_NEXT(v, v_link))
2090 			printf(", ");
2091 	}
2092 	printf(" }\n");
2093 }
2094 
2095 #endif
2096 
2097 /*
2098  * Calculate the subset of vertices v from the affected region [y..x]
2099  * where v is reachable from y. Return -1 if a loop was detected
2100  * (i.e. x is reachable from y); otherwise return the number of
2101  * vertices in this subset.
2102  */
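/*
 * For example (illustrative orders): with y at order 2, x at order 5
 * and a single edge y -> a inside [y..x] where a has order 3, the
 * walk yields delta = {y, a} and returns 2; a chain of edges from y
 * to x would instead return -1.
 */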
2103 static int
2104 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
2105     struct owner_vertex *y, struct owner_vertex_list *delta)
2106 {
2107 	uint32_t gen;
2108 	struct owner_vertex *v;
2109 	struct owner_edge *e;
2110 	int n;
2111 
2112 	/*
2113 	 * We start with a set containing just y. Then for each vertex
2114 	 * v in the set so far unprocessed, we add each vertex that v
2115 	 * has an out-edge to and that is within the affected region
2116 	 * [y..x]. If we see the vertex x on our travels, stop
2117 	 * immediately.
2118 	 */
2119 	TAILQ_INIT(delta);
2120 	TAILQ_INSERT_TAIL(delta, y, v_link);
2121 	v = y;
2122 	n = 1;
2123 	gen = g->g_gen;
2124 	while (v) {
2125 		LIST_FOREACH(e, &v->v_outedges, e_outlink) {
2126 			if (e->e_to == x)
2127 				return (-1);
2128 			if (e->e_to->v_order < x->v_order
2129 			    && e->e_to->v_gen != gen) {
2130 				e->e_to->v_gen = gen;
2131 				TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
2132 				n++;
2133 			}
2134 		}
2135 		v = TAILQ_NEXT(v, v_link);
2136 	}
2137 
2138 	return (n);
2139 }
2140 
2141 /*
2142  * Calculate the subset of vertices v from the affected region [y..x]
2143  * where v reaches x. Return the number of vertices in this subset.
2144  */
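/*
 * For example, with the orders above and a single edge b -> x inside
 * [y..x] where b has order 4, the walk yields delta = {b, x} and
 * returns 2.
 */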
2145 static int
2146 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
2147     struct owner_vertex *y, struct owner_vertex_list *delta)
2148 {
2149 	uint32_t gen;
2150 	struct owner_vertex *v;
2151 	struct owner_edge *e;
2152 	int n;
2153 
2154 	/*
2155 	 * We start with a set containing just x. Then for each vertex
2156 	 * v in the set so far unprocessed, we add each vertex that v
2157 	 * has an in-edge from and that is within the affected region
2158 	 * [y..x].
2159 	 */
2160 	TAILQ_INIT(delta);
2161 	TAILQ_INSERT_TAIL(delta, x, v_link);
2162 	v = x;
2163 	n = 1;
2164 	gen = g->g_gen;
2165 	while (v) {
2166 		LIST_FOREACH(e, &v->v_inedges, e_inlink) {
2167 			if (e->e_from->v_order > y->v_order
2168 			    && e->e_from->v_gen != gen) {
2169 				e->e_from->v_gen = gen;
2170 				TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
2171 				n++;
2172 			}
2173 		}
2174 		v = TAILQ_PREV(v, owner_vertex_list, v_link);
2175 	}
2176 
2177 	return (n);
2178 }
2179 
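/*
 * Insertion-sort the order values of the vertices in 'set' into the
 * array 'indices', which already holds 'n' sorted entries; return
 * the new number of entries.
 */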
2180 static int
2181 graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
2182 {
2183 	struct owner_vertex *v;
2184 	int i, j;
2185 
2186 	TAILQ_FOREACH(v, set, v_link) {
2187 		for (i = n;
2188 		     i > 0 && indices[i - 1] > v->v_order; i--)
2189 			;
2190 		for (j = n - 1; j >= i; j--)
2191 			indices[j + 1] = indices[j];
2192 		indices[i] = v->v_order;
2193 		n++;
2194 	}
2195 
2196 	return (n);
2197 }
2198 
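/*
 * Pull the vertices out of 'set' in increasing order of their old
 * v_order values and assign them successive entries from 'indices'
 * starting at 'nextunused', preserving their relative order; return
 * the next unused index.
 */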
2199 static int
2200 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
2201     struct owner_vertex_list *set)
2202 {
2203 	struct owner_vertex *v, *vlowest;
2204 
2205 	while (!TAILQ_EMPTY(set)) {
2206 		vlowest = NULL;
2207 		TAILQ_FOREACH(v, set, v_link) {
2208 			if (!vlowest || v->v_order < vlowest->v_order)
2209 				vlowest = v;
2210 		}
2211 		TAILQ_REMOVE(set, vlowest, v_link);
2212 		vlowest->v_order = indices[nextunused];
2213 		g->g_vertices[vlowest->v_order] = vlowest;
2214 		nextunused++;
2215 	}
2216 
2217 	return (nextunused);
2218 }
2219 
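/*
 * Add an edge x->y to the owner graph, re-ordering vertices where
 * necessary to preserve the invariant that an edge's source always
 * has a smaller v_order than its target. Return EDEADLK if the new
 * edge would create a cycle.
 */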
2220 static int
2221 graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
2222     struct owner_vertex *y)
2223 {
2224 	struct owner_edge *e;
2225 	struct owner_vertex_list deltaF, deltaB;
2226 	int nF, n, vi, i;
2227 	int *indices;
2228 	int nB __unused;
2229 
2230 	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2231 
2232 	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2233 		if (e->e_to == y) {
2234 			e->e_refs++;
2235 			return (0);
2236 		}
2237 	}
2238 
2239 #ifdef LOCKF_DEBUG
2240 	if (lockf_debug & 8) {
2241 		printf("adding edge %d:", x->v_order);
2242 		lf_print_owner(x->v_owner);
2243 		printf(" -> %d:", y->v_order);
2244 		lf_print_owner(y->v_owner);
2245 		printf("\n");
2246 	}
2247 #endif
2248 	if (y->v_order < x->v_order) {
2249 		/*
2250 		 * The new edge violates the order. First find the set
2251 		 * of affected vertices reachable from y (deltaF) and
2252 		 * the set of affected vertices that reach x
2253 		 * (deltaB), using the graph generation number to
2254 		 * detect whether we have visited a given vertex
2255 		 * already. We re-order the graph so that each vertex
2256 		 * in deltaB appears before each vertex in deltaF.
2257 		 *
2258 		 * If x is a member of deltaF, then the new edge would
2259 		 * create a cycle. Otherwise, we may assume that
2260 		 * deltaF and deltaB are disjoint.
2261 		 */
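		/*
		 * Worked example (illustrative orders): with old
		 * orders y=2, a=3, b=4, x=5, deltaF = {y, a} and
		 * deltaB = {b, x}, the available index set is
		 * {2, 3, 4, 5}. deltaB is re-numbered first (b=2,
		 * x=3), then deltaF (y=4, a=5), after which the new
		 * edge satisfies x->v_order < y->v_order.
		 */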
2262 		g->g_gen++;
2263 		if (g->g_gen == 0) {
2264 			/*
2265 			 * Generation wrap.
2266 			 */
2267 			for (vi = 0; vi < g->g_size; vi++) {
2268 				g->g_vertices[vi]->v_gen = 0;
2269 			}
2270 			g->g_gen++;
2271 		}
2272 		nF = graph_delta_forward(g, x, y, &deltaF);
2273 		if (nF < 0) {
2274 #ifdef LOCKF_DEBUG
2275 			if (lockf_debug & 8) {
2276 				struct owner_vertex_list path;
2277 				printf("deadlock: ");
2278 				TAILQ_INIT(&path);
2279 				graph_reaches(y, x, &path);
2280 				graph_print_vertices(&path);
2281 			}
2282 #endif
2283 			return (EDEADLK);
2284 		}
2285 
2286 #ifdef LOCKF_DEBUG
2287 		if (lockf_debug & 8) {
2288 			printf("re-ordering graph vertices\n");
2289 			printf("deltaF = ");
2290 			graph_print_vertices(&deltaF);
2291 		}
2292 #endif
2293 
2294 		nB = graph_delta_backward(g, x, y, &deltaB);
2295 
2296 #ifdef LOCKF_DEBUG
2297 		if (lockf_debug & 8) {
2298 			printf("deltaB = ");
2299 			graph_print_vertices(&deltaB);
2300 		}
2301 #endif
2302 
2303 		/*
2304 		 * We first build a set of vertex indices (vertex
2305 		 * order values) that we may use, then we re-assign
2306 		 * orders first to those vertices in deltaB, then to
2307 		 * deltaF. Note that the contents of deltaF and deltaB
2308 		 * may be partially disordered - we perform an
2309 		 * insertion sort while building our index set.
2310 		 */
2311 		indices = g->g_indexbuf;
2312 		n = graph_add_indices(indices, 0, &deltaF);
2313 		graph_add_indices(indices, n, &deltaB);
2314 
2315 		/*
2316 		 * We must also be sure to maintain the relative
2317 		 * ordering of deltaF and deltaB when re-assigning
2318 		 * vertices. We do this by iteratively removing the
2319 		 * lowest ordered element from the set and assigning
2320 		 * it the next value from our new ordering.
2321 		 */
2322 		i = graph_assign_indices(g, indices, 0, &deltaB);
2323 		graph_assign_indices(g, indices, i, &deltaF);
2324 
2325 #ifdef LOCKF_DEBUG
2326 		if (lockf_debug & 8) {
2327 			struct owner_vertex_list set;
2328 			TAILQ_INIT(&set);
2329 			for (i = 0; i < nB + nF; i++)
2330 				TAILQ_INSERT_TAIL(&set,
2331 				    g->g_vertices[indices[i]], v_link);
2332 			printf("new ordering = ");
2333 			graph_print_vertices(&set);
2334 		}
2335 #endif
2336 	}
2337 
2338 	KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
2339 
2340 #ifdef LOCKF_DEBUG
2341 	if (lockf_debug & 8) {
2342 		graph_check(g, TRUE);
2343 	}
2344 #endif
2345 
2346 	e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
2347 
2348 	LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
2349 	LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
2350 	e->e_refs = 1;
2351 	e->e_from = x;
2352 	e->e_to = y;
2353 
2354 	return (0);
2355 }
2356 
2357 /*
2358  * Remove an edge x->y from the graph.
2359  */
2360 static void
2361 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
2362     struct owner_vertex *y)
2363 {
2364 	struct owner_edge *e;
2365 
2366 	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2367 
2368 	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2369 		if (e->e_to == y)
2370 			break;
2371 	}
2372 	KASSERT(e, ("Removing non-existent edge from deadlock graph"));
2373 
2374 	e->e_refs--;
2375 	if (e->e_refs == 0) {
2376 #ifdef LOCKF_DEBUG
2377 		if (lockf_debug & 8) {
2378 			printf("removing edge %d:", x->v_order);
2379 			lf_print_owner(x->v_owner);
2380 			printf(" -> %d:", y->v_order);
2381 			lf_print_owner(y->v_owner);
2382 			printf("\n");
2383 		}
2384 #endif
2385 		LIST_REMOVE(e, e_outlink);
2386 		LIST_REMOVE(e, e_inlink);
2387 		free(e, M_LOCKF);
2388 	}
2389 }
2390 
2391 /*
2392  * Allocate a new vertex for 'lo', growing the graph's vertex array
2393  * and index buffer if necessary.
2394  */
2395 static struct owner_vertex *
2396 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
2397 {
2398 	struct owner_vertex *v;
2399 
2400 	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2401 
2402 	v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
2403 	if (g->g_size == g->g_space) {
2404 		g->g_vertices = realloc(g->g_vertices,
2405 		    2 * g->g_space * sizeof(struct owner_vertex *),
2406 		    M_LOCKF, M_WAITOK);
2407 		free(g->g_indexbuf, M_LOCKF);
2408 		g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
2409 		    M_LOCKF, M_WAITOK);
2410 		g->g_space = 2 * g->g_space;
2411 	}
2412 	v->v_order = g->g_size;
2413 	v->v_gen = g->g_gen;
2414 	g->g_vertices[g->g_size] = v;
2415 	g->g_size++;
2416 
2417 	LIST_INIT(&v->v_outedges);
2418 	LIST_INIT(&v->v_inedges);
2419 	v->v_owner = lo;
2420 
2421 	return (v);
2422 }
2423 
2424 static void
2425 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
2426 {
2427 	struct owner_vertex *w;
2428 	int i;
2429 
2430 	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2431 
2432 	KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
2433 	KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
2434 
2435 	/*
2436 	 * Remove from the graph's array and close up the gap,
2437 	 * renumbering the other vertices.
2438 	 */
2439 	for (i = v->v_order + 1; i < g->g_size; i++) {
2440 		w = g->g_vertices[i];
2441 		w->v_order--;
2442 		g->g_vertices[i - 1] = w;
2443 	}
2444 	g->g_size--;
2445 
2446 	free(v, M_LOCKF);
2447 }
2448 
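/*
 * Initialize an owner graph with room for ten vertices; the vertex
 * array and index buffer are doubled on demand by
 * graph_alloc_vertex().
 */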
2449 static struct owner_graph *
2450 graph_init(struct owner_graph *g)
2451 {
2452 
2453 	g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
2454 	    M_LOCKF, M_WAITOK);
2455 	g->g_size = 0;
2456 	g->g_space = 10;
2457 	g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
2458 	g->g_gen = 0;
2459 
2460 	return (g);
2461 }
2462 
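/*
 * A kinfo_lockf record linked into the snapshot list built by
 * vfs_report_lockf(); the vnode is held until the record has been
 * written out.
 */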
2463 struct kinfo_lockf_linked {
2464 	struct kinfo_lockf kl;
2465 	struct vnode *vp;
2466 	STAILQ_ENTRY(kinfo_lockf_linked) link;
2467 };
2468 
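/*
 * Report all active advisory locks on vnodes belonging to 'mp' as a
 * stream of kinfo_lockf records in 'sb'. The table is snapshotted
 * under the lock-state locks first; paths and stat information are
 * resolved afterwards without them.
 */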
2469 int
2470 vfs_report_lockf(struct mount *mp, struct sbuf *sb)
2471 {
2472 	struct lockf *ls;
2473 	struct lockf_entry *lf;
2474 	struct kinfo_lockf_linked *klf;
2475 	struct vnode *vp;
2476 	struct ucred *ucred;
2477 	char *fullpath, *freepath;
2478 	struct stat stt;
2479 	STAILQ_HEAD(, kinfo_lockf_linked) locks;
2480 	int error, gerror;
2481 
2482 	STAILQ_INIT(&locks);
2483 	sx_slock(&lf_lock_states_lock);
2484 	LIST_FOREACH(ls, &lf_lock_states, ls_link) {
2485 		sx_slock(&ls->ls_lock);
2486 		LIST_FOREACH(lf, &ls->ls_active, lf_link) {
2487 			vp = lf->lf_vnode;
2488 			if (VN_IS_DOOMED(vp) || vp->v_mount != mp)
2489 				continue;
2490 			vhold(vp);
2491 			klf = malloc(sizeof(struct kinfo_lockf_linked),
2492 			    M_LOCKF, M_WAITOK | M_ZERO);
2493 			klf->vp = vp;
2494 			klf->kl.kl_structsize = sizeof(struct kinfo_lockf);
2495 			klf->kl.kl_start = lf->lf_start;
2496 			klf->kl.kl_len = lf->lf_end == OFF_MAX ? 0 :
2497 			    lf->lf_end - lf->lf_start + 1;
2498 			klf->kl.kl_rw = lf->lf_type == F_RDLCK ?
2499 			    KLOCKF_RW_READ : KLOCKF_RW_WRITE;
2500 			if (lf->lf_owner->lo_sysid != 0) {
2501 				klf->kl.kl_pid = lf->lf_owner->lo_pid;
2502 				klf->kl.kl_sysid = lf->lf_owner->lo_sysid;
2503 				klf->kl.kl_type = KLOCKF_TYPE_REMOTE;
2504 			} else if (lf->lf_owner->lo_pid == -1) {
2505 				klf->kl.kl_pid = -1;
2506 				klf->kl.kl_sysid = 0;
2507 				klf->kl.kl_type = KLOCKF_TYPE_FLOCK;
2508 			} else {
2509 				klf->kl.kl_pid = lf->lf_owner->lo_pid;
2510 				klf->kl.kl_sysid = 0;
2511 				klf->kl.kl_type = KLOCKF_TYPE_PID;
2512 			}
2513 			STAILQ_INSERT_TAIL(&locks, klf, link);
2514 		}
2515 		sx_sunlock(&ls->ls_lock);
2516 	}
2517 	sx_sunlock(&lf_lock_states_lock);
2518 
2519 	gerror = 0;
2520 	ucred = curthread->td_ucred;
2521 	while ((klf = STAILQ_FIRST(&locks)) != NULL) {
2522 		STAILQ_REMOVE_HEAD(&locks, link);
2523 		vp = klf->vp;
2524 		if (gerror == 0 && vn_lock(vp, LK_SHARED) == 0) {
2525 			error = prison_canseemount(ucred, vp->v_mount);
2526 			if (error == 0)
2527 				error = VOP_STAT(vp, &stt, ucred, NOCRED);
2528 			VOP_UNLOCK(vp);
2529 			if (error == 0) {
2530 				klf->kl.kl_file_fsid = stt.st_dev;
2531 				klf->kl.kl_file_rdev = stt.st_rdev;
2532 				klf->kl.kl_file_fileid = stt.st_ino;
2533 				freepath = NULL;
2534 				fullpath = "-";
2535 				error = vn_fullpath(vp, &fullpath, &freepath);
2536 				if (error == 0)
2537 					strlcpy(klf->kl.kl_path, fullpath,
2538 					    sizeof(klf->kl.kl_path));
2539 				free(freepath, M_TEMP);
2540 				if (sbuf_bcat(sb, &klf->kl,
2541 				    klf->kl.kl_structsize) != 0) {
2542 					gerror = sbuf_error(sb);
2543 				}
2544 			}
2545 		}
2546 		vdrop(vp);
2547 		free(klf, M_LOCKF);
2548 	}
2549 
2550 	return (gerror);
2551 }
2552 
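/*
 * Collect the advisory lock tables of all mounted filesystems into
 * 'sb', busying each mount while its table is reported.
 */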
2553 static int
2554 sysctl_kern_lockf_run(struct sbuf *sb)
2555 {
2556 	struct mount *mp;
2557 	int error;
2558 
2559 	error = 0;
2560 	mtx_lock(&mountlist_mtx);
2561 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2562 		error = vfs_busy(mp, MBF_MNTLSTLOCK);
2563 		if (error != 0)
2564 			continue;
2565 		error = mp->mnt_op->vfs_report_lockf(mp, sb);
2566 		mtx_lock(&mountlist_mtx);
2567 		vfs_unbusy(mp);
2568 		if (error != 0)
2569 			break;
2570 	}
2571 	mtx_unlock(&mountlist_mtx);
2572 	return (error);
2573 }
2574 
2575 static int
2576 sysctl_kern_lockf(SYSCTL_HANDLER_ARGS)
2577 {
2578 	struct sbuf sb;
2579 	int error, error2;
2580 
2581 	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_lockf) * 5, req);
2582 	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2583 	error = sysctl_kern_lockf_run(&sb);
2584 	error2 = sbuf_finish(&sb);
2585 	sbuf_delete(&sb);
2586 	return (error != 0 ? error : error2);
2587 }
2588 SYSCTL_PROC(_kern, KERN_LOCKF, lockf,
2589     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2590     0, 0, sysctl_kern_lockf, "S,lockf",
2591     "Advisory locks table");
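
/*
 * A userland consumer can read this table via sysctl(3), walking the
 * variable-length records using kl_structsize. A minimal sketch
 * (illustrative only, not compiled here; assumes struct kinfo_lockf
 * from <sys/user.h>):
 *
 *	int mib[2] = { CTL_KERN, KERN_LOCKF };
 *	size_t len;
 *
 *	if (sysctl(mib, 2, NULL, &len, NULL, 0) == 0) {
 *		char *buf = malloc(len);
 *		if (buf != NULL &&
 *		    sysctl(mib, 2, buf, &len, NULL, 0) == 0) {
 *			for (char *p = buf; p < buf + len;
 *			    p += ((struct kinfo_lockf *)p)->kl_structsize)
 *				printf("pid %d: %s\n",
 *				    ((struct kinfo_lockf *)p)->kl_pid,
 *				    ((struct kinfo_lockf *)p)->kl_path);
 *		}
 *		free(buf);
 *	}
 */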
2592 
2593 #ifdef LOCKF_DEBUG
2594 /*
2595  * Print description of a lock owner
2596  */
2597 static void
2598 lf_print_owner(struct lock_owner *lo)
2599 {
2600 
2601 	if (lo->lo_flags & F_REMOTE) {
2602 		printf("remote pid %d, system %d",
2603 		    lo->lo_pid, lo->lo_sysid);
2604 	} else if (lo->lo_flags & F_FLOCK) {
2605 		printf("file %p", lo->lo_id);
2606 	} else {
2607 		printf("local pid %d", lo->lo_pid);
2608 	}
2609 }
2610 
2611 /*
2612  * Print out a lock.
2613  */
2614 static void
2615 lf_print(char *tag, struct lockf_entry *lock)
2616 {
2617 
2618 	printf("%s: lock %p for ", tag, (void *)lock);
2619 	lf_print_owner(lock->lf_owner);
2620 	printf("\nvnode %p", lock->lf_vnode);
2621 	VOP_PRINT(lock->lf_vnode);
2622 	printf(" %s, start %jd, end ",
2623 	    lock->lf_type == F_RDLCK ? "shared" :
2624 	    lock->lf_type == F_WRLCK ? "exclusive" :
2625 	    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
2626 	    (intmax_t)lock->lf_start);
2627 	if (lock->lf_end == OFF_MAX)
2628 		printf("EOF");
2629 	else
2630 		printf("%jd", (intmax_t)lock->lf_end);
2631 	if (!LIST_EMPTY(&lock->lf_outedges))
2632 		printf(" block %p\n",
2633 		    (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
2634 	else
2635 		printf("\n");
2636 }
2637 
2638 static void
2639 lf_printlist(char *tag, struct lockf_entry *lock)
2640 {
2641 	struct lockf_entry *lf, *blk;
2642 	struct lockf_edge *e;
2643 
2644 	printf("%s: Lock list for vnode %p:\n", tag, lock->lf_vnode);
2645 	LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
2646 		printf("\tlock %p for ", (void *)lf);
2647 		lf_print_owner(lf->lf_owner);
2648 		printf(", %s, start %jd, end %jd",
2649 		    lf->lf_type == F_RDLCK ? "shared" :
2650 		    lf->lf_type == F_WRLCK ? "exclusive" :
2651 		    lf->lf_type == F_UNLCK ? "unlock" :
2652 		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
2653 		LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
2654 			blk = e->le_to;
2655 			printf("\n\t\tlock request %p for ", (void *)blk);
2656 			lf_print_owner(blk->lf_owner);
2657 			printf(", %s, start %jd, end %jd",
2658 			    blk->lf_type == F_RDLCK ? "shared" :
2659 			    blk->lf_type == F_WRLCK ? "exclusive" :
2660 			    blk->lf_type == F_UNLCK ? "unlock" :
2661 			    "unknown", (intmax_t)blk->lf_start,
2662 			    (intmax_t)blk->lf_end);
2663 			if (!LIST_EMPTY(&blk->lf_inedges))
2664 				panic("lf_printlist: bad list");
2665 		}
2666 		printf("\n");
2667 	}
2668 }
2669 #endif /* LOCKF_DEBUG */
2670