1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
5 * Authors: Doug Rabson <dfr@rabson.org>
6 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29 /*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Scooter Morris at Genentech Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 #include <sys/cdefs.h>
62 #include "opt_debug_lockf.h"
63
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/hash.h>
67 #include <sys/jail.h>
68 #include <sys/kernel.h>
69 #include <sys/limits.h>
70 #include <sys/lock.h>
71 #include <sys/mount.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/sbuf.h>
75 #include <sys/stat.h>
76 #include <sys/sx.h>
77 #include <sys/unistd.h>
78 #include <sys/user.h>
79 #include <sys/vnode.h>
80 #include <sys/malloc.h>
81 #include <sys/fcntl.h>
82 #include <sys/lockf.h>
83 #include <sys/taskqueue.h>
84
85 #ifdef LOCKF_DEBUG
86 #include <sys/sysctl.h>
87
88 static int lockf_debug = 0; /* control debug output */
89 SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
90 #endif
91
92 static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
93
94 struct owner_edge;
95 struct owner_vertex;
96 struct owner_vertex_list;
97 struct owner_graph;
98
99 #define NOLOCKF (struct lockf_entry *)0
100 #define SELF 0x1
101 #define OTHERS 0x2
102 static void lf_init(void *);
103 static int lf_hash_owner(caddr_t, struct vnode *, struct flock *, int);
104 static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
105 int);
106 static struct lockf_entry *
107 lf_alloc_lock(struct lock_owner *);
108 static int lf_free_lock(struct lockf_entry *);
109 static int lf_clearlock(struct lockf *, struct lockf_entry *);
110 static int lf_overlaps(struct lockf_entry *, struct lockf_entry *);
111 static int lf_blocks(struct lockf_entry *, struct lockf_entry *);
112 static void lf_free_edge(struct lockf_edge *);
113 static struct lockf_edge *
114 lf_alloc_edge(void);
115 static void lf_alloc_vertex(struct lockf_entry *);
116 static int lf_add_edge(struct lockf_entry *, struct lockf_entry *);
117 static void lf_remove_edge(struct lockf_edge *);
118 static void lf_remove_outgoing(struct lockf_entry *);
119 static void lf_remove_incoming(struct lockf_entry *);
120 static int lf_add_outgoing(struct lockf *, struct lockf_entry *);
121 static int lf_add_incoming(struct lockf *, struct lockf_entry *);
122 static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
123 int);
124 static struct lockf_entry *
125 lf_getblock(struct lockf *, struct lockf_entry *);
126 static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
127 static void lf_insert_lock(struct lockf *, struct lockf_entry *);
128 static void lf_wakeup_lock(struct lockf *, struct lockf_entry *);
129 static void lf_update_dependancies(struct lockf *, struct lockf_entry *,
130 int all, struct lockf_entry_list *);
131 static void lf_set_start(struct lockf *, struct lockf_entry *, off_t,
132 struct lockf_entry_list*);
133 static void lf_set_end(struct lockf *, struct lockf_entry *, off_t,
134 struct lockf_entry_list*);
135 static int lf_setlock(struct lockf *, struct lockf_entry *,
136 struct vnode *, void **cookiep);
137 static int lf_cancel(struct lockf *, struct lockf_entry *, void *);
138 static void lf_split(struct lockf *, struct lockf_entry *,
139 struct lockf_entry *, struct lockf_entry_list *);
140 #ifdef LOCKF_DEBUG
141 static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
142 struct owner_vertex_list *path);
143 static void graph_check(struct owner_graph *g, int checkorder);
144 static void graph_print_vertices(struct owner_vertex_list *set);
145 #endif
146 static int graph_delta_forward(struct owner_graph *g,
147 struct owner_vertex *x, struct owner_vertex *y,
148 struct owner_vertex_list *delta);
149 static int graph_delta_backward(struct owner_graph *g,
150 struct owner_vertex *x, struct owner_vertex *y,
151 struct owner_vertex_list *delta);
152 static int graph_add_indices(int *indices, int n,
153 struct owner_vertex_list *set);
154 static int graph_assign_indices(struct owner_graph *g, int *indices,
155 int nextunused, struct owner_vertex_list *set);
156 static int graph_add_edge(struct owner_graph *g,
157 struct owner_vertex *x, struct owner_vertex *y);
158 static void graph_remove_edge(struct owner_graph *g,
159 struct owner_vertex *x, struct owner_vertex *y);
160 static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
161 struct lock_owner *lo);
162 static void graph_free_vertex(struct owner_graph *g,
163 struct owner_vertex *v);
164 static struct owner_graph * graph_init(struct owner_graph *g);
165 #ifdef LOCKF_DEBUG
166 static void lf_print(char *, struct lockf_entry *);
167 static void lf_printlist(char *, struct lockf_entry *);
168 static void lf_print_owner(struct lock_owner *);
169 #endif
170
171 /*
172 * This structure is used to keep track of both local and remote lock
173 * owners. The lf_owner field of the struct lockf_entry points back at
174 * the lock owner structure. Each possible lock owner (local proc for
175 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
176 * pair for remote locks) is represented by a unique instance of
177 * struct lock_owner.
178 *
179 * If a lock owner has a lock that blocks some other lock or a lock
180 * that is waiting for some other lock, it also has a vertex in the
181 * owner_graph below.
182 *
183 * Locks:
184 * (s) locked by state->ls_lock
185 * (S) locked by lf_lock_states_lock
186 * (g) locked by lf_owner_graph_lock
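 * (l) locked by the per-chain lock in lf_lock_owners[]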
187 * (c) const until freeing
188 */
189 #define LOCK_OWNER_HASH_SIZE 256
190
191 struct lock_owner {
192 LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
193 int lo_refs; /* (l) Number of locks referring to this */
194 int lo_flags; /* (c) Flags passed to lf_advlock */
195 caddr_t lo_id; /* (c) Id value passed to lf_advlock */
196 pid_t lo_pid; /* (c) Process Id of the lock owner */
197 int lo_sysid; /* (c) System Id of the lock owner */
198 int lo_hash; /* (c) Used to lock the appropriate chain */
199 struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
200 };
201
202 LIST_HEAD(lock_owner_list, lock_owner);
203
204 struct lock_owner_chain {
205 struct sx lock;
206 struct lock_owner_list list;
207 };
208
209 static struct sx lf_lock_states_lock;
210 static struct lockf_list lf_lock_states; /* (S) */
211 static struct lock_owner_chain lf_lock_owners[LOCK_OWNER_HASH_SIZE];
212
213 /*
214 * Structures for deadlock detection.
215 *
216 * We have two types of directed graph, the first is the set of locks,
217 * both active and pending on a vnode. Within this graph, active locks
218 * are terminal nodes in the graph (i.e. have no out-going
219 * edges). Pending locks have out-going edges to each blocking active
220 * lock that prevents the lock from being granted and also to each
221 * older pending lock that would block it if that lock were active. The
222 * graph for each vnode is naturally acyclic; new edges are only ever
223 * added to or from new nodes (either new pending locks, which only add
224 * out-going edges, or new active locks, which only add in-coming edges),
225 * so they cannot create loops in the lock graph.
226 *
227 * The second graph is a global graph of lock owners. Each lock owner
228 * is a vertex in that graph and an edge is added to the graph
229 * whenever an edge is added to a vnode graph, with end points
230 * corresponding to the owner of the new pending lock and the owner of the
231 * lock upon which it waits. In order to prevent deadlock, we only add
232 * an edge to this graph if the new edge would not create a cycle.
233 *
234 * The lock owner graph is topologically sorted, i.e. if a node has
235 * any outgoing edges, then it has an order strictly less than any
236 * node to which it has an outgoing edge. We preserve this ordering
237 * (and detect cycles) on edge insertion using Algorithm PK from the
238 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
239 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
240 * No. 1.7)
241 */
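
/*
 * A small worked example of the ordering invariant (illustrative
 * only): if owner A waits on owner B and B waits on C, the graph has
 * edges A->B and B->C and the order satisfies order(A) < order(B) <
 * order(C). Should C later try to wait on A, the candidate edge C->A
 * would run from a higher-ordered vertex to a lower-ordered one; the
 * forward search from A then reaches C, which means the edge would
 * close a cycle, so graph_add_edge() fails with EDEADLK instead of
 * allowing the deadlock.
 */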
242 struct owner_vertex;
243
244 struct owner_edge {
245 LIST_ENTRY(owner_edge) e_outlink; /* (g) entry in e_from's out-edge list */
246 LIST_ENTRY(owner_edge) e_inlink; /* (g) entry in e_to's in-edge list */
247 int e_refs; /* (g) number of times added */
248 struct owner_vertex *e_from; /* (c) out-going from here */
249 struct owner_vertex *e_to; /* (c) in-coming to here */
250 };
251 LIST_HEAD(owner_edge_list, owner_edge);
252
253 struct owner_vertex {
254 TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
255 uint32_t v_gen; /* (g) workspace for edge insertion */
256 int v_order; /* (g) order of vertex in graph */
257 struct owner_edge_list v_outedges;/* (g) list of out-edges */
258 struct owner_edge_list v_inedges; /* (g) list of in-edges */
259 struct lock_owner *v_owner; /* (c) corresponding lock owner */
260 };
261 TAILQ_HEAD(owner_vertex_list, owner_vertex);
262
263 struct owner_graph {
264 struct owner_vertex** g_vertices; /* (g) pointers to vertices */
265 int g_size; /* (g) number of vertices */
266 int g_space; /* (g) space allocated for vertices */
267 int *g_indexbuf; /* (g) workspace for loop detection */
268 uint32_t g_gen; /* (g) increment when re-ordering */
269 };
270
271 static struct sx lf_owner_graph_lock;
272 static struct owner_graph lf_owner_graph;
273
274 /*
275 * Initialise various structures and locks.
276 */
277 static void
278 lf_init(void *dummy)
279 {
280 int i;
281
282 sx_init(&lf_lock_states_lock, "lock states lock");
283 LIST_INIT(&lf_lock_states);
284
285 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) {
286 sx_init(&lf_lock_owners[i].lock, "lock owners lock");
287 LIST_INIT(&lf_lock_owners[i].list);
288 }
289
290 sx_init(&lf_owner_graph_lock, "owner graph lock");
291 graph_init(&lf_owner_graph);
292 }
293 SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);
294
295 /*
296 * Generate a hash value for a lock owner.
297 */
298 static int
299 lf_hash_owner(caddr_t id, struct vnode *vp, struct flock *fl, int flags)
300 {
301 uint32_t h;
302
303 if (flags & F_REMOTE) {
304 h = HASHSTEP(0, fl->l_pid);
305 h = HASHSTEP(h, fl->l_sysid);
306 } else if (flags & F_FLOCK) {
307 h = ((uintptr_t) id) >> 7;
308 } else {
309 h = ((uintptr_t) vp) >> 7;
310 }
311
312 return (h % LOCK_OWNER_HASH_SIZE);
313 }
314
315 /*
316 * Return true if a lock owner matches the details passed to
317 * lf_advlock.
318 */
319 static int
320 lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
321 int flags)
322 {
323 if (flags & F_REMOTE) {
324 return lo->lo_pid == fl->l_pid
325 && lo->lo_sysid == fl->l_sysid;
326 } else {
327 return lo->lo_id == id;
328 }
329 }
330
331 static struct lockf_entry *
332 lf_alloc_lock(struct lock_owner *lo)
333 {
334 struct lockf_entry *lf;
335
336 lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);
337
338 #ifdef LOCKF_DEBUG
339 if (lockf_debug & 4)
340 printf("Allocated lock %p\n", lf);
341 #endif
342 if (lo) {
343 sx_xlock(&lf_lock_owners[lo->lo_hash].lock);
344 lo->lo_refs++;
345 sx_xunlock(&lf_lock_owners[lo->lo_hash].lock);
346 lf->lf_owner = lo;
347 }
348
349 return (lf);
350 }
351
352 static int
353 lf_free_lock(struct lockf_entry *lock)
354 {
355 struct sx *chainlock;
356
357 KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
358 if (--lock->lf_refs > 0)
359 return (0);
360 /*
361 * Adjust the lock_owner reference count and
362 * reclaim the entry if this is the last lock
363 * for that owner.
364 */
365 struct lock_owner *lo = lock->lf_owner;
366 if (lo) {
367 KASSERT(LIST_EMPTY(&lock->lf_outedges),
368 ("freeing lock with dependencies"));
369 KASSERT(LIST_EMPTY(&lock->lf_inedges),
370 ("freeing lock with dependants"));
371 chainlock = &lf_lock_owners[lo->lo_hash].lock;
372 sx_xlock(chainlock);
373 KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
374 lo->lo_refs--;
375 if (lo->lo_refs == 0) {
376 #ifdef LOCKF_DEBUG
377 if (lockf_debug & 1)
378 printf("lf_free_lock: freeing lock owner %p\n",
379 lo);
380 #endif
381 if (lo->lo_vertex) {
382 sx_xlock(&lf_owner_graph_lock);
383 graph_free_vertex(&lf_owner_graph,
384 lo->lo_vertex);
385 sx_xunlock(&lf_owner_graph_lock);
386 }
387 LIST_REMOVE(lo, lo_link);
388 free(lo, M_LOCKF);
389 #ifdef LOCKF_DEBUG
390 if (lockf_debug & 4)
391 printf("Freed lock owner %p\n", lo);
392 #endif
393 }
394 sx_unlock(chainlock);
395 }
396 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
397 vrele(lock->lf_vnode);
398 lock->lf_vnode = NULL;
399 }
400 #ifdef LOCKF_DEBUG
401 if (lockf_debug & 4)
402 printf("Freed lock %p\n", lock);
403 #endif
404 free(lock, M_LOCKF);
405 return (1);
406 }
407
408 /*
409 * Advisory record locking support
410 */
411 int
412 lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
413 u_quad_t size)
414 {
415 struct lockf *state;
416 struct flock *fl = ap->a_fl;
417 struct lockf_entry *lock;
418 struct vnode *vp = ap->a_vp;
419 caddr_t id = ap->a_id;
420 int flags = ap->a_flags;
421 int hash;
422 struct lock_owner *lo;
423 off_t start, end, oadd;
424 int error;
425
426 /*
427 * Handle the F_UNLCKSYS case first - no need to mess about
428 * creating a lock owner for this one.
429 */
430 if (ap->a_op == F_UNLCKSYS) {
431 lf_clearremotesys(fl->l_sysid);
432 return (0);
433 }
434
435 /*
436 * Convert the flock structure into a start and end.
437 */
438 switch (fl->l_whence) {
439 case SEEK_SET:
440 case SEEK_CUR:
441 /*
442 * Caller is responsible for adding any necessary offset
443 * when SEEK_CUR is used.
444 */
445 start = fl->l_start;
446 break;
447
448 case SEEK_END:
449 if (size > OFF_MAX ||
450 (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
451 return (EOVERFLOW);
452 start = size + fl->l_start;
453 break;
454
455 default:
456 return (EINVAL);
457 }
458 if (start < 0)
459 return (EINVAL);
460 if (fl->l_len < 0) {
461 if (start == 0)
462 return (EINVAL);
463 end = start - 1;
464 start += fl->l_len;
465 if (start < 0)
466 return (EINVAL);
467 } else if (fl->l_len == 0) {
468 end = OFF_MAX;
469 } else {
470 oadd = fl->l_len - 1;
471 if (oadd > OFF_MAX - start)
472 return (EOVERFLOW);
473 end = start + oadd;
474 }
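
/*
 * At this point [start, end] is the affected byte range, inclusive at
 * both ends. For example (illustrative values only): with SEEK_SET,
 * l_start = 10 and l_len = 5 give [10..14]; l_len = 0 gives
 * [10..OFF_MAX], i.e. to the end of the file; and a negative length
 * such as l_len = -3 locks the three bytes immediately before
 * l_start, giving [7..9].
 */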
475
476 retry_setlock:
477
478 /*
479 * Avoid the common case of unlocking when inode has no locks.
480 */
481 if (ap->a_op != F_SETLK && (*statep) == NULL) {
482 VI_LOCK(vp);
483 if ((*statep) == NULL) {
484 fl->l_type = F_UNLCK;
485 VI_UNLOCK(vp);
486 return (0);
487 }
488 VI_UNLOCK(vp);
489 }
490
491 /*
492 * Map our arguments to an existing lock owner or create one
493 * if this is the first time we have seen this owner.
494 */
495 hash = lf_hash_owner(id, vp, fl, flags);
496 sx_xlock(&lf_lock_owners[hash].lock);
497 LIST_FOREACH(lo, &lf_lock_owners[hash].list, lo_link)
498 if (lf_owner_matches(lo, id, fl, flags))
499 break;
500 if (!lo) {
501 /*
502 * We initialise the lock owner with a reference
503 * count which matches the new lockf_entry
504 * structure created below.
505 */
506 lo = malloc(sizeof(struct lock_owner), M_LOCKF,
507 M_WAITOK|M_ZERO);
508 #ifdef LOCKF_DEBUG
509 if (lockf_debug & 4)
510 printf("Allocated lock owner %p\n", lo);
511 #endif
512
513 lo->lo_refs = 1;
514 lo->lo_flags = flags;
515 lo->lo_id = id;
516 lo->lo_hash = hash;
517 if (flags & F_REMOTE) {
518 lo->lo_pid = fl->l_pid;
519 lo->lo_sysid = fl->l_sysid;
520 } else if (flags & F_FLOCK) {
521 lo->lo_pid = -1;
522 lo->lo_sysid = 0;
523 } else {
524 struct proc *p = (struct proc *) id;
525 lo->lo_pid = p->p_pid;
526 lo->lo_sysid = 0;
527 }
528 lo->lo_vertex = NULL;
529
530 #ifdef LOCKF_DEBUG
531 if (lockf_debug & 1) {
532 printf("lf_advlockasync: new lock owner %p ", lo);
533 lf_print_owner(lo);
534 printf("\n");
535 }
536 #endif
537
538 LIST_INSERT_HEAD(&lf_lock_owners[hash].list, lo, lo_link);
539 } else {
540 /*
541 * We have seen this lock owner before, increase its
542 * reference count to account for the new lockf_entry
543 * structure we create below.
544 */
545 lo->lo_refs++;
546 }
547 sx_xunlock(&lf_lock_owners[hash].lock);
548
549 /*
550 * Create the lockf structure. We initialise the lf_owner
551 * field here instead of in lf_alloc_lock() to avoid paying
552 * the lock owner hash chain lock tax twice.
553 */
554 lock = lf_alloc_lock(NULL);
555 lock->lf_refs = 1;
556 lock->lf_start = start;
557 lock->lf_end = end;
558 lock->lf_owner = lo;
559 lock->lf_vnode = vp;
560 if (flags & F_REMOTE) {
561 /*
562 * For remote locks, the caller may release its ref to
563 * the vnode at any time - we have to ref it here to
564 * prevent it from being recycled unexpectedly.
565 */
566 vref(vp);
567 }
568
569 lock->lf_type = fl->l_type;
570 LIST_INIT(&lock->lf_outedges);
571 LIST_INIT(&lock->lf_inedges);
572 lock->lf_async_task = ap->a_task;
573 lock->lf_flags = ap->a_flags;
574
575 /*
576 * Do the requested operation. First find our state structure
577 * and create a new one if necessary - the caller's *statep
578 * variable and the state's ls_threads count are protected by
579 * the vnode interlock.
580 */
581 VI_LOCK(vp);
582 if (VN_IS_DOOMED(vp)) {
583 VI_UNLOCK(vp);
584 lf_free_lock(lock);
585 return (ENOENT);
586 }
587
588 /*
589 * Allocate a state structure if necessary.
590 */
591 state = *statep;
592 if (state == NULL) {
593 struct lockf *ls;
594
595 VI_UNLOCK(vp);
596
597 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
598 sx_init(&ls->ls_lock, "ls_lock");
599 LIST_INIT(&ls->ls_active);
600 LIST_INIT(&ls->ls_pending);
601 ls->ls_threads = 1;
602
603 sx_xlock(&lf_lock_states_lock);
604 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
605 sx_xunlock(&lf_lock_states_lock);
606
607 /*
608 * Cope if we lost a race with some other thread while
609 * trying to allocate memory.
610 */
611 VI_LOCK(vp);
612 if (VN_IS_DOOMED(vp)) {
613 VI_UNLOCK(vp);
614 sx_xlock(&lf_lock_states_lock);
615 LIST_REMOVE(ls, ls_link);
616 sx_xunlock(&lf_lock_states_lock);
617 sx_destroy(&ls->ls_lock);
618 free(ls, M_LOCKF);
619 lf_free_lock(lock);
620 return (ENOENT);
621 }
622 if ((*statep) == NULL) {
623 state = *statep = ls;
624 VI_UNLOCK(vp);
625 } else {
626 state = *statep;
627 MPASS(state->ls_threads >= 0);
628 state->ls_threads++;
629 VI_UNLOCK(vp);
630
631 sx_xlock(&lf_lock_states_lock);
632 LIST_REMOVE(ls, ls_link);
633 sx_xunlock(&lf_lock_states_lock);
634 sx_destroy(&ls->ls_lock);
635 free(ls, M_LOCKF);
636 }
637 } else {
638 MPASS(state->ls_threads >= 0);
639 state->ls_threads++;
640 VI_UNLOCK(vp);
641 }
642
643 sx_xlock(&state->ls_lock);
644 /*
645 * Recheck the doomed vnode after state->ls_lock is
646 * locked. lf_purgelocks() requires that no new threads add
647 * pending locks when the vnode is marked with the VIRF_DOOMED flag.
648 */
649 if (VN_IS_DOOMED(vp)) {
650 VI_LOCK(vp);
651 MPASS(state->ls_threads > 0);
652 state->ls_threads--;
653 wakeup(state);
654 VI_UNLOCK(vp);
655 sx_xunlock(&state->ls_lock);
656 lf_free_lock(lock);
657 return (ENOENT);
658 }
659
660 switch (ap->a_op) {
661 case F_SETLK:
662 error = lf_setlock(state, lock, vp, ap->a_cookiep);
663 break;
664
665 case F_UNLCK:
666 error = lf_clearlock(state, lock);
667 lf_free_lock(lock);
668 break;
669
670 case F_GETLK:
671 error = lf_getlock(state, lock, fl);
672 lf_free_lock(lock);
673 break;
674
675 case F_CANCEL:
676 if (ap->a_cookiep)
677 error = lf_cancel(state, lock, *ap->a_cookiep);
678 else
679 error = EINVAL;
680 lf_free_lock(lock);
681 break;
682
683 default:
684 lf_free_lock(lock);
685 error = EINVAL;
686 break;
687 }
688
689 #ifdef DIAGNOSTIC
690 /*
691 * Check for some can't happen stuff. In this case, the active
692 * lock list becoming disordered or containing mutually
693 * blocking locks. We also check the pending list for locks
694 * which should be active (i.e. have no out-going edges).
695 */
696 LIST_FOREACH(lock, &state->ls_active, lf_link) {
697 struct lockf_entry *lf;
698 if (LIST_NEXT(lock, lf_link))
699 KASSERT((lock->lf_start
700 <= LIST_NEXT(lock, lf_link)->lf_start),
701 ("locks disordered"));
702 LIST_FOREACH(lf, &state->ls_active, lf_link) {
703 if (lock == lf)
704 break;
705 KASSERT(!lf_blocks(lock, lf),
706 ("two conflicting active locks"));
707 if (lock->lf_owner == lf->lf_owner)
708 KASSERT(!lf_overlaps(lock, lf),
709 ("two overlapping locks from same owner"));
710 }
711 }
712 LIST_FOREACH(lock, &state->ls_pending, lf_link) {
713 KASSERT(!LIST_EMPTY(&lock->lf_outedges),
714 ("pending lock which should be active"));
715 }
716 #endif
717 sx_xunlock(&state->ls_lock);
718
719 VI_LOCK(vp);
720 MPASS(state->ls_threads > 0);
721 state->ls_threads--;
722 if (state->ls_threads != 0) {
723 wakeup(state);
724 }
725 VI_UNLOCK(vp);
726
727 if (error == EDOOFUS) {
728 KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
729 goto retry_setlock;
730 }
731 return (error);
732 }
733
734 int
735 lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
736 {
737 struct vop_advlockasync_args a;
738
739 a.a_vp = ap->a_vp;
740 a.a_id = ap->a_id;
741 a.a_op = ap->a_op;
742 a.a_fl = ap->a_fl;
743 a.a_flags = ap->a_flags;
744 a.a_task = NULL;
745 a.a_cookiep = NULL;
746
747 return (lf_advlockasync(&a, statep, size));
748 }
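
/*
 * A typical consumer is a filesystem's VOP_ADVLOCK implementation,
 * which forwards to lf_advlock() with the address of its per-inode
 * lock list head and the current file size. Roughly (a sketch
 * modelled on ufs; the inode fields are filesystem-specific):
 *
 *	static int
 *	foofs_advlock(struct vop_advlock_args *ap)
 *	{
 *		struct inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *	}
 */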
749
750 void
751 lf_purgelocks(struct vnode *vp, struct lockf **statep)
752 {
753 struct lockf *state;
754 struct lockf_entry *lock, *nlock;
755
756 /*
757 * For this to work correctly, the caller must ensure that no
758 * other threads enter the locking system for this vnode,
759 * e.g. by checking VIRF_DOOMED. We wake up any threads that are
760 * sleeping waiting for locks on this vnode and then free all
761 * the remaining locks.
762 */
763 KASSERT(VN_IS_DOOMED(vp),
764 ("lf_purgelocks: vp %p has not vgone yet", vp));
765 state = *statep;
766 if (state == NULL) {
767 return;
768 }
769 VI_LOCK(vp);
770 *statep = NULL;
771 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
772 KASSERT(LIST_EMPTY(&state->ls_pending),
773 ("freeing state with pending locks"));
774 VI_UNLOCK(vp);
775 goto out_free;
776 }
777 MPASS(state->ls_threads >= 0);
778 state->ls_threads++;
779 VI_UNLOCK(vp);
780
781 sx_xlock(&state->ls_lock);
782 sx_xlock(&lf_owner_graph_lock);
783 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
784 LIST_REMOVE(lock, lf_link);
785 lf_remove_outgoing(lock);
786 lf_remove_incoming(lock);
787
788 /*
789 * If it's an async lock, we can just free it
790 * here, otherwise we let the sleeping thread
791 * free it.
792 */
793 if (lock->lf_async_task) {
794 lf_free_lock(lock);
795 } else {
796 lock->lf_flags |= F_INTR;
797 wakeup(lock);
798 }
799 }
800 sx_xunlock(&lf_owner_graph_lock);
801 sx_xunlock(&state->ls_lock);
802
803 /*
804 * Wait for all other threads, sleeping and otherwise,
805 * to leave.
806 */
807 VI_LOCK(vp);
808 while (state->ls_threads > 1)
809 msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
810 VI_UNLOCK(vp);
811
812 /*
813 * We can just free all the active locks since they
814 * will have no dependencies (we removed them all
815 * above). We don't need to bother locking since we
816 * are the last thread using this state structure.
817 */
818 KASSERT(LIST_EMPTY(&state->ls_pending),
819 ("lock pending for %p", state));
820 LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
821 LIST_REMOVE(lock, lf_link);
822 lf_free_lock(lock);
823 }
824 out_free:
825 sx_xlock(&lf_lock_states_lock);
826 LIST_REMOVE(state, ls_link);
827 sx_xunlock(&lf_lock_states_lock);
828 sx_destroy(&state->ls_lock);
829 free(state, M_LOCKF);
830 }
831
832 /*
833 * Return non-zero if locks 'x' and 'y' overlap.
834 */
835 static int
836 lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
837 {
838
839 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
840 }
841
842 /*
843 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
844 */
845 static int
846 lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
847 {
848
849 return x->lf_owner != y->lf_owner
850 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
851 && lf_overlaps(x, y);
852 }
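
/*
 * In other words, two shared (read) locks never conflict, nor do any
 * two locks belonging to the same owner; only an overlapping pair
 * from different owners where at least one side is a write lock
 * counts as blocking.
 */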
853
854 /*
855 * Allocate a lock edge structure.
856 */
857 static struct lockf_edge *
858 lf_alloc_edge(void)
859 {
860
861 return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
862 }
863
864 /*
865 * Free a lock edge.
866 */
867 static void
868 lf_free_edge(struct lockf_edge *e)
869 {
870
871 free(e, M_LOCKF);
872 }
873
874 /*
875 * Ensure that the lock's owner has a corresponding vertex in the
876 * owner graph.
877 */
878 static void
879 lf_alloc_vertex(struct lockf_entry *lock)
880 {
881 struct owner_graph *g = &lf_owner_graph;
882
883 if (!lock->lf_owner->lo_vertex)
884 lock->lf_owner->lo_vertex =
885 graph_alloc_vertex(g, lock->lf_owner);
886 }
887
888 /*
889 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
890 * the new edge would cause a cycle in the owner graph.
891 */
892 static int
893 lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
894 {
895 struct owner_graph *g = &lf_owner_graph;
896 struct lockf_edge *e;
897 int error;
898
899 #ifdef DIAGNOSTIC
900 LIST_FOREACH(e, &x->lf_outedges, le_outlink)
901 KASSERT(e->le_to != y, ("adding lock edge twice"));
902 #endif
903
904 /*
905 * Make sure the two owners have entries in the owner graph.
906 */
907 lf_alloc_vertex(x);
908 lf_alloc_vertex(y);
909
910 error = graph_add_edge(g, x->lf_owner->lo_vertex,
911 y->lf_owner->lo_vertex);
912 if (error)
913 return (error);
914
915 e = lf_alloc_edge();
916 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
917 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
918 e->le_from = x;
919 e->le_to = y;
920
921 return (0);
922 }
923
924 /*
925 * Remove an edge from the lock graph.
926 */
927 static void
928 lf_remove_edge(struct lockf_edge *e)
929 {
930 struct owner_graph *g = &lf_owner_graph;
931 struct lockf_entry *x = e->le_from;
932 struct lockf_entry *y = e->le_to;
933
934 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
935 LIST_REMOVE(e, le_outlink);
936 LIST_REMOVE(e, le_inlink);
937 e->le_from = NULL;
938 e->le_to = NULL;
939 lf_free_edge(e);
940 }
941
942 /*
943 * Remove all out-going edges from lock x.
944 */
945 static void
946 lf_remove_outgoing(struct lockf_entry *x)
947 {
948 struct lockf_edge *e;
949
950 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
951 lf_remove_edge(e);
952 }
953 }
954
955 /*
956 * Remove all in-coming edges from lock x.
957 */
958 static void
959 lf_remove_incoming(struct lockf_entry *x)
960 {
961 struct lockf_edge *e;
962
963 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
964 lf_remove_edge(e);
965 }
966 }
967
968 /*
969 * Walk the list of locks for the file and create an out-going edge
970 * from lock to each blocking lock.
971 */
972 static int
973 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
974 {
975 struct lockf_entry *overlap;
976 int error;
977
978 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
979 /*
980 * We may assume that the active list is sorted by
981 * lf_start.
982 */
983 if (overlap->lf_start > lock->lf_end)
984 break;
985 if (!lf_blocks(lock, overlap))
986 continue;
987
988 /*
989 * We've found a blocking lock. Add the corresponding
990 * edge to the graphs and see if it would cause a
991 * deadlock.
992 */
993 error = lf_add_edge(lock, overlap);
994
995 /*
996 * The only error that lf_add_edge returns is EDEADLK.
997 * Remove any edges we added and return the error.
998 */
999 if (error) {
1000 lf_remove_outgoing(lock);
1001 return (error);
1002 }
1003 }
1004
1005 /*
1006 * We also need to add edges to sleeping locks that block
1007 * us. This ensures that lf_wakeup_lock cannot grant two
1008 * mutually blocking locks simultaneously and also enforces a
1009 * 'first come, first served' fairness model. Note that this
1010 * only happens if we are blocked by at least one active lock
1011 * due to the call to lf_getblock in lf_setlock below.
1012 */
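/*
 * For example, if owner A holds a shared lock and owner B is already
 * sleeping on an overlapping exclusive request, a new exclusive
 * request from owner C gains edges both to A's active lock and to
 * B's pending one, so C cannot be granted ahead of B once A's lock
 * goes away.
 */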
1013 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1014 if (!lf_blocks(lock, overlap))
1015 continue;
1016 /*
1017 * We've found a blocking lock. Add the corresponding
1018 * edge to the graphs and see if it would cause a
1019 * deadlock.
1020 */
1021 error = lf_add_edge(lock, overlap);
1022
1023 /*
1024 * The only error that lf_add_edge returns is EDEADLK.
1025 * Remove any edges we added and return the error.
1026 */
1027 if (error) {
1028 lf_remove_outgoing(lock);
1029 return (error);
1030 }
1031 }
1032
1033 return (0);
1034 }
1035
1036 /*
1037 * Walk the list of pending locks for the file and create an in-coming
1038 * edge to 'lock' from each pending lock that the new lock would block.
1039 */
1040 static int
1041 lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
1042 {
1043 struct lockf_entry *overlap;
1044 int error;
1045
1046 sx_assert(&state->ls_lock, SX_XLOCKED);
1047 if (LIST_EMPTY(&state->ls_pending))
1048 return (0);
1049
1050 error = 0;
1051 sx_xlock(&lf_owner_graph_lock);
1052 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1053 if (!lf_blocks(lock, overlap))
1054 continue;
1055
1056 /*
1057 * We've found a blocking lock. Add the corresponding
1058 * edge to the graphs and see if it would cause a
1059 * deadlock.
1060 */
1061 error = lf_add_edge(overlap, lock);
1062
1063 /*
1064 * The only error that lf_add_edge returns is EDEADLK.
1065 * Remove any edges we added and return the error.
1066 */
1067 if (error) {
1068 lf_remove_incoming(lock);
1069 break;
1070 }
1071 }
1072 sx_xunlock(&lf_owner_graph_lock);
1073 return (error);
1074 }
1075
1076 /*
1077 * Insert lock into the active list, keeping list entries ordered by
1078 * increasing values of lf_start.
1079 */
1080 static void
1081 lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
1082 {
1083 struct lockf_entry *lf, *lfprev;
1084
1085 if (LIST_EMPTY(&state->ls_active)) {
1086 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
1087 return;
1088 }
1089
1090 lfprev = NULL;
1091 LIST_FOREACH(lf, &state->ls_active, lf_link) {
1092 if (lf->lf_start > lock->lf_start) {
1093 LIST_INSERT_BEFORE(lf, lock, lf_link);
1094 return;
1095 }
1096 lfprev = lf;
1097 }
1098 LIST_INSERT_AFTER(lfprev, lock, lf_link);
1099 }
1100
1101 /*
1102 * Wake up a sleeping lock and remove it from the pending list now
1103 * that all its dependencies have been resolved. The caller should
1104 * arrange for the lock to be added to the active list, adjusting any
1105 * existing locks for the same owner as needed.
1106 */
1107 static void
1108 lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
1109 {
1110
1111 /*
1112 * Remove from ls_pending list and wake up the caller
1113 * or start the async notification, as appropriate.
1114 */
1115 LIST_REMOVE(wakelock, lf_link);
1116 #ifdef LOCKF_DEBUG
1117 if (lockf_debug & 1)
1118 lf_print("lf_wakeup_lock: awakening", wakelock);
1119 #endif /* LOCKF_DEBUG */
1120 if (wakelock->lf_async_task) {
1121 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
1122 } else {
1123 wakeup(wakelock);
1124 }
1125 }
1126
1127 /*
1128 * Re-check all dependent locks and remove edges to locks that we no
1129 * longer block. If 'all' is non-zero, the lock has been removed and
1130 * we must remove all the dependencies, otherwise it has simply been
1131 * reduced but remains active. Any pending locks which have been
1132 * unblocked are added to 'granted'.
1133 */
1134 static void
1135 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
1136 struct lockf_entry_list *granted)
1137 {
1138 struct lockf_edge *e, *ne;
1139 struct lockf_entry *deplock;
1140
1141 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
1142 deplock = e->le_from;
1143 if (all || !lf_blocks(lock, deplock)) {
1144 sx_xlock(&lf_owner_graph_lock);
1145 lf_remove_edge(e);
1146 sx_xunlock(&lf_owner_graph_lock);
1147 if (LIST_EMPTY(&deplock->lf_outedges)) {
1148 lf_wakeup_lock(state, deplock);
1149 LIST_INSERT_HEAD(granted, deplock, lf_link);
1150 }
1151 }
1152 }
1153 }
1154
1155 /*
1156 * Set the start of an existing active lock, updating dependencies and
1157 * adding any newly woken locks to 'granted'.
1158 */
1159 static void
1160 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
1161 struct lockf_entry_list *granted)
1162 {
1163
1164 KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
1165 lock->lf_start = new_start;
1166 LIST_REMOVE(lock, lf_link);
1167 lf_insert_lock(state, lock);
1168 lf_update_dependancies(state, lock, FALSE, granted);
1169 }
1170
1171 /*
1172 * Set the end of an existing active lock, updating dependencies and
1173 * adding any newly woken locks to 'granted'.
1174 */
1175 static void
1176 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
1177 struct lockf_entry_list *granted)
1178 {
1179
1180 KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
1181 lock->lf_end = new_end;
1182 lf_update_dependancies(state, lock, FALSE, granted);
1183 }
1184
1185 /*
1186 * Add a lock to the active list, updating or removing any current
1187 * locks owned by the same owner and processing any pending locks that
1188 * become unblocked as a result. This code is also used for unlock
1189 * since the logic for updating existing locks is identical.
1190 *
1191 * As a result of processing the new lock, we may unblock existing
1192 * pending locks as a result of downgrading/unlocking. We simply
1193 * activate the newly granted locks by looping.
1194 *
1195 * Since the new lock already has its dependencies set up, we always
1196 * add it to the list (unless it's an unlock request). This may
1197 * fragment the lock list in some pathological cases but it's probably
1198 * not a real problem.
1199 */
1200 static void
1201 lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
1202 {
1203 struct lockf_entry *overlap, *lf;
1204 struct lockf_entry_list granted;
1205 int ovcase;
1206
1207 LIST_INIT(&granted);
1208 LIST_INSERT_HEAD(&granted, lock, lf_link);
1209
1210 while (!LIST_EMPTY(&granted)) {
1211 lock = LIST_FIRST(&granted);
1212 LIST_REMOVE(lock, lf_link);
1213
1214 /*
1215 * Skip over locks owned by other processes. Handle
1216 * any locks that overlap and are owned by ourselves.
1217 */
1218 overlap = LIST_FIRST(&state->ls_active);
1219 for (;;) {
1220 ovcase = lf_findoverlap(&overlap, lock, SELF);
1221
1222 #ifdef LOCKF_DEBUG
1223 if (ovcase && (lockf_debug & 2)) {
1224 printf("lf_setlock: overlap %d", ovcase);
1225 lf_print("", overlap);
1226 }
1227 #endif
1228 /*
1229 * Six cases:
1230 * 0) no overlap
1231 * 1) overlap == lock
1232 * 2) overlap contains lock
1233 * 3) lock contains overlap
1234 * 4) overlap starts before lock
1235 * 5) overlap ends after lock
1236 */
1237 switch (ovcase) {
1238 case 0: /* no overlap */
1239 break;
1240
1241 case 1: /* overlap == lock */
1242 /*
1243 * We have already setup the
1244 * dependants for the new lock, taking
1245 * into account a possible downgrade
1246 * or unlock. Remove the old lock.
1247 */
1248 LIST_REMOVE(overlap, lf_link);
1249 lf_update_dependancies(state, overlap, TRUE,
1250 &granted);
1251 lf_free_lock(overlap);
1252 break;
1253
1254 case 2: /* overlap contains lock */
1255 /*
1256 * Just split the existing lock.
1257 */
1258 lf_split(state, overlap, lock, &granted);
1259 break;
1260
1261 case 3: /* lock contains overlap */
1262 /*
1263 * Delete the overlap and advance to
1264 * the next entry in the list.
1265 */
1266 lf = LIST_NEXT(overlap, lf_link);
1267 LIST_REMOVE(overlap, lf_link);
1268 lf_update_dependancies(state, overlap, TRUE,
1269 &granted);
1270 lf_free_lock(overlap);
1271 overlap = lf;
1272 continue;
1273
1274 case 4: /* overlap starts before lock */
1275 /*
1276 * Just update the overlap end and
1277 * move on.
1278 */
1279 lf_set_end(state, overlap, lock->lf_start - 1,
1280 &granted);
1281 overlap = LIST_NEXT(overlap, lf_link);
1282 continue;
1283
1284 case 5: /* overlap ends after lock */
1285 /*
1286 * Change the start of overlap and
1287 * re-insert.
1288 */
1289 lf_set_start(state, overlap, lock->lf_end + 1,
1290 &granted);
1291 break;
1292 }
1293 break;
1294 }
1295 #ifdef LOCKF_DEBUG
1296 if (lockf_debug & 1) {
1297 if (lock->lf_type != F_UNLCK)
1298 lf_print("lf_activate_lock: activated", lock);
1299 else
1300 lf_print("lf_activate_lock: unlocked", lock);
1301 lf_printlist("lf_activate_lock", lock);
1302 }
1303 #endif /* LOCKF_DEBUG */
1304 if (lock->lf_type != F_UNLCK)
1305 lf_insert_lock(state, lock);
1306 }
1307 }
1308
1309 /*
1310 * Cancel a pending lock request, either as a result of a signal or a
1311 * cancel request for an async lock.
1312 */
1313 static void
1314 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
1315 {
1316 struct lockf_entry_list granted;
1317
1318 /*
1319 * Note it is theoretically possible that cancelling this lock
1320 * may allow some other pending lock to become
1321 * active. Consider this case:
1322 *
1323 * Owner Action Result Dependencies
1324 *
1325 * A: lock [0..0] succeeds
1326 * B: lock [2..2] succeeds
1327 * C: lock [1..2] blocked C->B
1328 * D: lock [0..1] blocked C->B,D->A,D->C
1329 * A: unlock [0..0] C->B,D->C
1330 * C: cancel [1..2] (D loses its last out-going edge and is granted)
1331 */
1332
1333 LIST_REMOVE(lock, lf_link);
1334
1335 /*
1336 * Removing out-going edges is simple.
1337 */
1338 sx_xlock(&lf_owner_graph_lock);
1339 lf_remove_outgoing(lock);
1340 sx_xunlock(&lf_owner_graph_lock);
1341
1342 /*
1343 * Removing in-coming edges may allow some other lock to
1344 * become active - we use lf_update_dependancies to figure
1345 * this out.
1346 */
1347 LIST_INIT(&granted);
1348 lf_update_dependancies(state, lock, TRUE, &granted);
1349 lf_free_lock(lock);
1350
1351 /*
1352 * Feed any newly active locks to lf_activate_lock.
1353 */
1354 while (!LIST_EMPTY(&granted)) {
1355 lock = LIST_FIRST(&granted);
1356 LIST_REMOVE(lock, lf_link);
1357 lf_activate_lock(state, lock);
1358 }
1359 }
1360
1361 /*
1362 * Set a byte-range lock.
1363 */
1364 static int
1365 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
1366 void **cookiep)
1367 {
1368 static char lockstr[] = "lockf";
1369 int error, priority, stops_deferred;
1370
1371 #ifdef LOCKF_DEBUG
1372 if (lockf_debug & 1)
1373 lf_print("lf_setlock", lock);
1374 #endif /* LOCKF_DEBUG */
1375
1376 /*
1377 * Set the priority
1378 */
1379 priority = PLOCK;
1380 if (lock->lf_type == F_WRLCK)
1381 priority += 4;
1382 if (!(lock->lf_flags & F_NOINTR))
1383 priority |= PCATCH;
1384 /*
1385 * Scan lock list for this file looking for locks that would block us.
1386 */
1387 if (lf_getblock(state, lock)) {
1388 /*
1389 * Free the structure and return if nonblocking.
1390 */
1391 if ((lock->lf_flags & F_WAIT) == 0
1392 && lock->lf_async_task == NULL) {
1393 lf_free_lock(lock);
1394 error = EAGAIN;
1395 goto out;
1396 }
1397
1398 /*
1399 * For flock type locks, we must first remove
1400 * any shared locks that we hold before we sleep
1401 * waiting for an exclusive lock.
1402 */
1403 if ((lock->lf_flags & F_FLOCK) &&
1404 lock->lf_type == F_WRLCK) {
1405 lock->lf_type = F_UNLCK;
1406 lf_activate_lock(state, lock);
1407 lock->lf_type = F_WRLCK;
1408 }
1409
1410 /*
1411 * We are blocked. Create edges to each blocking lock,
1412 * checking for deadlock using the owner graph. For
1413 * simplicity, we run deadlock detection for all
1414 * locks, posix and otherwise.
1415 */
1416 sx_xlock(&lf_owner_graph_lock);
1417 error = lf_add_outgoing(state, lock);
1418 sx_xunlock(&lf_owner_graph_lock);
1419
1420 if (error) {
1421 #ifdef LOCKF_DEBUG
1422 if (lockf_debug & 1)
1423 lf_print("lf_setlock: deadlock", lock);
1424 #endif
1425 lf_free_lock(lock);
1426 goto out;
1427 }
1428
1429 /*
1430 * We have added edges to everything that blocks
1431 * us. Sleep until they all go away.
1432 */
1433 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
1434 #ifdef LOCKF_DEBUG
1435 if (lockf_debug & 1) {
1436 struct lockf_edge *e;
1437 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
1438 lf_print("lf_setlock: blocking on", e->le_to);
1439 lf_printlist("lf_setlock", e->le_to);
1440 }
1441 }
1442 #endif /* LOCKF_DEBUG */
1443
1444 if ((lock->lf_flags & F_WAIT) == 0) {
1445 /*
1446 * The caller requested async notification -
1447 * this callback happens when the blocking
1448 * lock is released, allowing the caller to
1449 * make another attempt to take the lock.
1450 */
1451 *cookiep = (void *) lock;
1452 error = EINPROGRESS;
1453 goto out;
1454 }
1455
1456 lock->lf_refs++;
1457 stops_deferred = sigdeferstop(SIGDEFERSTOP_ERESTART);
1458 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
1459 sigallowstop(stops_deferred);
1460 if (lf_free_lock(lock)) {
1461 error = EDOOFUS;
1462 goto out;
1463 }
1464
1465 /*
1466 * We may have been awakened by a signal and/or by a
1467 * debugger continuing us (in which cases we must
1468 * remove our lock graph edges) and/or by another
1469 * process releasing a lock (in which case our edges
1470 * have already been removed and we have been moved to
1471 * the active list). We may also have been woken by
1472 * lf_purgelocks which we report to the caller as
1473 * EINTR. In that case, lf_purgelocks will have
1474 * removed our lock graph edges.
1475 *
1476 * Note that it is possible to receive a signal after
1477 * we were successfully woken (and moved to the active
1478 * list) but before we resumed execution. In this
1479 * case, our lf_outedges list will be clear. We
1480 * pretend there was no error.
1481 *
1482 * Note also, if we have been sleeping long enough, we
1483 * may now have incoming edges from some newer lock
1484 * which is waiting behind us in the queue.
1485 */
1486 if (lock->lf_flags & F_INTR) {
1487 error = EINTR;
1488 lf_free_lock(lock);
1489 goto out;
1490 }
1491 if (LIST_EMPTY(&lock->lf_outedges)) {
1492 error = 0;
1493 } else {
1494 lf_cancel_lock(state, lock);
1495 goto out;
1496 }
1497 #ifdef LOCKF_DEBUG
1498 if (lockf_debug & 1) {
1499 lf_print("lf_setlock: granted", lock);
1500 }
1501 #endif
1502 goto out;
1503 }
1504 /*
1505 * It looks like we are going to grant the lock. First add
1506 * edges from any currently pending lock that the new lock
1507 * would block.
1508 */
1509 error = lf_add_incoming(state, lock);
1510 if (error) {
1511 #ifdef LOCKF_DEBUG
1512 if (lockf_debug & 1)
1513 lf_print("lf_setlock: deadlock", lock);
1514 #endif
1515 lf_free_lock(lock);
1516 goto out;
1517 }
1518
1519 /*
1520 * No blocks!! Add the lock. Note that we will
1521 * downgrade or upgrade any overlapping locks this
1522 * process already owns.
1523 */
1524 lf_activate_lock(state, lock);
1525 error = 0;
1526 out:
1527 return (error);
1528 }
1529
1530 /*
1531 * Remove a byte-range lock on an inode.
1532 *
1533 * Generally, find the lock (or an overlap to that lock)
1534 * and remove it (or shrink it), then wakeup anyone we can.
1535 */
1536 static int
1537 lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
1538 {
1539 struct lockf_entry *overlap;
1540
1541 overlap = LIST_FIRST(&state->ls_active);
1542
1543 if (overlap == NOLOCKF)
1544 return (0);
1545 #ifdef LOCKF_DEBUG
1546 if (unlock->lf_type != F_UNLCK)
1547 panic("lf_clearlock: bad type");
1548 if (lockf_debug & 1)
1549 lf_print("lf_clearlock", unlock);
1550 #endif /* LOCKF_DEBUG */
1551
1552 lf_activate_lock(state, unlock);
1553
1554 return (0);
1555 }
1556
1557 /*
1558 * Check whether there is a blocking lock, and if so return its
1559 * details in '*fl'.
1560 */
1561 static int
1562 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
1563 {
1564 struct lockf_entry *block;
1565
1566 #ifdef LOCKF_DEBUG
1567 if (lockf_debug & 1)
1568 lf_print("lf_getlock", lock);
1569 #endif /* LOCKF_DEBUG */
1570
1571 if ((block = lf_getblock(state, lock))) {
1572 fl->l_type = block->lf_type;
1573 fl->l_whence = SEEK_SET;
1574 fl->l_start = block->lf_start;
1575 if (block->lf_end == OFF_MAX)
1576 fl->l_len = 0;
1577 else
1578 fl->l_len = block->lf_end - block->lf_start + 1;
1579 fl->l_pid = block->lf_owner->lo_pid;
1580 fl->l_sysid = block->lf_owner->lo_sysid;
1581 } else {
1582 fl->l_type = F_UNLCK;
1583 }
1584 return (0);
1585 }
1586
1587 /*
1588 * Cancel an async lock request.
1589 */
1590 static int
1591 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
1592 {
1593 struct lockf_entry *reallock;
1594
1595 /*
1596 * We need to match this request with an existing lock
1597 * request.
1598 */
1599 LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
1600 if ((void *) reallock == cookie) {
1601 /*
1602 * Double-check that this lock looks right
1603 * (maybe use a rolling ID for the cancel
1604 * cookie instead?)
1605 */
1606 if (!(reallock->lf_vnode == lock->lf_vnode
1607 && reallock->lf_start == lock->lf_start
1608 && reallock->lf_end == lock->lf_end)) {
1609 return (ENOENT);
1610 }
1611
1612 /*
1613 * Make sure this lock was async and then just
1614 * remove it from its wait lists.
1615 */
1616 if (!reallock->lf_async_task) {
1617 return (ENOENT);
1618 }
1619
1620 /*
1621 * Note that since any other thread must take
1622 * state->ls_lock before it can possibly
1623 * trigger the async callback, we are safe
1624 * from a race with lf_wakeup_lock, i.e. we
1625 * can free the lock (actually our caller does
1626 * this).
1627 */
1628 lf_cancel_lock(state, reallock);
1629 return (0);
1630 }
1631 }
1632
1633 /*
1634 * We didn't find a matching lock - not much we can do here.
1635 */
1636 return (ENOENT);
1637 }
1638
1639 /*
1640 * Walk the list of locks for an inode and
1641 * return the first blocking lock.
1642 */
1643 static struct lockf_entry *
1644 lf_getblock(struct lockf *state, struct lockf_entry *lock)
1645 {
1646 struct lockf_entry *overlap;
1647
1648 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
1649 /*
1650 * We may assume that the active list is sorted by
1651 * lf_start.
1652 */
1653 if (overlap->lf_start > lock->lf_end)
1654 break;
1655 if (!lf_blocks(lock, overlap))
1656 continue;
1657 return (overlap);
1658 }
1659 return (NOLOCKF);
1660 }
1661
1662 /*
1663 * Walk the list of locks for an inode to find an overlapping lock (if
1664 * any) and return a classification of that overlap.
1665 *
1666 * Arguments:
1667 * *overlap The place in the lock list to start looking
1668 * lock The lock which is being tested
1669 * type Pass 'SELF' to test only locks with the same
1670 * owner as lock, or 'OTHERS' to test only locks
1671 * with a different owner
1672 *
1673 * Returns one of six values:
1674 * 0) no overlap
1675 * 1) overlap == lock
1676 * 2) overlap contains lock
1677 * 3) lock contains overlap
1678 * 4) overlap starts before lock
1679 * 5) overlap ends after lock
1680 *
1681 * If there is an overlapping lock, '*overlap' is set to point at the
1682 * overlapping lock.
1683 *
1684 * NOTE: this returns only the FIRST overlapping lock. There
1685 * may be more than one.
1686 */
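/*
 * A rough sketch of the five overlapping cases (case 0, no overlap,
 * is omitted); 'lock' is drawn on the upper line and 'overlap' on
 * the lower line of each pair:
 *
 *   1) overlap == lock:              |=====|
 *                                    |=====|
 *   2) overlap contains lock:          |===|
 *                                    |=======|
 *   3) lock contains overlap:        |=======|
 *                                      |===|
 *   4) overlap starts before lock:       |=====|
 *                                    |=====|
 *   5) overlap ends after lock:      |=====|
 *                                        |=====|
 */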
1687 static int
1688 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
1689 {
1690 struct lockf_entry *lf;
1691 off_t start, end;
1692 int res;
1693
1694 if ((*overlap) == NOLOCKF) {
1695 return (0);
1696 }
1697 #ifdef LOCKF_DEBUG
1698 if (lockf_debug & 2)
1699 lf_print("lf_findoverlap: looking for overlap in", lock);
1700 #endif /* LOCKF_DEBUG */
1701 start = lock->lf_start;
1702 end = lock->lf_end;
1703 res = 0;
1704 while (*overlap) {
1705 lf = *overlap;
1706 if (lf->lf_start > end)
1707 break;
1708 if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
1709 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
1710 *overlap = LIST_NEXT(lf, lf_link);
1711 continue;
1712 }
1713 #ifdef LOCKF_DEBUG
1714 if (lockf_debug & 2)
1715 lf_print("\tchecking", lf);
1716 #endif /* LOCKF_DEBUG */
1717 /*
1718 * OK, check for overlap
1719 *
1720 * Six cases:
1721 * 0) no overlap
1722 * 1) overlap == lock
1723 * 2) overlap contains lock
1724 * 3) lock contains overlap
1725 * 4) overlap starts before lock
1726 * 5) overlap ends after lock
1727 */
1728 if (start > lf->lf_end) {
1729 /* Case 0 */
1730 #ifdef LOCKF_DEBUG
1731 if (lockf_debug & 2)
1732 printf("no overlap\n");
1733 #endif /* LOCKF_DEBUG */
1734 *overlap = LIST_NEXT(lf, lf_link);
1735 continue;
1736 }
1737 if (lf->lf_start == start && lf->lf_end == end) {
1738 /* Case 1 */
1739 #ifdef LOCKF_DEBUG
1740 if (lockf_debug & 2)
1741 printf("overlap == lock\n");
1742 #endif /* LOCKF_DEBUG */
1743 res = 1;
1744 break;
1745 }
1746 if (lf->lf_start <= start && lf->lf_end >= end) {
1747 /* Case 2 */
1748 #ifdef LOCKF_DEBUG
1749 if (lockf_debug & 2)
1750 printf("overlap contains lock\n");
1751 #endif /* LOCKF_DEBUG */
1752 res = 2;
1753 break;
1754 }
1755 if (start <= lf->lf_start && end >= lf->lf_end) {
1756 /* Case 3 */
1757 #ifdef LOCKF_DEBUG
1758 if (lockf_debug & 2)
1759 printf("lock contains overlap\n");
1760 #endif /* LOCKF_DEBUG */
1761 res = 3;
1762 break;
1763 }
1764 if (lf->lf_start < start && lf->lf_end >= start) {
1765 /* Case 4 */
1766 #ifdef LOCKF_DEBUG
1767 if (lockf_debug & 2)
1768 printf("overlap starts before lock\n");
1769 #endif /* LOCKF_DEBUG */
1770 res = 4;
1771 break;
1772 }
1773 if (lf->lf_start > start && lf->lf_end > end) {
1774 /* Case 5 */
1775 #ifdef LOCKF_DEBUG
1776 if (lockf_debug & 2)
1777 printf("overlap ends after lock\n");
1778 #endif /* LOCKF_DEBUG */
1779 res = 5;
1780 break;
1781 }
1782 panic("lf_findoverlap: default");
1783 }
1784 return (res);
1785 }
1786
1787 /*
1788 * Split the existing 'lock1' based on the extent of the lock
1789 * described by 'lock2'. The existing lock should cover 'lock2'
1790 * entirely.
1791 *
1792 * Any pending locks which have been unblocked are added to
1793 * 'granted'.
1794 */
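/*
 * For example (illustrative ranges only): if lock1 covers [0..9] and
 * lock2 covers [3..5], lock1 is trimmed to [0..2] and a new
 * 'splitlock' covering [6..9] is created and inserted. If lock2
 * instead shares its start or end with lock1, no new entry is needed
 * and a single lf_set_start() or lf_set_end() call suffices.
 */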
1795 static void
1796 lf_split(struct lockf *state, struct lockf_entry *lock1,
1797 struct lockf_entry *lock2, struct lockf_entry_list *granted)
1798 {
1799 struct lockf_entry *splitlock;
1800
1801 #ifdef LOCKF_DEBUG
1802 if (lockf_debug & 2) {
1803 lf_print("lf_split", lock1);
1804 lf_print("splitting from", lock2);
1805 }
1806 #endif /* LOCKF_DEBUG */
1807 /*
1808 * Check to see if we don't need to split at all.
1809 */
1810 if (lock1->lf_start == lock2->lf_start) {
1811 lf_set_start(state, lock1, lock2->lf_end + 1, granted);
1812 return;
1813 }
1814 if (lock1->lf_end == lock2->lf_end) {
1815 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1816 return;
1817 }
1818 /*
1819 * Make a new lock consisting of the last part of
1820 * the encompassing lock.
1821 */
1822 splitlock = lf_alloc_lock(lock1->lf_owner);
1823 memcpy(splitlock, lock1, sizeof *splitlock);
1824 splitlock->lf_refs = 1;
1825 if (splitlock->lf_flags & F_REMOTE)
1826 vref(splitlock->lf_vnode);
1827
1828 /*
1829 * This cannot cause a deadlock since any edges we would add
1830 * to splitlock already exist in lock1. We must be sure to add
1831 * necessary dependencies to splitlock before we reduce lock1
1832 * otherwise we may accidentally grant a pending lock that
1833 * was blocked by the tail end of lock1.
1834 */
1835 splitlock->lf_start = lock2->lf_end + 1;
1836 LIST_INIT(&splitlock->lf_outedges);
1837 LIST_INIT(&splitlock->lf_inedges);
1838 lf_add_incoming(state, splitlock);
1839
1840 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1841
1842 /*
1843 * OK, now link it in
1844 */
1845 lf_insert_lock(state, splitlock);
1846 }
1847
1848 struct lockdesc {
1849 STAILQ_ENTRY(lockdesc) link;
1850 struct vnode *vp;
1851 struct flock fl;
1852 };
1853 STAILQ_HEAD(lockdesclist, lockdesc);
1854
1855 int
1856 lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
1857 {
1858 struct lockf *ls;
1859 struct lockf_entry *lf;
1860 struct lockdesc *ldesc;
1861 struct lockdesclist locks;
1862 int error;
1863
1864 /*
1865 * In order to keep the locking simple, we iterate over the
1866 * active lock lists to build a list of locks that need
1867 * releasing. We then call the iterator for each one in turn.
1868 *
1869 * We take an extra reference to the vnode for the duration to
1870 * make sure it doesn't go away before we are finished.
1871 */
1872 STAILQ_INIT(&locks);
1873 sx_xlock(&lf_lock_states_lock);
1874 LIST_FOREACH(ls, &lf_lock_states, ls_link) {
1875 sx_xlock(&ls->ls_lock);
1876 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1877 if (lf->lf_owner->lo_sysid != sysid)
1878 continue;
1879
1880 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1881 M_WAITOK);
1882 ldesc->vp = lf->lf_vnode;
1883 vref(ldesc->vp);
1884 ldesc->fl.l_start = lf->lf_start;
1885 if (lf->lf_end == OFF_MAX)
1886 ldesc->fl.l_len = 0;
1887 else
1888 ldesc->fl.l_len =
1889 lf->lf_end - lf->lf_start + 1;
1890 ldesc->fl.l_whence = SEEK_SET;
1891 ldesc->fl.l_type = F_UNLCK;
1892 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1893 ldesc->fl.l_sysid = sysid;
1894 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1895 }
1896 sx_xunlock(&ls->ls_lock);
1897 }
1898 sx_xunlock(&lf_lock_states_lock);
1899
1900 /*
1901 * Call the iterator function for each lock in turn. If the
1902 * iterator returns an error code, just free the rest of the
1903 * lockdesc structures.
1904 */
1905 error = 0;
1906 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1907 STAILQ_REMOVE_HEAD(&locks, link);
1908 if (!error)
1909 error = fn(ldesc->vp, &ldesc->fl, arg);
1910 vrele(ldesc->vp);
1911 free(ldesc, M_LOCKF);
1912 }
1913
1914 return (error);
1915 }
1916
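/*
 * Call 'fn' once for each active lock on the vnode 'vp'.
 */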
1917 int
1918 lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
1919 {
1920 struct lockf *ls;
1921 struct lockf_entry *lf;
1922 struct lockdesc *ldesc;
1923 struct lockdesclist locks;
1924 int error;
1925
1926 /*
1927 * In order to keep the locking simple, we iterate over the
1928 * active lock lists to build a list of locks that need
1929 * releasing. We then call the iterator for each one in turn.
1930 *
1931 * We take an extra reference to the vnode for the duration to
1932 * make sure it doesn't go away before we are finished.
1933 */
1934 STAILQ_INIT(&locks);
1935 VI_LOCK(vp);
1936 ls = vp->v_lockf;
1937 if (!ls) {
1938 VI_UNLOCK(vp);
1939 return (0);
1940 }
1941 MPASS(ls->ls_threads >= 0);
1942 ls->ls_threads++;
1943 VI_UNLOCK(vp);
1944
1945 sx_xlock(&ls->ls_lock);
1946 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1947 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1948 M_WAITOK);
1949 ldesc->vp = lf->lf_vnode;
1950 vref(ldesc->vp);
1951 ldesc->fl.l_start = lf->lf_start;
1952 if (lf->lf_end == OFF_MAX)
1953 ldesc->fl.l_len = 0;
1954 else
1955 ldesc->fl.l_len =
1956 lf->lf_end - lf->lf_start + 1;
1957 ldesc->fl.l_whence = SEEK_SET;
1958 ldesc->fl.l_type = F_UNLCK;
1959 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1960 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
1961 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1962 }
1963 sx_xunlock(&ls->ls_lock);
1964 VI_LOCK(vp);
1965 MPASS(ls->ls_threads > 0);
1966 ls->ls_threads--;
1967 wakeup(ls);
1968 VI_UNLOCK(vp);
1969
1970 /*
1971 * Call the iterator function for each lock in turn. If the
1972 * iterator returns an error code, just free the rest of the
1973 * lockdesc structures.
1974 */
1975 error = 0;
1976 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1977 STAILQ_REMOVE_HEAD(&locks, link);
1978 if (!error)
1979 error = fn(ldesc->vp, &ldesc->fl, arg);
1980 vrele(ldesc->vp);
1981 free(ldesc, M_LOCKF);
1982 }
1983
1984 return (error);
1985 }
1986
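/*
 * Iterator callback for lf_clearremotesys(): release the described
 * lock with VOP_ADVLOCK(F_UNLCK).
 */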
1987 static int
1988 lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
1989 {
1990
1991 VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
1992 return (0);
1993 }
1994
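/*
 * Release all locks held on behalf of the given remote sysid.
 */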
1995 void
1996 lf_clearremotesys(int sysid)
1997 {
1998
1999 KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
2000 lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
2001 }
2002
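/*
 * Count the lock references held on behalf of the given sysid by
 * summing the reference counts of its lock owners.
 */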
2003 int
2004 lf_countlocks(int sysid)
2005 {
2006 int i;
2007 struct lock_owner *lo;
2008 int count;
2009
2010 count = 0;
2011 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++) {
2012 sx_xlock(&lf_lock_owners[i].lock);
2013 LIST_FOREACH(lo, &lf_lock_owners[i].list, lo_link)
2014 if (lo->lo_sysid == sysid)
2015 count += lo->lo_refs;
2016 sx_xunlock(&lf_lock_owners[i].lock);
2017 }
2018
2019 return (count);
2020 }
2021
2022 #ifdef LOCKF_DEBUG
2023
2024 /*
2025 * Return non-zero if y is reachable from x using a brute force
2026 * search. If reachable and path is non-null, return the route taken
2027 * in path.
2028 */
2029 static int
2030 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
2031 struct owner_vertex_list *path)
2032 {
2033 struct owner_edge *e;
2034
2035 if (x == y) {
2036 if (path)
2037 TAILQ_INSERT_HEAD(path, x, v_link);
2038 return 1;
2039 }
2040
2041 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2042 if (graph_reaches(e->e_to, y, path)) {
2043 if (path)
2044 TAILQ_INSERT_HEAD(path, x, v_link);
2045 return 1;
2046 }
2047 }
2048 return 0;
2049 }
2050
2051 /*
2052 * Perform consistency checks on the graph. Make sure the values of
2053 * v_order are correct. If checkorder is non-zero, also check that no
2054 * vertex can reach any other vertex with a smaller order.
2055 */
2056 static void
2057 graph_check(struct owner_graph *g, int checkorder)
2058 {
2059 int i, j;
2060
2061 for (i = 0; i < g->g_size; i++) {
2062 if (!g->g_vertices[i]->v_owner)
2063 continue;
2064 KASSERT(g->g_vertices[i]->v_order == i,
2065 ("lock graph vertices disordered"));
2066 if (checkorder) {
2067 for (j = 0; j < i; j++) {
2068 if (!g->g_vertices[j]->v_owner)
2069 continue;
2070 KASSERT(!graph_reaches(g->g_vertices[i],
2071 g->g_vertices[j], NULL),
2072 ("lock graph vertices disordered"));
2073 }
2074 }
2075 }
2076 }
2077
2078 static void
2079 graph_print_vertices(struct owner_vertex_list *set)
2080 {
2081 struct owner_vertex *v;
2082
2083 printf("{ ");
2084 TAILQ_FOREACH(v, set, v_link) {
2085 printf("%d:", v->v_order);
2086 lf_print_owner(v->v_owner);
2087 if (TAILQ_NEXT(v, v_link))
2088 printf(", ");
2089 }
2090 printf(" }\n");
2091 }
2092
2093 #endif
2094
2095 /*
2096 * Calculate the sub-set of vertices v from the affected region [y..x]
2097 * where v is reachable from y. Return -1 if a loop was detected
2098 * (i.e. x is reachable from y), otherwise return the number of
2099 * vertices in this subset.
2100 */
2101 static int
2102 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
2103 struct owner_vertex *y, struct owner_vertex_list *delta)
2104 {
2105 uint32_t gen;
2106 struct owner_vertex *v;
2107 struct owner_edge *e;
2108 int n;
2109
2110 /*
2111 * We start with a set containing just y. Then for each vertex
2112 * v in the set so far unprocessed, we add each vertex that v
2113 * has an out-edge to and that is within the affected region
2114 * [y..x]. If we see the vertex x on our travels, stop
2115 * immediately.
2116 */
2117 TAILQ_INIT(delta);
2118 TAILQ_INSERT_TAIL(delta, y, v_link);
2119 v = y;
2120 n = 1;
2121 gen = g->g_gen;
2122 while (v) {
2123 LIST_FOREACH(e, &v->v_outedges, e_outlink) {
2124 if (e->e_to == x)
2125 return -1;
2126 if (e->e_to->v_order < x->v_order
2127 && e->e_to->v_gen != gen) {
2128 e->e_to->v_gen = gen;
2129 TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
2130 n++;
2131 }
2132 }
2133 v = TAILQ_NEXT(v, v_link);
2134 }
2135
2136 return (n);
2137 }
2138
2139 /*
2140 * Calculate the sub-set of vertices v from the affected region [y..x]
2141 * where v reaches x. Return the number of vertices in this subset.
2142 */
2143 static int
2144 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
2145 struct owner_vertex *y, struct owner_vertex_list *delta)
2146 {
2147 uint32_t gen;
2148 struct owner_vertex *v;
2149 struct owner_edge *e;
2150 int n;
2151
2152 /*
2153 * We start with a set containing just x. Then for each vertex
2154 * v in the set so far unprocessed, we add each vertex that v
2155 * has an in-edge from and that is within the affected region
2156 * [y..x].
2157 */
2158 TAILQ_INIT(delta);
2159 TAILQ_INSERT_TAIL(delta, x, v_link);
2160 v = x;
2161 n = 1;
2162 gen = g->g_gen;
2163 while (v) {
2164 LIST_FOREACH(e, &v->v_inedges, e_inlink) {
2165 if (e->e_from->v_order > y->v_order
2166 && e->e_from->v_gen != gen) {
2167 e->e_from->v_gen = gen;
2168 TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
2169 n++;
2170 }
2171 }
2172 v = TAILQ_PREV(v, owner_vertex_list, v_link);
2173 }
2174
2175 return (n);
2176 }
2177
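/*
 * Insertion-sort the v_order values of the vertices in 'set' into the
 * 'indices' array, which already holds 'n' sorted entries. Return the
 * new number of entries.
 */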
2178 static int
2179 graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
2180 {
2181 struct owner_vertex *v;
2182 int i, j;
2183
2184 TAILQ_FOREACH(v, set, v_link) {
2185 for (i = n;
2186 i > 0 && indices[i - 1] > v->v_order; i--)
2187 ;
2188 for (j = n - 1; j >= i; j--)
2189 indices[j + 1] = indices[j];
2190 indices[i] = v->v_order;
2191 n++;
2192 }
2193
2194 return (n);
2195 }
2196
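/*
 * Assign new v_order values to the vertices in 'set', taking them in
 * increasing order of their old v_order and consuming 'indices'
 * starting at 'nextunused'. Return the next unused index.
 */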
2197 static int
2198 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
2199 struct owner_vertex_list *set)
2200 {
2201 struct owner_vertex *v, *vlowest;
2202
2203 while (!TAILQ_EMPTY(set)) {
2204 vlowest = NULL;
2205 TAILQ_FOREACH(v, set, v_link) {
2206 if (!vlowest || v->v_order < vlowest->v_order)
2207 vlowest = v;
2208 }
2209 TAILQ_REMOVE(set, vlowest, v_link);
2210 vlowest->v_order = indices[nextunused];
2211 g->g_vertices[vlowest->v_order] = vlowest;
2212 nextunused++;
2213 }
2214
2215 return (nextunused);
2216 }
2217
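/*
 * Add an edge x->y to the owner graph (or bump the reference count of
 * an existing edge), re-ordering vertices as needed to preserve the
 * topological order. Return EDEADLK if the new edge would create a
 * cycle.
 */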
2218 static int
2219 graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
2220 struct owner_vertex *y)
2221 {
2222 struct owner_edge *e;
2223 struct owner_vertex_list deltaF, deltaB;
2224 int nF, n, vi, i;
2225 int *indices;
2226 int nB __unused;
2227
2228 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2229
2230 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2231 if (e->e_to == y) {
2232 e->e_refs++;
2233 return (0);
2234 }
2235 }
2236
2237 #ifdef LOCKF_DEBUG
2238 if (lockf_debug & 8) {
2239 printf("adding edge %d:", x->v_order);
2240 lf_print_owner(x->v_owner);
2241 printf(" -> %d:", y->v_order);
2242 lf_print_owner(y->v_owner);
2243 printf("\n");
2244 }
2245 #endif
2246 if (y->v_order < x->v_order) {
2247 /*
2248 * The new edge violates the order. First find the set
2249 * of affected vertices reachable from y (deltaF) and
2250 * the set of affected vertices that reach x
2251 * (deltaB), using the graph generation number to
2252 * detect whether we have visited a given vertex
2253 * already. We re-order the graph so that each vertex
2254 * in deltaB appears before each vertex in deltaF.
2255 *
2256 * If x is a member of deltaF, then the new edge would
2257 * create a cycle. Otherwise, we may assume that
2258 * deltaF and deltaB are disjoint.
2259 */
2260 g->g_gen++;
2261 if (g->g_gen == 0) {
2262 /*
2263 * Generation wrap.
2264 */
2265 for (vi = 0; vi < g->g_size; vi++) {
2266 g->g_vertices[vi]->v_gen = 0;
2267 }
2268 g->g_gen++;
2269 }
2270 nF = graph_delta_forward(g, x, y, &deltaF);
2271 if (nF < 0) {
2272 #ifdef LOCKF_DEBUG
2273 if (lockf_debug & 8) {
2274 struct owner_vertex_list path;
2275 printf("deadlock: ");
2276 TAILQ_INIT(&path);
2277 graph_reaches(y, x, &path);
2278 graph_print_vertices(&path);
2279 }
2280 #endif
2281 return (EDEADLK);
2282 }
2283
2284 #ifdef LOCKF_DEBUG
2285 if (lockf_debug & 8) {
2286 printf("re-ordering graph vertices\n");
2287 printf("deltaF = ");
2288 graph_print_vertices(&deltaF);
2289 }
2290 #endif
2291
2292 nB = graph_delta_backward(g, x, y, &deltaB);
2293
2294 #ifdef LOCKF_DEBUG
2295 if (lockf_debug & 8) {
2296 printf("deltaB = ");
2297 graph_print_vertices(&deltaB);
2298 }
2299 #endif
2300
2301 /*
2302 * We first build a set of vertex indices (vertex
2303 * order values) that we may use, then we re-assign
2304 * orders first to those vertices in deltaB, then to
2305 * deltaF. Note that the contents of deltaF and deltaB
2306 * may be partially disordered - we perform an
2307 * insertion sort while building our index set.
2308 */
2309 indices = g->g_indexbuf;
2310 n = graph_add_indices(indices, 0, &deltaF);
2311 graph_add_indices(indices, n, &deltaB);
2312
2313 /*
2314 * We must also be sure to maintain the relative
2315 * ordering of deltaF and deltaB when re-assigning
2316 * vertices. We do this by iteratively removing the
2317 * lowest ordered element from the set and assigning
2318 * it the next value from our new ordering.
2319 */
2320 i = graph_assign_indices(g, indices, 0, &deltaB);
2321 graph_assign_indices(g, indices, i, &deltaF);
2322
2323 #ifdef LOCKF_DEBUG
2324 if (lockf_debug & 8) {
2325 struct owner_vertex_list set;
2326 TAILQ_INIT(&set);
2327 for (i = 0; i < nB + nF; i++)
2328 TAILQ_INSERT_TAIL(&set,
2329 g->g_vertices[indices[i]], v_link);
2330 printf("new ordering = ");
2331 graph_print_vertices(&set);
2332 }
2333 #endif
2334 }
2335
2336 KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
2337
2338 #ifdef LOCKF_DEBUG
2339 if (lockf_debug & 8) {
2340 graph_check(g, TRUE);
2341 }
2342 #endif
2343
2344 e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
2345
2346 LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
2347 LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
2348 e->e_refs = 1;
2349 e->e_from = x;
2350 e->e_to = y;
2351
2352 return (0);
2353 }
2354
2355 /*
2356 * Remove an edge x->y from the graph.
2357 */
2358 static void
2359 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
2360 struct owner_vertex *y)
2361 {
2362 struct owner_edge *e;
2363
2364 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2365
2366 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2367 if (e->e_to == y)
2368 break;
2369 }
2370 KASSERT(e, ("Removing non-existent edge from deadlock graph"));
2371
2372 e->e_refs--;
2373 if (e->e_refs == 0) {
2374 #ifdef LOCKF_DEBUG
2375 if (lockf_debug & 8) {
2376 printf("removing edge %d:", x->v_order);
2377 lf_print_owner(x->v_owner);
2378 printf(" -> %d:", y->v_order);
2379 lf_print_owner(y->v_owner);
2380 printf("\n");
2381 }
2382 #endif
2383 LIST_REMOVE(e, e_outlink);
2384 LIST_REMOVE(e, e_inlink);
2385 free(e, M_LOCKF);
2386 }
2387 }
2388
2389 /*
2390 * Allocate a new vertex for the owner graph, growing the vertex array
2391 * and index buffer as needed (the M_WAITOK allocations cannot fail).
2392 */
2393 static struct owner_vertex *
2394 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
2395 {
2396 struct owner_vertex *v;
2397
2398 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2399
2400 v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
2401 if (g->g_size == g->g_space) {
2402 g->g_vertices = realloc(g->g_vertices,
2403 2 * g->g_space * sizeof(struct owner_vertex *),
2404 M_LOCKF, M_WAITOK);
2405 free(g->g_indexbuf, M_LOCKF);
2406 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
2407 M_LOCKF, M_WAITOK);
2408 g->g_space = 2 * g->g_space;
2409 }
2410 v->v_order = g->g_size;
2411 v->v_gen = g->g_gen;
2412 g->g_vertices[g->g_size] = v;
2413 g->g_size++;
2414
2415 LIST_INIT(&v->v_outedges);
2416 LIST_INIT(&v->v_inedges);
2417 v->v_owner = lo;
2418
2419 return (v);
2420 }
2421
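/*
 * Free a vertex which no longer has any edges, compacting the graph's
 * vertex array.
 */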
2422 static void
2423 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
2424 {
2425 struct owner_vertex *w;
2426 int i;
2427
2428 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2429
2430 KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
2431 KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
2432
2433 /*
2434 * Remove from the graph's array and close up the gap,
2435 * renumbering the other vertices.
2436 */
2437 for (i = v->v_order + 1; i < g->g_size; i++) {
2438 w = g->g_vertices[i];
2439 w->v_order--;
2440 g->g_vertices[i - 1] = w;
2441 }
2442 g->g_size--;
2443
2444 free(v, M_LOCKF);
2445 }
2446
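/*
 * Initialise an owner graph with space for an initial batch of vertices.
 */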
2447 static struct owner_graph *
2448 graph_init(struct owner_graph *g)
2449 {
2450
2451 g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
2452 M_LOCKF, M_WAITOK);
2453 g->g_size = 0;
2454 g->g_space = 10;
2455 g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
2456 g->g_gen = 0;
2457
2458 return (g);
2459 }
2460
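/*
 * Linked wrapper around struct kinfo_lockf, used by vfs_report_lockf()
 * to queue records while the lock-state locks are held.
 */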
2461 struct kinfo_lockf_linked {
2462 struct kinfo_lockf kl;
2463 struct vnode *vp;
2464 STAILQ_ENTRY(kinfo_lockf_linked) link;
2465 };
2466
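/*
 * Append a kinfo_lockf record to 'sb' for each active advisory lock on
 * a vnode belonging to the mount point 'mp'.
 */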
2467 int
2468 vfs_report_lockf(struct mount *mp, struct sbuf *sb)
2469 {
2470 struct lockf *ls;
2471 struct lockf_entry *lf;
2472 struct kinfo_lockf_linked *klf;
2473 struct vnode *vp;
2474 struct ucred *ucred;
2475 char *fullpath, *freepath;
2476 struct stat stt;
2477 STAILQ_HEAD(, kinfo_lockf_linked) locks;
2478 int error, gerror;
2479
2480 STAILQ_INIT(&locks);
2481 sx_slock(&lf_lock_states_lock);
2482 LIST_FOREACH(ls, &lf_lock_states, ls_link) {
2483 sx_slock(&ls->ls_lock);
2484 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
2485 vp = lf->lf_vnode;
2486 if (VN_IS_DOOMED(vp) || vp->v_mount != mp)
2487 continue;
2488 vhold(vp);
2489 klf = malloc(sizeof(struct kinfo_lockf_linked),
2490 M_LOCKF, M_WAITOK | M_ZERO);
2491 klf->vp = vp;
2492 klf->kl.kl_structsize = sizeof(struct kinfo_lockf);
2493 klf->kl.kl_start = lf->lf_start;
2494 klf->kl.kl_len = lf->lf_end == OFF_MAX ? 0 :
2495 lf->lf_end - lf->lf_start + 1;
2496 klf->kl.kl_rw = lf->lf_type == F_RDLCK ?
2497 KLOCKF_RW_READ : KLOCKF_RW_WRITE;
2498 if (lf->lf_owner->lo_sysid != 0) {
2499 klf->kl.kl_pid = lf->lf_owner->lo_pid;
2500 klf->kl.kl_sysid = lf->lf_owner->lo_sysid;
2501 klf->kl.kl_type = KLOCKF_TYPE_REMOTE;
2502 } else if (lf->lf_owner->lo_pid == -1) {
2503 klf->kl.kl_pid = -1;
2504 klf->kl.kl_sysid = 0;
2505 klf->kl.kl_type = KLOCKF_TYPE_FLOCK;
2506 } else {
2507 klf->kl.kl_pid = lf->lf_owner->lo_pid;
2508 klf->kl.kl_sysid = 0;
2509 klf->kl.kl_type = KLOCKF_TYPE_PID;
2510 }
2511 STAILQ_INSERT_TAIL(&locks, klf, link);
2512 }
2513 sx_sunlock(&ls->ls_lock);
2514 }
2515 sx_sunlock(&lf_lock_states_lock);
2516
2517 gerror = 0;
2518 ucred = curthread->td_ucred;
2519 while ((klf = STAILQ_FIRST(&locks)) != NULL) {
2520 STAILQ_REMOVE_HEAD(&locks, link);
2521 vp = klf->vp;
2522 if (gerror == 0 && vn_lock(vp, LK_SHARED) == 0) {
2523 error = prison_canseemount(ucred, vp->v_mount);
2524 if (error == 0)
2525 error = VOP_STAT(vp, &stt, ucred, NOCRED);
2526 VOP_UNLOCK(vp);
2527 if (error == 0) {
2528 klf->kl.kl_file_fsid = stt.st_dev;
2529 klf->kl.kl_file_rdev = stt.st_rdev;
2530 klf->kl.kl_file_fileid = stt.st_ino;
2531 freepath = NULL;
2532 fullpath = "-";
2533 error = vn_fullpath(vp, &fullpath, &freepath);
2534 if (error == 0)
2535 strlcpy(klf->kl.kl_path, fullpath,
2536 sizeof(klf->kl.kl_path));
2537 free(freepath, M_TEMP);
2538 if (sbuf_bcat(sb, &klf->kl,
2539 klf->kl.kl_structsize) != 0) {
2540 gerror = sbuf_error(sb);
2541 }
2542 }
2543 }
2544 vdrop(vp);
2545 free(klf, M_LOCKF);
2546 }
2547
2548 return (gerror);
2549 }
2550
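/*
 * Walk the mount list, asking each filesystem to report its advisory
 * locks into 'sb'.
 */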
2551 static int
2552 sysctl_kern_lockf_run(struct sbuf *sb)
2553 {
2554 struct mount *mp;
2555 int error;
2556
2557 error = 0;
2558 mtx_lock(&mountlist_mtx);
2559 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2560 error = vfs_busy(mp, MBF_MNTLSTLOCK);
2561 if (error != 0)
2562 continue;
2563 error = mp->mnt_op->vfs_report_lockf(mp, sb);
2564 mtx_lock(&mountlist_mtx);
2565 vfs_unbusy(mp);
2566 if (error != 0)
2567 break;
2568 }
2569 mtx_unlock(&mountlist_mtx);
2570 return (error);
2571 }
2572
2573 static int
2574 sysctl_kern_lockf(SYSCTL_HANDLER_ARGS)
2575 {
2576 struct sbuf sb;
2577 int error, error2;
2578
2579 sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_lockf) * 5, req);
2580 sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2581 error = sysctl_kern_lockf_run(&sb);
2582 error2 = sbuf_finish(&sb);
2583 sbuf_delete(&sb);
2584 return (error != 0 ? error : error2);
2585 }
2586 SYSCTL_PROC(_kern, KERN_LOCKF, lockf,
2587 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
2588 0, 0, sysctl_kern_lockf, "S,lockf",
2589 "Advisory locks table");
2590
2591 #ifdef LOCKF_DEBUG
2592 /*
2593 * Print description of a lock owner
2594 */
2595 static void
2596 lf_print_owner(struct lock_owner *lo)
2597 {
2598
2599 if (lo->lo_flags & F_REMOTE) {
2600 printf("remote pid %d, system %d",
2601 lo->lo_pid, lo->lo_sysid);
2602 } else if (lo->lo_flags & F_FLOCK) {
2603 printf("file %p", lo->lo_id);
2604 } else {
2605 printf("local pid %d", lo->lo_pid);
2606 }
2607 }
2608
2609 /*
2610 * Print out a lock.
2611 */
2612 static void
2613 lf_print(char *tag, struct lockf_entry *lock)
2614 {
2615
2616 printf("%s: lock %p for ", tag, (void *)lock);
2617 lf_print_owner(lock->lf_owner);
2618 printf("\nvnode %p", lock->lf_vnode);
2619 VOP_PRINT(lock->lf_vnode);
2620 printf(" %s, start %jd, end ",
2621 lock->lf_type == F_RDLCK ? "shared" :
2622 lock->lf_type == F_WRLCK ? "exclusive" :
2623 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
2624 (intmax_t)lock->lf_start);
2625 if (lock->lf_end == OFF_MAX)
2626 printf("EOF");
2627 else
2628 printf("%jd", (intmax_t)lock->lf_end);
2629 if (!LIST_EMPTY(&lock->lf_outedges))
2630 printf(" block %p\n",
2631 (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
2632 else
2633 printf("\n");
2634 }
2635
2636 static void
2637 lf_printlist(char *tag, struct lockf_entry *lock)
2638 {
2639 struct lockf_entry *lf, *blk;
2640 struct lockf_edge *e;
2641
2642 printf("%s: Lock list for vnode %p:\n", tag, lock->lf_vnode);
2643 LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
2644 printf("\tlock %p for ",(void *)lf);
2645 lf_print_owner(lock->lf_owner);
2646 printf(", %s, start %jd, end %jd",
2647 lf->lf_type == F_RDLCK ? "shared" :
2648 lf->lf_type == F_WRLCK ? "exclusive" :
2649 lf->lf_type == F_UNLCK ? "unlock" :
2650 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
2651 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
2652 blk = e->le_to;
2653 printf("\n\t\tlock request %p for ", (void *)blk);
2654 lf_print_owner(blk->lf_owner);
2655 printf(", %s, start %jd, end %jd",
2656 blk->lf_type == F_RDLCK ? "shared" :
2657 blk->lf_type == F_WRLCK ? "exclusive" :
2658 blk->lf_type == F_UNLCK ? "unlock" :
2659 "unknown", (intmax_t)blk->lf_start,
2660 (intmax_t)blk->lf_end);
2661 if (!LIST_EMPTY(&blk->lf_inedges))
2662 panic("lf_printlist: bad list");
2663 }
2664 printf("\n");
2665 }
2666 }
2667 #endif /* LOCKF_DEBUG */
2668