Lines Matching +full:lock
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
29 /*-
70 #include <sys/lock.h>
92 static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
172 * This structure is used to keep track of both local and remote lock
174 * the lock owner structure. Each possible lock owner (local proc for
179 * If a lock owner has a lock that blocks some other lock or a lock
180 * that is waiting for some other lock, it also has a vertex in the
184 * (s) locked by state->ls_lock
196 pid_t lo_pid; /* (c) Process Id of the lock owner */
197 int lo_sysid; /* (c) System Id of the lock owner */
198 int lo_hash; /* (c) Used to lock the appropriate chain */
205 struct sx lock; member
218 * are terminal nodes in the graph (i.e. have no out-going
219 * edges). Pending locks have out-going edges to each blocking active
220 * lock that prevents the lock from being granted and also to each
221 * older pending lock that would block them if it was active. The
224 * out-going edges or new active locks which only add in-coming edges)
225 * therefore they cannot create loops in the lock graph.
227 * The second graph is a global graph of lock owners. Each lock owner
230 * corresponding to owner of the new pending lock and the owner of the
231 * lock upon which it waits. In order to prevent deadlock, we only add
234 * The lock owner graph is topologically sorted, i.e. if a node has
245 LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
246 LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */
248 struct owner_vertex *e_from; /* (c) out-going from here */
249 struct owner_vertex *e_to; /* (c) in-coming to here */
257 struct owner_edge_list v_outedges;/* (g) list of out-edges */
258 struct owner_edge_list v_inedges; /* (g) list of in-edges */
259 struct lock_owner *v_owner; /* (c) corresponding lock owner */
268 uint32_t g_gen; /* (g) increment when re-ordering */
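The two comment blocks above describe the deadlock-avoidance scheme: pending locks carry edges to the locks that block them, and the global owner graph is kept acyclic so that a request which would close a cycle can be refused with EDEADLK. The following is a minimal userland sketch of that policy (a brute-force reachability check before adding an edge); the vertex count and names are made up, and it is not the kernel's incremental algorithm.

/*
 * Minimal userland sketch of the owner-graph policy described above:
 * refuse to add an edge A -> B when B already reaches A, since that
 * would close a cycle (a deadlock).  Vertex count and names are made
 * up; the kernel keeps the graph sorted instead of searching it.
 */
#include <stdbool.h>
#include <stdio.h>

#define	NOWNERS	4

static bool edge[NOWNERS][NOWNERS];	/* edge[a][b]: owner a waits on b */

/* Brute-force reachability, much like graph_reaches() above. */
static bool
reaches(int from, int to)
{
	if (from == to)
		return (true);
	for (int next = 0; next < NOWNERS; next++)
		if (edge[from][next] && reaches(next, to))
			return (true);
	return (false);
}

/* Only add edges that keep the graph acyclic; otherwise report deadlock. */
static int
add_edge(int a, int b)
{
	if (reaches(b, a))
		return (-1);		/* would deadlock (EDEADLK) */
	edge[a][b] = true;
	return (0);
}

int
main(void)
{
	add_edge(0, 1);			/* owner 0 waits on owner 1 */
	add_edge(1, 2);			/* owner 1 waits on owner 2 */
	printf("adding 2 -> 0: %s\n",
	    add_edge(2, 0) == 0 ? "added" : "refused, would deadlock");
	return (0);
}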
282 sx_init(&lf_lock_states_lock, "lock states lock"); in lf_init()
286 sx_init(&lf_lock_owners[i].lock, "lock owners lock"); in lf_init()
290 sx_init(&lf_owner_graph_lock, "owner graph lock"); in lf_init()
296 * Generate a hash value for a lock owner.
304 h = HASHSTEP(0, fl->l_pid); in lf_hash_owner()
305 h = HASHSTEP(h, fl->l_sysid); in lf_hash_owner()
316 * Return true if a lock owner matches the details passed to
324 return lo->lo_pid == fl->l_pid in lf_owner_matches()
325 && lo->lo_sysid == fl->l_sysid; in lf_owner_matches()
327 return lo->lo_id == id; in lf_owner_matches()
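lf_hash_owner() folds the identifying fields into a chain index, and lf_owner_matches() compares remote owners by (pid, sysid) and local owners by the opaque id pointer. A rough userland sketch of the chain-selection step follows; mix() merely stands in for HASHSTEP and the table size is arbitrary.

/*
 * Userland sketch of how a lock owner is mapped to a hash chain: fold
 * the identifying fields into a chain index, then (in the kernel) match
 * remote owners on (pid, sysid) and local owners on the opaque id
 * pointer.  mix() stands in for HASHSTEP and the table size is
 * arbitrary.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

#define	OWNER_HASH_SIZE	256

static uint32_t
mix(uint32_t h, uint32_t x)
{
	return (h * 31 + x);		/* illustrative mixing step */
}

static unsigned
owner_hash(pid_t pid, int sysid)
{
	uint32_t h;

	h = mix(0, (uint32_t)pid);
	h = mix(h, (uint32_t)sysid);
	return (h % OWNER_HASH_SIZE);
}

int
main(void)
{
	printf("chain for local pid 100:           %u\n", owner_hash(100, 0));
	printf("chain for remote pid 100, sysid 7: %u\n", owner_hash(100, 7));
	return (0);
}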
340 printf("Allocated lock %p\n", lf); in lf_alloc_lock()
343 sx_xlock(&lf_lock_owners[lo->lo_hash].lock); in lf_alloc_lock()
344 lo->lo_refs++; in lf_alloc_lock()
345 sx_xunlock(&lf_lock_owners[lo->lo_hash].lock); in lf_alloc_lock()
346 lf->lf_owner = lo; in lf_alloc_lock()
353 lf_free_lock(struct lockf_entry *lock) in lf_free_lock() argument
357 KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock)); in lf_free_lock()
358 if (--lock->lf_refs > 0) in lf_free_lock()
362 * reclaim the entry if this is the last lock in lf_free_lock()
365 struct lock_owner *lo = lock->lf_owner; in lf_free_lock()
367 KASSERT(LIST_EMPTY(&lock->lf_outedges), in lf_free_lock()
368 ("freeing lock with dependencies")); in lf_free_lock()
369 KASSERT(LIST_EMPTY(&lock->lf_inedges), in lf_free_lock()
370 ("freeing lock with dependants")); in lf_free_lock()
371 chainlock = &lf_lock_owners[lo->lo_hash].lock; in lf_free_lock()
373 KASSERT(lo->lo_refs > 0, ("lock owner refcount")); in lf_free_lock()
374 lo->lo_refs--; in lf_free_lock()
375 if (lo->lo_refs == 0) { in lf_free_lock()
378 printf("lf_free_lock: freeing lock owner %p\n", in lf_free_lock()
381 if (lo->lo_vertex) { in lf_free_lock()
384 lo->lo_vertex); in lf_free_lock()
391 printf("Freed lock owner %p\n", lo); in lf_free_lock()
396 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) { in lf_free_lock()
397 vrele(lock->lf_vnode); in lf_free_lock()
398 lock->lf_vnode = NULL; in lf_free_lock()
402 printf("Freed lock %p\n", lock); in lf_free_lock()
404 free(lock, M_LOCKF); in lf_free_lock()
416 struct flock *fl = ap->a_fl; in lf_advlockasync()
417 struct lockf_entry *lock; in lf_advlockasync() local
418 struct vnode *vp = ap->a_vp; in lf_advlockasync()
419 caddr_t id = ap->a_id; in lf_advlockasync()
420 int flags = ap->a_flags; in lf_advlockasync()
427 * Handle the F_UNLCKSYS case first - no need to mess about in lf_advlockasync()
428 * creating a lock owner for this one. in lf_advlockasync()
430 if (ap->a_op == F_UNLCKSYS) { in lf_advlockasync()
431 lf_clearremotesys(fl->l_sysid); in lf_advlockasync()
438 switch (fl->l_whence) { in lf_advlockasync()
445 start = fl->l_start; in lf_advlockasync()
450 (fl->l_start > 0 && size > OFF_MAX - fl->l_start)) in lf_advlockasync()
452 start = size + fl->l_start; in lf_advlockasync()
460 if (fl->l_len < 0) { in lf_advlockasync()
463 end = start - 1; in lf_advlockasync()
464 start += fl->l_len; in lf_advlockasync()
467 } else if (fl->l_len == 0) { in lf_advlockasync()
470 oadd = fl->l_len - 1; in lf_advlockasync()
471 if (oadd > OFF_MAX - start) in lf_advlockasync()
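The code above converts the caller's struct flock into an inclusive byte range: l_whence picks the base offset, a negative l_len makes the range extend backwards from start, l_len == 0 means "to end of file", and every addition is checked against OFF_MAX. A userland sketch of the same conversion (simplified error handling, with a fallback OFF_MAX definition):

/*
 * Userland sketch of the flock-to-range conversion performed above:
 * resolve l_whence, then turn (start, l_len) into an inclusive
 * [start, end] range where end == OFF_MAX means "to end of file".
 * Overflow handling is simplified relative to the kernel's.
 */
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#ifndef OFF_MAX				/* fallback if <limits.h> lacks it */
#define	OFF_MAX	((off_t)(((uintmax_t)1 << (sizeof(off_t) * 8 - 1)) - 1))
#endif

static int
flock_to_range(const struct flock *fl, off_t size, off_t foffset,
    off_t *startp, off_t *endp)
{
	off_t start, end, oadd;

	switch (fl->l_whence) {
	case SEEK_SET:
		start = fl->l_start;
		break;
	case SEEK_CUR:
		start = foffset + fl->l_start;
		break;
	case SEEK_END:
		if (fl->l_start > 0 && size > OFF_MAX - fl->l_start)
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;
	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len < 0) {
		/* Range extends backwards from start. */
		if (start == 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	} else if (fl->l_len == 0) {
		end = OFF_MAX;		/* lock to end of file */
	} else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
	}
	*startp = start;
	*endp = end;
	return (0);
}

int
main(void)
{
	struct flock fl = { .l_whence = SEEK_SET, .l_start = 100, .l_len = 0 };
	off_t start, end;

	if (flock_to_range(&fl, 4096, 0, &start, &end) == 0)
		printf("range [%jd, %jd]\n", (intmax_t)start, (intmax_t)end);
	return (0);
}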
481 if (ap->a_op != F_SETLK && (*statep) == NULL) { in lf_advlockasync()
484 fl->l_type = F_UNLCK; in lf_advlockasync()
492 * Map our arguments to an existing lock owner or create one in lf_advlockasync()
496 sx_xlock(&lf_lock_owners[hash].lock); in lf_advlockasync()
502 * We initialise the lock with a reference in lf_advlockasync()
510 printf("Allocated lock owner %p\n", lo); in lf_advlockasync()
513 lo->lo_refs = 1; in lf_advlockasync()
514 lo->lo_flags = flags; in lf_advlockasync()
515 lo->lo_id = id; in lf_advlockasync()
516 lo->lo_hash = hash; in lf_advlockasync()
518 lo->lo_pid = fl->l_pid; in lf_advlockasync()
519 lo->lo_sysid = fl->l_sysid; in lf_advlockasync()
521 lo->lo_pid = -1; in lf_advlockasync()
522 lo->lo_sysid = 0; in lf_advlockasync()
525 lo->lo_pid = p->p_pid; in lf_advlockasync()
526 lo->lo_sysid = 0; in lf_advlockasync()
528 lo->lo_vertex = NULL; in lf_advlockasync()
532 printf("lf_advlockasync: new lock owner %p ", lo); in lf_advlockasync()
541 * We have seen this lock owner before, increase its in lf_advlockasync()
545 lo->lo_refs++; in lf_advlockasync()
547 sx_xunlock(&lf_lock_owners[hash].lock); in lf_advlockasync()
554 lock = lf_alloc_lock(NULL); in lf_advlockasync()
555 lock->lf_refs = 1; in lf_advlockasync()
556 lock->lf_start = start; in lf_advlockasync()
557 lock->lf_end = end; in lf_advlockasync()
558 lock->lf_owner = lo; in lf_advlockasync()
559 lock->lf_vnode = vp; in lf_advlockasync()
563 * the vnode at any time - we have to ref it here to in lf_advlockasync()
569 lock->lf_type = fl->l_type; in lf_advlockasync()
570 LIST_INIT(&lock->lf_outedges); in lf_advlockasync()
571 LIST_INIT(&lock->lf_inedges); in lf_advlockasync()
572 lock->lf_async_task = ap->a_task; in lf_advlockasync()
573 lock->lf_flags = ap->a_flags; in lf_advlockasync()
577 * and create a new one if necessary - the caller's *statep in lf_advlockasync()
584 lf_free_lock(lock); in lf_advlockasync()
598 sx_init(&ls->ls_lock, "ls_lock"); in lf_advlockasync()
599 LIST_INIT(&ls->ls_active); in lf_advlockasync()
600 LIST_INIT(&ls->ls_pending); in lf_advlockasync()
601 ls->ls_threads = 1; in lf_advlockasync()
617 sx_destroy(&ls->ls_lock); in lf_advlockasync()
619 lf_free_lock(lock); in lf_advlockasync()
627 MPASS(state->ls_threads >= 0); in lf_advlockasync()
628 state->ls_threads++; in lf_advlockasync()
634 sx_destroy(&ls->ls_lock); in lf_advlockasync()
638 MPASS(state->ls_threads >= 0); in lf_advlockasync()
639 state->ls_threads++; in lf_advlockasync()
643 sx_xlock(&state->ls_lock); in lf_advlockasync()
645 * Recheck the doomed vnode after state->ls_lock is in lf_advlockasync()
651 MPASS(state->ls_threads > 0); in lf_advlockasync()
652 state->ls_threads--; in lf_advlockasync()
655 sx_xunlock(&state->ls_lock); in lf_advlockasync()
656 lf_free_lock(lock); in lf_advlockasync()
660 switch (ap->a_op) { in lf_advlockasync()
662 error = lf_setlock(state, lock, vp, ap->a_cookiep); in lf_advlockasync()
666 error = lf_clearlock(state, lock); in lf_advlockasync()
667 lf_free_lock(lock); in lf_advlockasync()
671 error = lf_getlock(state, lock, fl); in lf_advlockasync()
672 lf_free_lock(lock); in lf_advlockasync()
676 if (ap->a_cookiep) in lf_advlockasync()
677 error = lf_cancel(state, lock, *ap->a_cookiep); in lf_advlockasync()
680 lf_free_lock(lock); in lf_advlockasync()
684 lf_free_lock(lock); in lf_advlockasync()
692 * lock list becoming disordered or containing mutually in lf_advlockasync()
694 * which should be active (i.e. have no out-going edges). in lf_advlockasync()
696 LIST_FOREACH(lock, &state->ls_active, lf_link) { in lf_advlockasync()
698 if (LIST_NEXT(lock, lf_link)) in lf_advlockasync()
699 KASSERT((lock->lf_start in lf_advlockasync()
700 <= LIST_NEXT(lock, lf_link)->lf_start), in lf_advlockasync()
702 LIST_FOREACH(lf, &state->ls_active, lf_link) { in lf_advlockasync()
703 if (lock == lf) in lf_advlockasync()
705 KASSERT(!lf_blocks(lock, lf), in lf_advlockasync()
707 if (lock->lf_owner == lf->lf_owner) in lf_advlockasync()
708 KASSERT(!lf_overlaps(lock, lf), in lf_advlockasync()
712 LIST_FOREACH(lock, &state->ls_pending, lf_link) { in lf_advlockasync()
713 KASSERT(!LIST_EMPTY(&lock->lf_outedges), in lf_advlockasync()
714 ("pending lock which should be active")); in lf_advlockasync()
717 sx_xunlock(&state->ls_lock); in lf_advlockasync()
720 MPASS(state->ls_threads > 0); in lf_advlockasync()
721 state->ls_threads--; in lf_advlockasync()
722 if (state->ls_threads != 0) { in lf_advlockasync()
728 KASSERT(ap->a_op == F_SETLK, ("EDOOFUS")); in lf_advlockasync()
739 a.a_vp = ap->a_vp; in lf_advlock()
740 a.a_id = ap->a_id; in lf_advlock()
741 a.a_op = ap->a_op; in lf_advlock()
742 a.a_fl = ap->a_fl; in lf_advlock()
743 a.a_flags = ap->a_flags; in lf_advlock()
754 struct lockf_entry *lock, *nlock; in lf_purgelocks() local
771 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) { in lf_purgelocks()
772 KASSERT(LIST_EMPTY(&state->ls_pending), in lf_purgelocks()
777 MPASS(state->ls_threads >= 0); in lf_purgelocks()
778 state->ls_threads++; in lf_purgelocks()
781 sx_xlock(&state->ls_lock); in lf_purgelocks()
783 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) { in lf_purgelocks()
784 LIST_REMOVE(lock, lf_link); in lf_purgelocks()
785 lf_remove_outgoing(lock); in lf_purgelocks()
786 lf_remove_incoming(lock); in lf_purgelocks()
789 * If it's an async lock, we can just free it in lf_purgelocks()
793 if (lock->lf_async_task) { in lf_purgelocks()
794 lf_free_lock(lock); in lf_purgelocks()
796 lock->lf_flags |= F_INTR; in lf_purgelocks()
797 wakeup(lock); in lf_purgelocks()
801 sx_xunlock(&state->ls_lock); in lf_purgelocks()
808 while (state->ls_threads > 1) in lf_purgelocks()
818 KASSERT(LIST_EMPTY(&state->ls_pending), in lf_purgelocks()
819 ("lock pending for %p", state)); in lf_purgelocks()
820 LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) { in lf_purgelocks()
821 LIST_REMOVE(lock, lf_link); in lf_purgelocks()
822 lf_free_lock(lock); in lf_purgelocks()
828 sx_destroy(&state->ls_lock); in lf_purgelocks()
833 * Return non-zero if locks 'x' and 'y' overlap.
839 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start); in lf_overlaps()
843 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
849 return x->lf_owner != y->lf_owner in lf_blocks()
850 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK) in lf_blocks()
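lf_overlaps() treats lock ranges as inclusive, and lf_blocks() reports a conflict only when the owners differ, at least one lock is exclusive, and the ranges overlap. The standalone sketch below reproduces the same predicates with a couple of worked cases.

/*
 * Standalone version of the two predicates above, using inclusive
 * [start, end] ranges: two locks conflict only if they belong to
 * different owners, at least one is a write lock and the ranges
 * overlap.
 */
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

struct range_lock {
	int	owner;
	short	type;			/* F_RDLCK or F_WRLCK */
	off_t	start, end;		/* inclusive */
};

static bool
overlaps(const struct range_lock *x, const struct range_lock *y)
{
	return (x->start <= y->end && x->end >= y->start);
}

static bool
blocks(const struct range_lock *x, const struct range_lock *y)
{
	return (x->owner != y->owner &&
	    (x->type == F_WRLCK || y->type == F_WRLCK) &&
	    overlaps(x, y));
}

int
main(void)
{
	struct range_lock a = { 1, F_RDLCK, 0, 99 };
	struct range_lock b = { 2, F_WRLCK, 50, 149 };
	struct range_lock c = { 2, F_RDLCK, 50, 149 };

	printf("a vs b: %d\n", blocks(&a, &b));	/* 1: shared vs exclusive */
	printf("a vs c: %d\n", blocks(&a, &c));	/* 0: both shared */
	return (0);
}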
855 * Allocate a lock edge from the free list
865 * Free a lock edge.
875 * Ensure that the lock's owner has a corresponding vertex in the
879 lf_alloc_vertex(struct lockf_entry *lock) in lf_alloc_vertex() argument
883 if (!lock->lf_owner->lo_vertex) in lf_alloc_vertex()
884 lock->lf_owner->lo_vertex = in lf_alloc_vertex()
885 graph_alloc_vertex(g, lock->lf_owner); in lf_alloc_vertex()
889 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
900 LIST_FOREACH(e, &x->lf_outedges, le_outlink) in lf_add_edge()
901 KASSERT(e->le_to != y, ("adding lock edge twice")); in lf_add_edge()
910 error = graph_add_edge(g, x->lf_owner->lo_vertex, in lf_add_edge()
911 y->lf_owner->lo_vertex); in lf_add_edge()
916 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink); in lf_add_edge()
917 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink); in lf_add_edge()
918 e->le_from = x; in lf_add_edge()
919 e->le_to = y; in lf_add_edge()
925 * Remove an edge from the lock graph.
931 struct lockf_entry *x = e->le_from; in lf_remove_edge()
932 struct lockf_entry *y = e->le_to; in lf_remove_edge()
934 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex); in lf_remove_edge()
937 e->le_from = NULL; in lf_remove_edge()
938 e->le_to = NULL; in lf_remove_edge()
943 * Remove all out-going edges from lock x.
950 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) { in lf_remove_outgoing()
956 * Remove all in-coming edges from lock x.
963 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) { in lf_remove_incoming()
969 * Walk the list of locks for the file and create an out-going edge
970 * from lock to each blocking lock.
973 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock) in lf_add_outgoing() argument
978 LIST_FOREACH(overlap, &state->ls_active, lf_link) { in lf_add_outgoing()
983 if (overlap->lf_start > lock->lf_end) in lf_add_outgoing()
985 if (!lf_blocks(lock, overlap)) in lf_add_outgoing()
989 * We've found a blocking lock. Add the corresponding in lf_add_outgoing()
993 error = lf_add_edge(lock, overlap); in lf_add_outgoing()
1000 lf_remove_outgoing(lock); in lf_add_outgoing()
1010 * only happens if we are blocked by at least one active lock in lf_add_outgoing()
1013 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { in lf_add_outgoing()
1014 if (!lf_blocks(lock, overlap)) in lf_add_outgoing()
1017 * We've found a blocking lock. Add the corresponding in lf_add_outgoing()
1021 error = lf_add_edge(lock, overlap); in lf_add_outgoing()
1028 lf_remove_outgoing(lock); in lf_add_outgoing()
1037 * Walk the list of pending locks for the file and create an in-coming
1038 * edge from lock to each blocking lock.
1041 lf_add_incoming(struct lockf *state, struct lockf_entry *lock) in lf_add_incoming() argument
1046 sx_assert(&state->ls_lock, SX_XLOCKED); in lf_add_incoming()
1047 if (LIST_EMPTY(&state->ls_pending)) in lf_add_incoming()
1052 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { in lf_add_incoming()
1053 if (!lf_blocks(lock, overlap)) in lf_add_incoming()
1057 * We've found a blocking lock. Add the corresponding in lf_add_incoming()
1061 error = lf_add_edge(overlap, lock); in lf_add_incoming()
1068 lf_remove_incoming(lock); in lf_add_incoming()
1077 * Insert lock into the active list, keeping list entries ordered by
1081 lf_insert_lock(struct lockf *state, struct lockf_entry *lock) in lf_insert_lock() argument
1085 if (LIST_EMPTY(&state->ls_active)) { in lf_insert_lock()
1086 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link); in lf_insert_lock()
1091 LIST_FOREACH(lf, &state->ls_active, lf_link) { in lf_insert_lock()
1092 if (lf->lf_start > lock->lf_start) { in lf_insert_lock()
1093 LIST_INSERT_BEFORE(lf, lock, lf_link); in lf_insert_lock()
1098 LIST_INSERT_AFTER(lfprev, lock, lf_link); in lf_insert_lock()
1102 * Wake up a sleeping lock and remove it from the pending list now
1104 * arrange for the lock to be added to the active list, adjusting any
1120 if (wakelock->lf_async_task) { in lf_wakeup_lock()
1121 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task); in lf_wakeup_lock()
1128 * Re-check all dependent locks and remove edges to locks that we no
1129 * longer block. If 'all' is non-zero, the lock has been removed and
1135 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all, in lf_update_dependancies() argument
1141 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) { in lf_update_dependancies()
1142 deplock = e->le_from; in lf_update_dependancies()
1143 if (all || !lf_blocks(lock, deplock)) { in lf_update_dependancies()
1147 if (LIST_EMPTY(&deplock->lf_outedges)) { in lf_update_dependancies()
1156 * Set the start of an existing active lock, updating dependencies and
1160 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start, in lf_set_start() argument
1164 KASSERT(new_start >= lock->lf_start, ("can't increase lock")); in lf_set_start()
1165 lock->lf_start = new_start; in lf_set_start()
1166 LIST_REMOVE(lock, lf_link); in lf_set_start()
1167 lf_insert_lock(state, lock); in lf_set_start()
1168 lf_update_dependancies(state, lock, FALSE, granted); in lf_set_start()
1172 * Set the end of an existing active lock, updating dependencies and
1176 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end, in lf_set_end() argument
1180 KASSERT(new_end <= lock->lf_end, ("can't increase lock")); in lf_set_end()
1181 lock->lf_end = new_end; in lf_set_end()
1182 lf_update_dependancies(state, lock, FALSE, granted); in lf_set_end()
1186 * Add a lock to the active list, updating or removing any current
1191 * As a result of processing the new lock, we may unblock existing
1195 * Since the new lock already has its dependencies set up, we always
1197 * fragment the lock list in some pathological cases but it's probably in lf_activate_lock()
1201 lf_activate_lock(struct lockf *state, struct lockf_entry *lock) in lf_activate_lock() argument
1208 LIST_INSERT_HEAD(&granted, lock, lf_link); in lf_activate_lock()
1211 lock = LIST_FIRST(&granted); in lf_activate_lock()
1212 LIST_REMOVE(lock, lf_link); in lf_activate_lock()
1218 overlap = LIST_FIRST(&state->ls_active); in lf_activate_lock()
1220 ovcase = lf_findoverlap(&overlap, lock, SELF); in lf_activate_lock()
1231 * 1) overlap == lock in lf_activate_lock()
1232 * 2) overlap contains lock in lf_activate_lock()
1233 * 3) lock contains overlap in lf_activate_lock()
1234 * 4) overlap starts before lock in lf_activate_lock()
1235 * 5) overlap ends after lock in lf_activate_lock()
1241 case 1: /* overlap == lock */ in lf_activate_lock()
1244 * dependants for the new lock, taking in lf_activate_lock()
1246 * or unlock. Remove the old lock. in lf_activate_lock()
1254 case 2: /* overlap contains lock */ in lf_activate_lock()
1256 * Just split the existing lock. in lf_activate_lock()
1258 lf_split(state, overlap, lock, &granted); in lf_activate_lock()
1261 case 3: /* lock contains overlap */ in lf_activate_lock()
1274 case 4: /* overlap starts before lock */ in lf_activate_lock()
1279 lf_set_end(state, overlap, lock->lf_start - 1, in lf_activate_lock()
1284 case 5: /* overlap ends after lock */ in lf_activate_lock()
1287 * re-insert. in lf_activate_lock()
1289 lf_set_start(state, overlap, lock->lf_end + 1, in lf_activate_lock()
1297 if (lock->lf_type != F_UNLCK) in lf_activate_lock()
1298 lf_print("lf_activate_lock: activated", lock); in lf_activate_lock()
1300 lf_print("lf_activate_lock: unlocked", lock); in lf_activate_lock()
1301 lf_printlist("lf_activate_lock", lock); in lf_activate_lock()
1304 if (lock->lf_type != F_UNLCK) in lf_activate_lock()
1305 lf_insert_lock(state, lock); in lf_activate_lock()
1310 * Cancel a pending lock request, either as a result of a signal or a
1311 * cancel request for an async lock.
1314 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock) in lf_cancel_lock() argument
1319 * Note it is theoretically possible that cancelling this lock in lf_cancel_lock()
1320 * may allow some other pending lock to become in lf_cancel_lock()
1325 * A: lock [0..0] succeeds in lf_cancel_lock()
1326 * B: lock [2..2] succeeds in lf_cancel_lock()
1327 * C: lock [1..2] blocked C->B in lf_cancel_lock()
1328 * D: lock [0..1] blocked C->B,D->A,D->C in lf_cancel_lock()
1329 * A: unlock [0..0] C->B,D->C in lf_cancel_lock()
1333 LIST_REMOVE(lock, lf_link); in lf_cancel_lock()
1336 * Removing out-going edges is simple. in lf_cancel_lock()
1339 lf_remove_outgoing(lock); in lf_cancel_lock()
1343 * Removing in-coming edges may allow some other lock to in lf_cancel_lock()
1344 * become active - we use lf_update_dependancies to figure in lf_cancel_lock()
1348 lf_update_dependancies(state, lock, TRUE, &granted); in lf_cancel_lock()
1349 lf_free_lock(lock); in lf_cancel_lock()
1355 lock = LIST_FIRST(&granted); in lf_cancel_lock()
1356 LIST_REMOVE(lock, lf_link); in lf_cancel_lock()
1357 lf_activate_lock(state, lock); in lf_cancel_lock()
1362 * Set a byte-range lock.
1365 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp, in lf_setlock() argument
1373 lf_print("lf_setlock", lock); in lf_setlock()
1380 if (lock->lf_type == F_WRLCK) in lf_setlock()
1382 if (!(lock->lf_flags & F_NOINTR)) in lf_setlock()
1385 * Scan lock list for this file looking for locks that would block us. in lf_setlock()
1387 if (lf_getblock(state, lock)) { in lf_setlock()
1391 if ((lock->lf_flags & F_WAIT) == 0 in lf_setlock()
1392 && lock->lf_async_task == NULL) { in lf_setlock()
1393 lf_free_lock(lock); in lf_setlock()
1401 * waiting for an exclusive lock. in lf_setlock()
1403 if ((lock->lf_flags & F_FLOCK) && in lf_setlock()
1404 lock->lf_type == F_WRLCK) { in lf_setlock()
1405 lock->lf_type = F_UNLCK; in lf_setlock()
1406 lf_activate_lock(state, lock); in lf_setlock()
1407 lock->lf_type = F_WRLCK; in lf_setlock()
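For flock(2)-style locks the code above briefly converts the held lock to F_UNLCK and re-activates the list before sleeping, so a shared-to-exclusive upgrade drops the shared lock rather than deadlocking against another upgrader. From userland that corresponds to an upgrade like the following hedged example (the file path is illustrative).

/*
 * Userland illustration of the flock(2) upgrade case handled above: a
 * shared lock being upgraded to exclusive is not held while the caller
 * sleeps, so two upgraders cannot deadlock against each other (but
 * another process may slip in between).  The file path is illustrative.
 */
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tmp/flock-demo", O_RDWR | O_CREAT, 0644);

	if (fd == -1)
		err(1, "open");
	if (flock(fd, LOCK_SH) == -1)
		err(1, "LOCK_SH");
	/* The upgrade may block; the shared lock is dropped meanwhile. */
	if (flock(fd, LOCK_EX) == -1)
		err(1, "LOCK_EX");
	printf("upgraded to exclusive\n");
	flock(fd, LOCK_UN);
	close(fd);
	return (0);
}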
1411 * We are blocked. Create edges to each blocking lock, in lf_setlock()
1417 error = lf_add_outgoing(state, lock); in lf_setlock()
1423 lf_print("lf_setlock: deadlock", lock); in lf_setlock()
1425 lf_free_lock(lock); in lf_setlock()
1433 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link); in lf_setlock()
1437 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) { in lf_setlock()
1438 lf_print("lf_setlock: blocking on", e->le_to); in lf_setlock()
1439 lf_printlist("lf_setlock", e->le_to); in lf_setlock()
1444 if ((lock->lf_flags & F_WAIT) == 0) { in lf_setlock()
1446 * The caller requested async notification - in lf_setlock()
1448 * lock is released, allowing the caller to in lf_setlock()
1449 * make another attempt to take the lock. in lf_setlock()
1451 *cookiep = (void *) lock; in lf_setlock()
1456 lock->lf_refs++; in lf_setlock()
1458 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0); in lf_setlock()
1460 if (lf_free_lock(lock)) { in lf_setlock()
1468 * remove our lock graph edges) and/or by another in lf_setlock()
1469 * process releasing a lock (in which case our edges in lf_setlock()
1474 * removed our lock graph edges. in lf_setlock()
1483 * may now have incoming edges from some newer lock in lf_setlock()
1486 if (lock->lf_flags & F_INTR) { in lf_setlock()
1488 lf_free_lock(lock); in lf_setlock()
1491 if (LIST_EMPTY(&lock->lf_outedges)) { in lf_setlock()
1494 lf_cancel_lock(state, lock); in lf_setlock()
1499 lf_print("lf_setlock: granted", lock); in lf_setlock()
1505 * It looks like we are going to grant the lock. First add in lf_setlock()
1506 * edges from any currently pending lock that the new lock in lf_setlock()
1509 error = lf_add_incoming(state, lock); in lf_setlock()
1513 lf_print("lf_setlock: deadlock", lock); in lf_setlock()
1515 lf_free_lock(lock); in lf_setlock()
1520 * No blocks!! Add the lock. Note that we will in lf_setlock()
1524 lf_activate_lock(state, lock); in lf_setlock()
1531 * Remove a byte-range lock on an inode.
1533 * Generally, find the lock (or an overlap to that lock)
1541 overlap = LIST_FIRST(&state->ls_active); in lf_clearlock()
1546 if (unlock->lf_type != F_UNLCK) in lf_clearlock()
1558 * Check whether there is a blocking lock, and if so return its
1562 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) in lf_getlock() argument
1568 lf_print("lf_getlock", lock); in lf_getlock()
1571 if ((block = lf_getblock(state, lock))) { in lf_getlock()
1572 fl->l_type = block->lf_type; in lf_getlock()
1573 fl->l_whence = SEEK_SET; in lf_getlock()
1574 fl->l_start = block->lf_start; in lf_getlock()
1575 if (block->lf_end == OFF_MAX) in lf_getlock()
1576 fl->l_len = 0; in lf_getlock()
1578 fl->l_len = block->lf_end - block->lf_start + 1; in lf_getlock()
1579 fl->l_pid = block->lf_owner->lo_pid; in lf_getlock()
1580 fl->l_sysid = block->lf_owner->lo_sysid; in lf_getlock()
1582 fl->l_type = F_UNLCK; in lf_getlock()
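lf_getlock() is what backs fcntl(F_GETLK): if a conflicting lock exists, the caller's struct flock is overwritten with its type, range and owner; otherwise l_type is set to F_UNLCK. A small userland usage example (the file path is illustrative):

/*
 * Userland view of lf_getlock(): ask whether a write lock on the first
 * 100 bytes would conflict.  If l_type comes back as F_UNLCK the region
 * is free; otherwise the structure describes the first blocking lock.
 */
#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct flock fl = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 100,
	};
	int fd = open("/tmp/getlk-demo", O_RDWR | O_CREAT, 0644);

	if (fd == -1)
		err(1, "open");
	if (fcntl(fd, F_GETLK, &fl) == -1)
		err(1, "F_GETLK");
	if (fl.l_type == F_UNLCK)
		printf("region is unlocked\n");
	else
		printf("blocked by pid %d, start %jd, len %jd\n",
		    (int)fl.l_pid, (intmax_t)fl.l_start, (intmax_t)fl.l_len);
	close(fd);
	return (0);
}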
1588 * Cancel an async lock request.
1591 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie) in lf_cancel() argument
1596 * We need to match this request with an existing lock in lf_cancel()
1599 LIST_FOREACH(reallock, &state->ls_pending, lf_link) { in lf_cancel()
1602 * Double-check that this lock looks right in lf_cancel()
1606 if (!(reallock->lf_vnode == lock->lf_vnode in lf_cancel()
1607 && reallock->lf_start == lock->lf_start in lf_cancel()
1608 && reallock->lf_end == lock->lf_end)) { in lf_cancel()
1613 * Make sure this lock was async and then just in lf_cancel()
1616 if (!reallock->lf_async_task) { in lf_cancel()
1622 * state->ls_lock before it can possibly in lf_cancel()
1625 * can free the lock (actually our caller does in lf_cancel()
1634 * We didn't find a matching lock - not much we can do here. in lf_cancel()
1641 * return the first blocking lock.
1644 lf_getblock(struct lockf *state, struct lockf_entry *lock) in lf_getblock() argument
1648 LIST_FOREACH(overlap, &state->ls_active, lf_link) { in lf_getblock()
1653 if (overlap->lf_start > lock->lf_end) in lf_getblock()
1655 if (!lf_blocks(lock, overlap)) in lf_getblock()
1663 * Walk the list of locks for an inode to find an overlapping lock (if
1667 * *overlap The place in the lock list to start looking
1668 * lock The lock which is being tested
1670 * owner as lock, or 'OTHER' to test only locks
1675 * 1) overlap == lock
1676 * 2) overlap contains lock
1677 * 3) lock contains overlap
1678 * 4) overlap starts before lock
1679 * 5) overlap ends after lock
1681 * If there is an overlapping lock, '*overlap' is set to point at the
1682 * overlapping lock.
1684 * NOTE: this returns only the FIRST overlapping lock. There
1688 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type) in lf_findoverlap() argument
1699 lf_print("lf_findoverlap: looking for overlap in", lock); in lf_findoverlap()
1701 start = lock->lf_start; in lf_findoverlap()
1702 end = lock->lf_end; in lf_findoverlap()
1706 if (lf->lf_start > end) in lf_findoverlap()
1708 if (((type & SELF) && lf->lf_owner != lock->lf_owner) || in lf_findoverlap()
1709 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) { in lf_findoverlap()
1722 * 1) overlap == lock in lf_findoverlap()
1723 * 2) overlap contains lock in lf_findoverlap()
1724 * 3) lock contains overlap in lf_findoverlap()
1725 * 4) overlap starts before lock in lf_findoverlap()
1726 * 5) overlap ends after lock in lf_findoverlap()
1728 if (start > lf->lf_end) { in lf_findoverlap()
1737 if (lf->lf_start == start && lf->lf_end == end) { in lf_findoverlap()
1741 printf("overlap == lock\n"); in lf_findoverlap()
1746 if (lf->lf_start <= start && lf->lf_end >= end) { in lf_findoverlap()
1750 printf("overlap contains lock\n"); in lf_findoverlap()
1755 if (start <= lf->lf_start && end >= lf->lf_end) { in lf_findoverlap()
1759 printf("lock contains overlap\n"); in lf_findoverlap()
1764 if (lf->lf_start < start && lf->lf_end >= start) { in lf_findoverlap()
1768 printf("overlap starts before lock\n"); in lf_findoverlap()
1773 if (lf->lf_start > start && lf->lf_end > end) { in lf_findoverlap()
1777 printf("overlap ends after lock\n"); in lf_findoverlap()
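The five cases tested above can be pinned down with concrete ranges. The sketch below classifies a new lock against an existing one in the same order, assuming inclusive ranges that are already known to overlap.

/*
 * Concrete illustration of the five cases tested above, with inclusive
 * ranges.  classify() assumes the two ranges are already known to
 * overlap, as lf_findoverlap() has established at this point.
 */
#include <stdio.h>
#include <sys/types.h>

static int
classify(off_t lf_start, off_t lf_end, off_t start, off_t end)
{
	if (lf_start == start && lf_end == end)
		return (1);		/* overlap == lock */
	if (lf_start <= start && lf_end >= end)
		return (2);		/* overlap contains lock */
	if (start <= lf_start && end >= lf_end)
		return (3);		/* lock contains overlap */
	if (lf_start < start && lf_end >= start)
		return (4);		/* overlap starts before lock */
	return (5);			/* overlap ends after lock */
}

int
main(void)
{
	/* Existing lock [10..20] versus various new locks. */
	printf("[10..20] vs [10..20]: case %d\n", classify(10, 20, 10, 20));
	printf("[10..20] vs [12..18]: case %d\n", classify(10, 20, 12, 18));
	printf("[10..20] vs [ 5..25]: case %d\n", classify(10, 20, 5, 25));
	printf("[10..20] vs [15..25]: case %d\n", classify(10, 20, 15, 25));
	printf("[10..20] vs [ 5..15]: case %d\n", classify(10, 20, 5, 15));
	return (0);
}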
1788 * Split the existing 'lock1', based on the extent of the lock in lf_split()
1789 * described by 'lock2'. The existing lock should cover 'lock2'
1810 if (lock1->lf_start == lock2->lf_start) { in lf_split()
1811 lf_set_start(state, lock1, lock2->lf_end + 1, granted); in lf_split()
1814 if (lock1->lf_end == lock2->lf_end) { in lf_split()
1815 lf_set_end(state, lock1, lock2->lf_start - 1, granted); in lf_split()
1819 * Make a new lock consisting of the last part of in lf_split()
1820 * the encompassing lock. in lf_split()
1822 splitlock = lf_alloc_lock(lock1->lf_owner); in lf_split()
1824 splitlock->lf_refs = 1; in lf_split()
1825 if (splitlock->lf_flags & F_REMOTE) in lf_split()
1826 vref(splitlock->lf_vnode); in lf_split()
1832 * otherwise we may accidentally grant a pending lock that in lf_split()
1835 splitlock->lf_start = lock2->lf_end + 1; in lf_split()
1836 LIST_INIT(&splitlock->lf_outedges); in lf_split()
1837 LIST_INIT(&splitlock->lf_inedges); in lf_split()
1840 lf_set_end(state, lock1, lock2->lf_start - 1, granted); in lf_split()
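lf_split() handles case 2 above: when a new range is carved out of the middle of an existing lock, the original entry keeps the left-hand part and a new entry is created for the right-hand remainder. A numeric illustration of that arithmetic:

/*
 * Numeric illustration of lf_split() above: an existing lock [0..99]
 * split by a new range [40..59] becomes [0..39] plus a new entry
 * covering [60..99].  Ranges are inclusive.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

struct range {
	off_t	start, end;
};

int
main(void)
{
	struct range lock1 = { 0, 99 };		/* existing lock */
	struct range lock2 = { 40, 59 };	/* range being carved out */
	struct range splitlock;

	splitlock.start = lock2.end + 1;	/* right-hand remainder */
	splitlock.end = lock1.end;
	lock1.end = lock2.start - 1;		/* truncate the original */

	printf("left  [%jd..%jd]\n", (intmax_t)lock1.start,
	    (intmax_t)lock1.end);
	printf("right [%jd..%jd]\n", (intmax_t)splitlock.start,
	    (intmax_t)splitlock.end);
	return (0);
}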
1866 * active lock lists to build a list of locks that need in lf_iteratelocks_sysid()
1875 sx_xlock(&ls->ls_lock); in lf_iteratelocks_sysid()
1876 LIST_FOREACH(lf, &ls->ls_active, lf_link) { in lf_iteratelocks_sysid()
1877 if (lf->lf_owner->lo_sysid != sysid) in lf_iteratelocks_sysid()
1882 ldesc->vp = lf->lf_vnode; in lf_iteratelocks_sysid()
1883 vref(ldesc->vp); in lf_iteratelocks_sysid()
1884 ldesc->fl.l_start = lf->lf_start; in lf_iteratelocks_sysid()
1885 if (lf->lf_end == OFF_MAX) in lf_iteratelocks_sysid()
1886 ldesc->fl.l_len = 0; in lf_iteratelocks_sysid()
1888 ldesc->fl.l_len = in lf_iteratelocks_sysid()
1889 lf->lf_end - lf->lf_start + 1; in lf_iteratelocks_sysid()
1890 ldesc->fl.l_whence = SEEK_SET; in lf_iteratelocks_sysid()
1891 ldesc->fl.l_type = F_UNLCK; in lf_iteratelocks_sysid()
1892 ldesc->fl.l_pid = lf->lf_owner->lo_pid; in lf_iteratelocks_sysid()
1893 ldesc->fl.l_sysid = sysid; in lf_iteratelocks_sysid()
1896 sx_xunlock(&ls->ls_lock); in lf_iteratelocks_sysid()
1901 * Call the iterator function for each lock in turn. If the in lf_iteratelocks_sysid()
1909 error = fn(ldesc->vp, &ldesc->fl, arg); in lf_iteratelocks_sysid()
1910 vrele(ldesc->vp); in lf_iteratelocks_sysid()
1928 * active lock lists to build a list of locks that need in lf_iteratelocks_vnode()
1936 ls = vp->v_lockf; in lf_iteratelocks_vnode()
1941 MPASS(ls->ls_threads >= 0); in lf_iteratelocks_vnode()
1942 ls->ls_threads++; in lf_iteratelocks_vnode()
1945 sx_xlock(&ls->ls_lock); in lf_iteratelocks_vnode()
1946 LIST_FOREACH(lf, &ls->ls_active, lf_link) { in lf_iteratelocks_vnode()
1949 ldesc->vp = lf->lf_vnode; in lf_iteratelocks_vnode()
1950 vref(ldesc->vp); in lf_iteratelocks_vnode()
1951 ldesc->fl.l_start = lf->lf_start; in lf_iteratelocks_vnode()
1952 if (lf->lf_end == OFF_MAX) in lf_iteratelocks_vnode()
1953 ldesc->fl.l_len = 0; in lf_iteratelocks_vnode()
1955 ldesc->fl.l_len = in lf_iteratelocks_vnode()
1956 lf->lf_end - lf->lf_start + 1; in lf_iteratelocks_vnode()
1957 ldesc->fl.l_whence = SEEK_SET; in lf_iteratelocks_vnode()
1958 ldesc->fl.l_type = F_UNLCK; in lf_iteratelocks_vnode()
1959 ldesc->fl.l_pid = lf->lf_owner->lo_pid; in lf_iteratelocks_vnode()
1960 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid; in lf_iteratelocks_vnode()
1963 sx_xunlock(&ls->ls_lock); in lf_iteratelocks_vnode()
1965 MPASS(ls->ls_threads > 0); in lf_iteratelocks_vnode()
1966 ls->ls_threads--; in lf_iteratelocks_vnode()
1971 * Call the iterator function for each lock in turn. If the in lf_iteratelocks_vnode()
1979 error = fn(ldesc->vp, &ldesc->fl, arg); in lf_iteratelocks_vnode()
1980 vrele(ldesc->vp); in lf_iteratelocks_vnode()
2012 sx_xlock(&lf_lock_owners[i].lock); in lf_countlocks()
2014 if (lo->lo_sysid == sysid) in lf_countlocks()
2015 count += lo->lo_refs; in lf_countlocks()
2016 sx_xunlock(&lf_lock_owners[i].lock); in lf_countlocks()
2025 * Return non-zero if y is reachable from x using a brute force
2026 * search. If reachable and path is non-null, return the route taken
2041 LIST_FOREACH(e, &x->v_outedges, e_outlink) { in graph_reaches()
2042 if (graph_reaches(e->e_to, y, path)) { in graph_reaches()
2053 * v_order are correct. If checkorder is non-zero, check no vertex can
2061 for (i = 0; i < g->g_size; i++) { in graph_check()
2062 if (!g->g_vertices[i]->v_owner) in graph_check()
2064 KASSERT(g->g_vertices[i]->v_order == i, in graph_check()
2065 ("lock graph vertices disordered")); in graph_check()
2068 if (!g->g_vertices[j]->v_owner) in graph_check()
2070 KASSERT(!graph_reaches(g->g_vertices[i], in graph_check()
2071 g->g_vertices[j], NULL), in graph_check()
2072 ("lock graph vertices disordered")); in graph_check()
2085 printf("%d:", v->v_order); in graph_print_vertices()
2086 lf_print_owner(v->v_owner); in graph_print_vertices()
2096 * Calculate the sub-set of vertices v from the affected region [y..x]
2097 * where v is reachable from y. Return -1 if a loop was detected
2113 * has an out-edge to and that is within the affected region in graph_delta_forward()
2121 gen = g->g_gen; in graph_delta_forward()
2123 LIST_FOREACH(e, &v->v_outedges, e_outlink) { in graph_delta_forward()
2124 if (e->e_to == x) in graph_delta_forward()
2125 return -1; in graph_delta_forward()
2126 if (e->e_to->v_order < x->v_order in graph_delta_forward()
2127 && e->e_to->v_gen != gen) { in graph_delta_forward()
2128 e->e_to->v_gen = gen; in graph_delta_forward()
2129 TAILQ_INSERT_TAIL(delta, e->e_to, v_link); in graph_delta_forward()
2140 * Calculate the sub-set of vertices v from the affected region [y..x]
2155 * has an in-edge from and that is within the affected region in graph_delta_backward()
2162 gen = g->g_gen; in graph_delta_backward()
2164 LIST_FOREACH(e, &v->v_inedges, e_inlink) { in graph_delta_backward()
2165 if (e->e_from->v_order > y->v_order in graph_delta_backward()
2166 && e->e_from->v_gen != gen) { in graph_delta_backward()
2167 e->e_from->v_gen = gen; in graph_delta_backward()
2168 TAILQ_INSERT_HEAD(delta, e->e_from, v_link); in graph_delta_backward()
2186 i > 0 && indices[i - 1] > v->v_order; i--) in graph_add_indices()
2188 for (j = n - 1; j >= i; j--) in graph_add_indices()
2190 indices[i] = v->v_order; in graph_add_indices()
2206 if (!vlowest || v->v_order < vlowest->v_order) in graph_assign_indices()
2210 vlowest->v_order = indices[nextunused]; in graph_assign_indices()
2211 g->g_vertices[vlowest->v_order] = vlowest; in graph_assign_indices()
2230 LIST_FOREACH(e, &x->v_outedges, e_outlink) { in graph_add_edge()
2231 if (e->e_to == y) { in graph_add_edge()
2232 e->e_refs++; in graph_add_edge()
2239 printf("adding edge %d:", x->v_order); in graph_add_edge()
2240 lf_print_owner(x->v_owner); in graph_add_edge()
2241 printf(" -> %d:", y->v_order); in graph_add_edge()
2242 lf_print_owner(y->v_owner); in graph_add_edge()
2246 if (y->v_order < x->v_order) { in graph_add_edge()
2253 * already. We re-order the graph so that each vertex in graph_add_edge()
2260 g->g_gen++; in graph_add_edge()
2261 if (g->g_gen == 0) { in graph_add_edge()
2265 for (vi = 0; vi < g->g_size; vi++) { in graph_add_edge()
2266 g->g_vertices[vi]->v_gen = 0; in graph_add_edge()
2268 g->g_gen++; in graph_add_edge()
2286 printf("re-ordering graph vertices\n"); in graph_add_edge()
2303 * order values) that we may use, then we re-assign in graph_add_edge()
2306 * may be partially disordered - we perform an in graph_add_edge()
2309 indices = g->g_indexbuf; in graph_add_edge()
2315 * ordering of deltaF and deltaB when re-assigning in graph_add_edge()
2329 g->g_vertices[indices[i]], v_link); in graph_add_edge()
2336 KASSERT(x->v_order < y->v_order, ("Failed to re-order graph")); in graph_add_edge()
2346 LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink); in graph_add_edge()
2347 LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink); in graph_add_edge()
2348 e->e_refs = 1; in graph_add_edge()
2349 e->e_from = x; in graph_add_edge()
2350 e->e_to = y; in graph_add_edge()
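graph_add_edge() keeps the owner graph topologically ordered, re-numbering only the affected range [y..x] when a new edge points "backwards". The invariant being preserved is simply that every edge runs from a lower order to a higher one; the sketch below re-establishes that invariant the slow way, with a full topological sort, and illustrates the invariant rather than the kernel's incremental re-ordering.

/*
 * The invariant preserved above is that every edge runs from a lower
 * v_order to a higher one.  This sketch re-establishes that invariant
 * from scratch with a full topological sort (Kahn's algorithm) over a
 * small adjacency matrix.
 */
#include <stdbool.h>
#include <stdio.h>

#define	NV	4

static bool edge[NV][NV];		/* edge[i][j]: i waits on j */

static int
toposort(int order[NV])
{
	int indeg[NV] = { 0 };
	bool placed[NV] = { false };
	int n = 0;

	for (int i = 0; i < NV; i++)
		for (int j = 0; j < NV; j++)
			if (edge[i][j])
				indeg[j]++;
	while (n < NV) {
		int pick = -1;

		for (int i = 0; i < NV; i++)
			if (!placed[i] && indeg[i] == 0) {
				pick = i;
				break;
			}
		if (pick == -1)
			return (-1);	/* cycle: no valid order exists */
		placed[pick] = true;
		order[n++] = pick;
		for (int j = 0; j < NV; j++)
			if (edge[pick][j])
				indeg[j]--;
	}
	return (0);
}

int
main(void)
{
	int order[NV];

	edge[0][2] = true;		/* owner 0 waits on owner 2 */
	edge[2][1] = true;
	edge[1][3] = true;
	if (toposort(order) == 0)
		for (int i = 0; i < NV; i++)
			printf("order %d: vertex %d\n", i, order[i]);
	return (0);
}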
2356 * Remove an edge x->y from the graph.
2366 LIST_FOREACH(e, &x->v_outedges, e_outlink) { in graph_remove_edge()
2367 if (e->e_to == y) in graph_remove_edge()
2370 KASSERT(e, ("Removing non-existent edge from deadlock graph")); in graph_remove_edge()
2372 e->e_refs--; in graph_remove_edge()
2373 if (e->e_refs == 0) { in graph_remove_edge()
2376 printf("removing edge %d:", x->v_order); in graph_remove_edge()
2377 lf_print_owner(x->v_owner); in graph_remove_edge()
2378 printf(" -> %d:", y->v_order); in graph_remove_edge()
2379 lf_print_owner(y->v_owner); in graph_remove_edge()
2401 if (g->g_size == g->g_space) { in graph_alloc_vertex()
2402 g->g_vertices = realloc(g->g_vertices, in graph_alloc_vertex()
2403 2 * g->g_space * sizeof(struct owner_vertex *), in graph_alloc_vertex()
2405 free(g->g_indexbuf, M_LOCKF); in graph_alloc_vertex()
2406 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int), in graph_alloc_vertex()
2408 g->g_space = 2 * g->g_space; in graph_alloc_vertex()
2410 v->v_order = g->g_size; in graph_alloc_vertex()
2411 v->v_gen = g->g_gen; in graph_alloc_vertex()
2412 g->g_vertices[g->g_size] = v; in graph_alloc_vertex()
2413 g->g_size++; in graph_alloc_vertex()
2415 LIST_INIT(&v->v_outedges); in graph_alloc_vertex()
2416 LIST_INIT(&v->v_inedges); in graph_alloc_vertex()
2417 v->v_owner = lo; in graph_alloc_vertex()
2430 KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges")); in graph_free_vertex()
2431 KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges")); in graph_free_vertex()
2437 for (i = v->v_order + 1; i < g->g_size; i++) { in graph_free_vertex()
2438 w = g->g_vertices[i]; in graph_free_vertex()
2439 w->v_order--; in graph_free_vertex()
2440 g->g_vertices[i - 1] = w; in graph_free_vertex()
2442 g->g_size--; in graph_free_vertex()
2451 g->g_vertices = malloc(10 * sizeof(struct owner_vertex *), in graph_init()
2453 g->g_size = 0; in graph_init()
2454 g->g_space = 10; in graph_init()
2455 g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK); in graph_init()
2456 g->g_gen = 0; in graph_init()
2483 sx_slock(&ls->ls_lock); in vfs_report_lockf()
2484 LIST_FOREACH(lf, &ls->ls_active, lf_link) { in vfs_report_lockf()
2485 vp = lf->lf_vnode; in vfs_report_lockf()
2486 if (VN_IS_DOOMED(vp) || vp->v_mount != mp) in vfs_report_lockf()
2491 klf->vp = vp; in vfs_report_lockf()
2492 klf->kl.kl_structsize = sizeof(struct kinfo_lockf); in vfs_report_lockf()
2493 klf->kl.kl_start = lf->lf_start; in vfs_report_lockf()
2494 klf->kl.kl_len = lf->lf_end == OFF_MAX ? 0 : in vfs_report_lockf()
2495 lf->lf_end - lf->lf_start + 1; in vfs_report_lockf()
2496 klf->kl.kl_rw = lf->lf_type == F_RDLCK ? in vfs_report_lockf()
2498 if (lf->lf_owner->lo_sysid != 0) { in vfs_report_lockf()
2499 klf->kl.kl_pid = lf->lf_owner->lo_pid; in vfs_report_lockf()
2500 klf->kl.kl_sysid = lf->lf_owner->lo_sysid; in vfs_report_lockf()
2501 klf->kl.kl_type = KLOCKF_TYPE_REMOTE; in vfs_report_lockf()
2502 } else if (lf->lf_owner->lo_pid == -1) { in vfs_report_lockf()
2503 klf->kl.kl_pid = -1; in vfs_report_lockf()
2504 klf->kl.kl_sysid = 0; in vfs_report_lockf()
2505 klf->kl.kl_type = KLOCKF_TYPE_FLOCK; in vfs_report_lockf()
2507 klf->kl.kl_pid = lf->lf_owner->lo_pid; in vfs_report_lockf()
2508 klf->kl.kl_sysid = 0; in vfs_report_lockf()
2509 klf->kl.kl_type = KLOCKF_TYPE_PID; in vfs_report_lockf()
2513 sx_sunlock(&ls->ls_lock); in vfs_report_lockf()
2518 ucred = curthread->td_ucred; in vfs_report_lockf()
2521 vp = klf->vp; in vfs_report_lockf()
2523 error = prison_canseemount(ucred, vp->v_mount); in vfs_report_lockf()
2528 klf->kl.kl_file_fsid = stt.st_dev; in vfs_report_lockf()
2529 klf->kl.kl_file_rdev = stt.st_rdev; in vfs_report_lockf()
2530 klf->kl.kl_file_fileid = stt.st_ino; in vfs_report_lockf()
2532 fullpath = "-"; in vfs_report_lockf()
2535 strlcpy(klf->kl.kl_path, fullpath, in vfs_report_lockf()
2536 sizeof(klf->kl.kl_path)); in vfs_report_lockf()
2538 if (sbuf_bcat(sb, &klf->kl, in vfs_report_lockf()
2539 klf->kl.kl_structsize) != 0) { in vfs_report_lockf()
2563 error = mp->mnt_op->vfs_report_lockf(mp, sb); in sysctl_kern_lockf_run()
2593 * Print description of a lock owner
2599 if (lo->lo_flags & F_REMOTE) { in lf_print_owner()
2601 lo->lo_pid, lo->lo_sysid); in lf_print_owner()
2602 } else if (lo->lo_flags & F_FLOCK) { in lf_print_owner()
2603 printf("file %p", lo->lo_id); in lf_print_owner()
2605 printf("local pid %d", lo->lo_pid); in lf_print_owner()
2610 * Print out a lock.
2613 lf_print(char *tag, struct lockf_entry *lock) in lf_print() argument
2616 printf("%s: lock %p for ", tag, (void *)lock); in lf_print()
2617 lf_print_owner(lock->lf_owner); in lf_print()
2618 printf("\nvnode %p", lock->lf_vnode); in lf_print()
2619 VOP_PRINT(lock->lf_vnode); in lf_print()
2621 lock->lf_type == F_RDLCK ? "shared" : in lf_print()
2622 lock->lf_type == F_WRLCK ? "exclusive" : in lf_print()
2623 lock->lf_type == F_UNLCK ? "unlock" : "unknown", in lf_print()
2624 (intmax_t)lock->lf_start); in lf_print()
2625 if (lock->lf_end == OFF_MAX) in lf_print()
2628 printf("%jd", (intmax_t)lock->lf_end); in lf_print()
2629 if (!LIST_EMPTY(&lock->lf_outedges)) in lf_print()
2631 (void *)LIST_FIRST(&lock->lf_outedges)->le_to); in lf_print()
2637 lf_printlist(char *tag, struct lockf_entry *lock) in lf_printlist() argument
2642 printf("%s: Lock list for vnode %p:\n", tag, lock->lf_vnode); in lf_printlist()
2643 LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) { in lf_printlist()
2645 lf_print_owner(lock->lf_owner); in lf_printlist()
2647 lf->lf_type == F_RDLCK ? "shared" : in lf_printlist()
2648 lf->lf_type == F_WRLCK ? "exclusive" : in lf_printlist()
2649 lf->lf_type == F_UNLCK ? "unlock" : in lf_printlist()
2650 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end); in lf_printlist()
2651 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) { in lf_printlist()
2652 blk = e->le_to; in lf_printlist()
2654 lf_print_owner(blk->lf_owner); in lf_printlist()
2656 blk->lf_type == F_RDLCK ? "shared" : in lf_printlist()
2657 blk->lf_type == F_WRLCK ? "exclusive" : in lf_printlist()
2658 blk->lf_type == F_UNLCK ? "unlock" : in lf_printlist()
2659 "unknown", (intmax_t)blk->lf_start, in lf_printlist()
2660 (intmax_t)blk->lf_end); in lf_printlist()
2661 if (!LIST_EMPTY(&blk->lf_inedges)) in lf_printlist()