Lines matching "non-overlap-time"
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
29 /*-
92 static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
184 * (s) locked by state->ls_lock
218 * are terminal nodes in the graph (i.e. have no out-going
219 * edges). Pending locks have out-going edges to each blocking active
224 * out-going edges or new active locks which only add in-coming edges)
245 LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
246 LIST_ENTRY(owner_edge) e_inlink; /* (g) link to's in-edge list */
248 struct owner_vertex *e_from; /* (c) out-going from here */
249 struct owner_vertex *e_to; /* (c) in-coming to here */
257 struct owner_edge_list v_outedges;/* (g) list of out-edges */
258 struct owner_edge_list v_inedges; /* (g) list of in-edges */
268 uint32_t g_gen; /* (g) increment when re-ordering */
304 h = HASHSTEP(0, fl->l_pid); in lf_hash_owner()
305 h = HASHSTEP(h, fl->l_sysid); in lf_hash_owner()
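
A note on the two HASHSTEP calls above: for remote (F_REMOTE) owners the bucket index is folded from the (l_pid, l_sysid) pair. A minimal user-space sketch, assuming a Bernstein-style HASHSTEP macro as in <sys/hash.h> and a hypothetical LOCKF_HASH_SIZE of 16 (both stand-ins, not taken from the matched lines):

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-in for HASHSTEP: one Bernstein hash step (h*33 + byte).
 * Note the cast: only the low byte of each argument is folded in. */
#define HASHSTEP(x, c)  (((x) << 5) + (x) + (unsigned char)(c))
#define LOCKF_HASH_SIZE 16      /* hypothetical bucket count */

static int
hash_remote_owner(int pid, int sysid)
{
        uint32_t h;

        h = HASHSTEP(0, pid);           /* fold in the low byte of the pid */
        h = HASHSTEP(h, sysid);         /* then the low byte of the sysid */
        return (h % LOCKF_HASH_SIZE);
}

int
main(void)
{
        printf("bucket %d\n", hash_remote_owner(1234, 42));
        return (0);
}
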
324 return lo->lo_pid == fl->l_pid in lf_owner_matches()
325 && lo->lo_sysid == fl->l_sysid; in lf_owner_matches()
327 return lo->lo_id == id; in lf_owner_matches()
343 sx_xlock(&lf_lock_owners[lo->lo_hash].lock); in lf_alloc_lock()
344 lo->lo_refs++; in lf_alloc_lock()
345 sx_xunlock(&lf_lock_owners[lo->lo_hash].lock); in lf_alloc_lock()
346 lf->lf_owner = lo; in lf_alloc_lock()
357 KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock)); in lf_free_lock()
358 if (--lock->lf_refs > 0) in lf_free_lock()
365 struct lock_owner *lo = lock->lf_owner; in lf_free_lock()
367 KASSERT(LIST_EMPTY(&lock->lf_outedges), in lf_free_lock()
369 KASSERT(LIST_EMPTY(&lock->lf_inedges), in lf_free_lock()
371 chainlock = &lf_lock_owners[lo->lo_hash].lock; in lf_free_lock()
373 KASSERT(lo->lo_refs > 0, ("lock owner refcount")); in lf_free_lock()
374 lo->lo_refs--; in lf_free_lock()
375 if (lo->lo_refs == 0) { in lf_free_lock()
381 if (lo->lo_vertex) { in lf_free_lock()
384 lo->lo_vertex); in lf_free_lock()
396 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) { in lf_free_lock()
397 vrele(lock->lf_vnode); in lf_free_lock()
398 lock->lf_vnode = NULL; in lf_free_lock()
416 struct flock *fl = ap->a_fl; in lf_advlockasync()
418 struct vnode *vp = ap->a_vp; in lf_advlockasync()
419 caddr_t id = ap->a_id; in lf_advlockasync()
420 int flags = ap->a_flags; in lf_advlockasync()
427 	 * Handle the F_UNLCKSYS case first - no need to mess about in lf_advlockasync()
430 if (ap->a_op == F_UNLCKSYS) { in lf_advlockasync()
431 lf_clearremotesys(fl->l_sysid); in lf_advlockasync()
438 switch (fl->l_whence) { in lf_advlockasync()
445 start = fl->l_start; in lf_advlockasync()
450 (fl->l_start > 0 && size > OFF_MAX - fl->l_start)) in lf_advlockasync()
452 start = size + fl->l_start; in lf_advlockasync()
460 if (fl->l_len < 0) { in lf_advlockasync()
463 end = start - 1; in lf_advlockasync()
464 start += fl->l_len; in lf_advlockasync()
467 } else if (fl->l_len == 0) { in lf_advlockasync()
470 oadd = fl->l_len - 1; in lf_advlockasync()
471 if (oadd > OFF_MAX - start) in lf_advlockasync()
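
The lines above turn an l_start/l_len pair into an inclusive [start, end] byte range while guarding against off_t overflow. A user-space sketch of those rules; the EINVAL checks and the end-of-file case (end = OFF_MAX when l_len == 0) are filled in from standard fcntl() semantics, since those lines are not part of this match:

#include <errno.h>
#include <stdint.h>

typedef int64_t off_t_;                 /* stand-in for off_t */
#define OFF_MAX_ INT64_MAX              /* stand-in for OFF_MAX */

/* Convert a whence-adjusted start and an l_len into the inclusive
 * range [*startp, *endp], returning 0 or an errno value. */
static int
flock_range(off_t_ start, off_t_ len, off_t_ *startp, off_t_ *endp)
{
        off_t_ end, oadd;

        if (len < 0) {
                if (start == 0)
                        return (EINVAL);   /* range would end before 0 */
                end = start - 1;
                start += len;              /* count backwards from start */
                if (start < 0)
                        return (EINVAL);
        } else if (len == 0) {
                end = OFF_MAX_;            /* lock to end of file */
        } else {
                oadd = len - 1;
                if (oadd > OFF_MAX_ - start)
                        return (EOVERFLOW); /* start + len - 1 would wrap */
                end = start + oadd;
        }
        *startp = start;
        *endp = end;
        return (0);
}
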
481 if (ap->a_op != F_SETLK && (*statep) == NULL) { in lf_advlockasync()
484 fl->l_type = F_UNLCK; in lf_advlockasync()
493 * if this is the first time we have seen this owner. in lf_advlockasync()
513 lo->lo_refs = 1; in lf_advlockasync()
514 lo->lo_flags = flags; in lf_advlockasync()
515 lo->lo_id = id; in lf_advlockasync()
516 lo->lo_hash = hash; in lf_advlockasync()
518 lo->lo_pid = fl->l_pid; in lf_advlockasync()
519 lo->lo_sysid = fl->l_sysid; in lf_advlockasync()
521 lo->lo_pid = -1; in lf_advlockasync()
522 lo->lo_sysid = 0; in lf_advlockasync()
525 lo->lo_pid = p->p_pid; in lf_advlockasync()
526 lo->lo_sysid = 0; in lf_advlockasync()
528 lo->lo_vertex = NULL; in lf_advlockasync()
545 lo->lo_refs++; in lf_advlockasync()
555 lock->lf_refs = 1; in lf_advlockasync()
556 lock->lf_start = start; in lf_advlockasync()
557 lock->lf_end = end; in lf_advlockasync()
558 lock->lf_owner = lo; in lf_advlockasync()
559 lock->lf_vnode = vp; in lf_advlockasync()
563 * the vnode at any time - we have to ref it here to in lf_advlockasync()
569 lock->lf_type = fl->l_type; in lf_advlockasync()
570 LIST_INIT(&lock->lf_outedges); in lf_advlockasync()
571 LIST_INIT(&lock->lf_inedges); in lf_advlockasync()
572 lock->lf_async_task = ap->a_task; in lf_advlockasync()
573 lock->lf_flags = ap->a_flags; in lf_advlockasync()
577 * and create a new one if necessary - the caller's *statep in lf_advlockasync()
598 sx_init(&ls->ls_lock, "ls_lock"); in lf_advlockasync()
599 LIST_INIT(&ls->ls_active); in lf_advlockasync()
600 LIST_INIT(&ls->ls_pending); in lf_advlockasync()
601 ls->ls_threads = 1; in lf_advlockasync()
617 sx_destroy(&ls->ls_lock); in lf_advlockasync()
627 MPASS(state->ls_threads >= 0); in lf_advlockasync()
628 state->ls_threads++; in lf_advlockasync()
634 sx_destroy(&ls->ls_lock); in lf_advlockasync()
638 MPASS(state->ls_threads >= 0); in lf_advlockasync()
639 state->ls_threads++; in lf_advlockasync()
643 sx_xlock(&state->ls_lock); in lf_advlockasync()
645 * Recheck the doomed vnode after state->ls_lock is in lf_advlockasync()
651 MPASS(state->ls_threads > 0); in lf_advlockasync()
652 state->ls_threads--; in lf_advlockasync()
655 sx_xunlock(&state->ls_lock); in lf_advlockasync()
660 switch (ap->a_op) { in lf_advlockasync()
662 error = lf_setlock(state, lock, vp, ap->a_cookiep); in lf_advlockasync()
676 if (ap->a_cookiep) in lf_advlockasync()
677 error = lf_cancel(state, lock, *ap->a_cookiep); in lf_advlockasync()
694 * which should be active (i.e. have no out-going edges). in lf_advlockasync()
696 LIST_FOREACH(lock, &state->ls_active, lf_link) { in lf_advlockasync()
699 KASSERT((lock->lf_start in lf_advlockasync()
700 <= LIST_NEXT(lock, lf_link)->lf_start), in lf_advlockasync()
702 LIST_FOREACH(lf, &state->ls_active, lf_link) { in lf_advlockasync()
707 if (lock->lf_owner == lf->lf_owner) in lf_advlockasync()
712 LIST_FOREACH(lock, &state->ls_pending, lf_link) { in lf_advlockasync()
713 KASSERT(!LIST_EMPTY(&lock->lf_outedges), in lf_advlockasync()
717 sx_xunlock(&state->ls_lock); in lf_advlockasync()
720 MPASS(state->ls_threads > 0); in lf_advlockasync()
721 state->ls_threads--; in lf_advlockasync()
722 if (state->ls_threads != 0) { in lf_advlockasync()
728 KASSERT(ap->a_op == F_SETLK, ("EDOOFUS")); in lf_advlockasync()
739 a.a_vp = ap->a_vp; in lf_advlock()
740 a.a_id = ap->a_id; in lf_advlock()
741 a.a_op = ap->a_op; in lf_advlock()
742 a.a_fl = ap->a_fl; in lf_advlock()
743 a.a_flags = ap->a_flags; in lf_advlock()
771 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) { in lf_purgelocks()
772 KASSERT(LIST_EMPTY(&state->ls_pending), in lf_purgelocks()
777 MPASS(state->ls_threads >= 0); in lf_purgelocks()
778 state->ls_threads++; in lf_purgelocks()
781 sx_xlock(&state->ls_lock); in lf_purgelocks()
783 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) { in lf_purgelocks()
793 if (lock->lf_async_task) { in lf_purgelocks()
796 lock->lf_flags |= F_INTR; in lf_purgelocks()
801 sx_xunlock(&state->ls_lock); in lf_purgelocks()
808 while (state->ls_threads > 1) in lf_purgelocks()
818 KASSERT(LIST_EMPTY(&state->ls_pending), in lf_purgelocks()
820 LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) { in lf_purgelocks()
828 sx_destroy(&state->ls_lock); in lf_purgelocks()
833 * Return non-zero if locks 'x' and 'y' overlap.
839 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start); in lf_overlaps()
843 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
849 return x->lf_owner != y->lf_owner in lf_blocks()
850 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK) in lf_blocks()
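
Both predicates are single comparisons over inclusive ranges; lf_blocks() as matched here is cut off before its final clause, which presumably also requires lf_overlaps(x, y). A self-contained restatement with stand-in types:

#include <stdbool.h>

/* Minimal stand-ins for the fields the two predicates consult. */
struct entry {
        long  start, end;       /* inclusive byte range */
        int   type;             /* F_RDLCK_ or F_WRLCK_ below */
        void *owner;
};
#define F_RDLCK_ 1
#define F_WRLCK_ 3

/* Two inclusive ranges overlap iff each starts no later than the other
 * ends; e.g. [0..5] and [3..9] overlap since 0 <= 9 and 5 >= 3. */
static bool
overlaps(const struct entry *x, const struct entry *y)
{
        return (x->start <= y->end && x->end >= y->start);
}

/* x and y conflict iff they have different owners, at least one is a
 * write lock, and (assumed final clause) their ranges overlap. */
static bool
blocks(const struct entry *x, const struct entry *y)
{
        return (x->owner != y->owner &&
            (x->type == F_WRLCK_ || y->type == F_WRLCK_) &&
            overlaps(x, y));
}
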
883 if (!lock->lf_owner->lo_vertex) in lf_alloc_vertex()
884 lock->lf_owner->lo_vertex = in lf_alloc_vertex()
885 graph_alloc_vertex(g, lock->lf_owner); in lf_alloc_vertex()
900 LIST_FOREACH(e, &x->lf_outedges, le_outlink) in lf_add_edge()
901 KASSERT(e->le_to != y, ("adding lock edge twice")); in lf_add_edge()
910 error = graph_add_edge(g, x->lf_owner->lo_vertex, in lf_add_edge()
911 y->lf_owner->lo_vertex); in lf_add_edge()
916 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink); in lf_add_edge()
917 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink); in lf_add_edge()
918 e->le_from = x; in lf_add_edge()
919 e->le_to = y; in lf_add_edge()
931 struct lockf_entry *x = e->le_from; in lf_remove_edge()
932 struct lockf_entry *y = e->le_to; in lf_remove_edge()
934 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex); in lf_remove_edge()
937 e->le_from = NULL; in lf_remove_edge()
938 e->le_to = NULL; in lf_remove_edge()
943 * Remove all out-going edges from lock x.
950 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) { in lf_remove_outgoing()
956 * Remove all in-coming edges from lock x.
963 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) { in lf_remove_incoming()
969 * Walk the list of locks for the file and create an out-going edge
975 struct lockf_entry *overlap; in lf_add_outgoing() local
978 LIST_FOREACH(overlap, &state->ls_active, lf_link) { in lf_add_outgoing()
983 if (overlap->lf_start > lock->lf_end) in lf_add_outgoing()
985 if (!lf_blocks(lock, overlap)) in lf_add_outgoing()
993 error = lf_add_edge(lock, overlap); in lf_add_outgoing()
1013 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { in lf_add_outgoing()
1014 if (!lf_blocks(lock, overlap)) in lf_add_outgoing()
1021 error = lf_add_edge(lock, overlap); in lf_add_outgoing()
1037 * Walk the list of pending locks for the file and create an in-coming
1043 struct lockf_entry *overlap; in lf_add_incoming() local
1046 sx_assert(&state->ls_lock, SX_XLOCKED); in lf_add_incoming()
1047 if (LIST_EMPTY(&state->ls_pending)) in lf_add_incoming()
1052 LIST_FOREACH(overlap, &state->ls_pending, lf_link) { in lf_add_incoming()
1053 if (!lf_blocks(lock, overlap)) in lf_add_incoming()
1061 error = lf_add_edge(overlap, lock); in lf_add_incoming()
1085 if (LIST_EMPTY(&state->ls_active)) { in lf_insert_lock()
1086 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link); in lf_insert_lock()
1091 LIST_FOREACH(lf, &state->ls_active, lf_link) { in lf_insert_lock()
1092 if (lf->lf_start > lock->lf_start) { in lf_insert_lock()
1120 if (wakelock->lf_async_task) { in lf_wakeup_lock()
1121 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task); in lf_wakeup_lock()
1128 * Re-check all dependent locks and remove edges to locks that we no
1129 * longer block. If 'all' is non-zero, the lock has been removed and
1141 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) { in lf_update_dependancies()
1142 deplock = e->le_from; in lf_update_dependancies()
1147 if (LIST_EMPTY(&deplock->lf_outedges)) { in lf_update_dependancies()
1164 KASSERT(new_start >= lock->lf_start, ("can't increase lock")); in lf_set_start()
1165 lock->lf_start = new_start; in lf_set_start()
1180 KASSERT(new_end <= lock->lf_end, ("can't increase lock")); in lf_set_end()
1181 lock->lf_end = new_end; in lf_set_end()
1203 struct lockf_entry *overlap, *lf; in lf_activate_lock() local
1216 * any locks that overlap and are owned by ourselves. in lf_activate_lock()
1218 overlap = LIST_FIRST(&state->ls_active); in lf_activate_lock()
1220 ovcase = lf_findoverlap(&overlap, lock, SELF); in lf_activate_lock()
1224 printf("lf_setlock: overlap %d", ovcase); in lf_activate_lock()
1225 lf_print("", overlap); in lf_activate_lock()
1230 * 0) no overlap in lf_activate_lock()
1231 * 1) overlap == lock in lf_activate_lock()
1232 * 2) overlap contains lock in lf_activate_lock()
1233 * 3) lock contains overlap in lf_activate_lock()
1234 * 4) overlap starts before lock in lf_activate_lock()
1235 * 5) overlap ends after lock in lf_activate_lock()
1238 case 0: /* no overlap */ in lf_activate_lock()
1241 case 1: /* overlap == lock */ in lf_activate_lock()
1248 LIST_REMOVE(overlap, lf_link); in lf_activate_lock()
1249 lf_update_dependancies(state, overlap, TRUE, in lf_activate_lock()
1251 lf_free_lock(overlap); in lf_activate_lock()
1254 case 2: /* overlap contains lock */ in lf_activate_lock()
1258 lf_split(state, overlap, lock, &granted); in lf_activate_lock()
1261 case 3: /* lock contains overlap */ in lf_activate_lock()
1263 * Delete the overlap and advance to in lf_activate_lock()
1266 lf = LIST_NEXT(overlap, lf_link); in lf_activate_lock()
1267 LIST_REMOVE(overlap, lf_link); in lf_activate_lock()
1268 lf_update_dependancies(state, overlap, TRUE, in lf_activate_lock()
1270 lf_free_lock(overlap); in lf_activate_lock()
1271 overlap = lf; in lf_activate_lock()
1274 case 4: /* overlap starts before lock */ in lf_activate_lock()
1276 * Just update the overlap end and in lf_activate_lock()
1279 lf_set_end(state, overlap, lock->lf_start - 1, in lf_activate_lock()
1281 overlap = LIST_NEXT(overlap, lf_link); in lf_activate_lock()
1284 case 5: /* overlap ends after lock */ in lf_activate_lock()
1286 * Change the start of overlap and in lf_activate_lock()
1287 * re-insert. in lf_activate_lock()
1289 lf_set_start(state, overlap, lock->lf_end + 1, in lf_activate_lock()
1297 if (lock->lf_type != F_UNLCK) in lf_activate_lock()
1304 if (lock->lf_type != F_UNLCK) in lf_activate_lock()
1327 * C: lock [1..2] blocked C->B in lf_cancel_lock()
1328 * D: lock [0..1] blocked C->B,D->A,D->C in lf_cancel_lock()
1329 * A: unlock [0..0] C->B,D->C in lf_cancel_lock()
1336 * Removing out-going edges is simple. in lf_cancel_lock()
1343 * Removing in-coming edges may allow some other lock to in lf_cancel_lock()
1344 * become active - we use lf_update_dependancies to figure in lf_cancel_lock()
1362 * Set a byte-range lock.
1380 if (lock->lf_type == F_WRLCK) in lf_setlock()
1382 if (!(lock->lf_flags & F_NOINTR)) in lf_setlock()
1391 if ((lock->lf_flags & F_WAIT) == 0 in lf_setlock()
1392 && lock->lf_async_task == NULL) { in lf_setlock()
1403 if ((lock->lf_flags & F_FLOCK) && in lf_setlock()
1404 lock->lf_type == F_WRLCK) { in lf_setlock()
1405 lock->lf_type = F_UNLCK; in lf_setlock()
1407 lock->lf_type = F_WRLCK; in lf_setlock()
1433 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link); in lf_setlock()
1437 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) { in lf_setlock()
1438 lf_print("lf_setlock: blocking on", e->le_to); in lf_setlock()
1439 lf_printlist("lf_setlock", e->le_to); in lf_setlock()
1444 if ((lock->lf_flags & F_WAIT) == 0) { in lf_setlock()
1446 * The caller requested async notification - in lf_setlock()
1456 lock->lf_refs++; in lf_setlock()
1458 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0); in lf_setlock()
1486 if (lock->lf_flags & F_INTR) { in lf_setlock()
1491 if (LIST_EMPTY(&lock->lf_outedges)) { in lf_setlock()
1531 * Remove a byte-range lock on an inode.
1533 * Generally, find the lock (or an overlap to that lock)
1539 struct lockf_entry *overlap; in lf_clearlock() local
1541 overlap = LIST_FIRST(&state->ls_active); in lf_clearlock()
1543 if (overlap == NOLOCKF) in lf_clearlock()
1546 if (unlock->lf_type != F_UNLCK) in lf_clearlock()
1572 fl->l_type = block->lf_type; in lf_getlock()
1573 fl->l_whence = SEEK_SET; in lf_getlock()
1574 fl->l_start = block->lf_start; in lf_getlock()
1575 if (block->lf_end == OFF_MAX) in lf_getlock()
1576 fl->l_len = 0; in lf_getlock()
1578 fl->l_len = block->lf_end - block->lf_start + 1; in lf_getlock()
1579 fl->l_pid = block->lf_owner->lo_pid; in lf_getlock()
1580 fl->l_sysid = block->lf_owner->lo_sysid; in lf_getlock()
1582 fl->l_type = F_UNLCK; in lf_getlock()
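
Note the inverse mapping here: lf_getlock() turns the internal inclusive range back into flock fields, encoding end == OFF_MAX as l_len = 0 ("to end of file"). The rule in isolation:

#include <stdint.h>

#define OFF_MAX_ INT64_MAX      /* stand-in for OFF_MAX */

/* Map an internal inclusive [start, end] back to flock's l_start and
 * l_len; end == OFF_MAX renders as a zero length. */
static void
range_to_flock(int64_t start, int64_t end, int64_t *l_start, int64_t *l_len)
{
        *l_start = start;
        *l_len = (end == OFF_MAX_) ? 0 : end - start + 1;
}
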
1599 LIST_FOREACH(reallock, &state->ls_pending, lf_link) { in lf_cancel()
1602 * Double-check that this lock looks right in lf_cancel()
1606 if (!(reallock->lf_vnode == lock->lf_vnode in lf_cancel()
1607 && reallock->lf_start == lock->lf_start in lf_cancel()
1608 && reallock->lf_end == lock->lf_end)) { in lf_cancel()
1616 if (!reallock->lf_async_task) { in lf_cancel()
1622 * state->ls_lock before it can possibly in lf_cancel()
1634 * We didn't find a matching lock - not much we can do here. in lf_cancel()
1646 struct lockf_entry *overlap; in lf_getblock() local
1648 LIST_FOREACH(overlap, &state->ls_active, lf_link) { in lf_getblock()
1653 if (overlap->lf_start > lock->lf_end) in lf_getblock()
1655 if (!lf_blocks(lock, overlap)) in lf_getblock()
1657 return (overlap); in lf_getblock()
1664 * any) and return a classification of that overlap.
1667 * *overlap The place in the lock list to start looking
1674 * 0) no overlap
1675 * 1) overlap == lock
1676 * 2) overlap contains lock
1677 * 3) lock contains overlap
1678 * 4) overlap starts before lock
1679 * 5) overlap ends after lock
1681 * If there is an overlapping lock, '*overlap' is set to point at the
1688 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type) in lf_findoverlap() argument
1694 if ((*overlap) == NOLOCKF) { in lf_findoverlap()
1699 lf_print("lf_findoverlap: looking for overlap in", lock); in lf_findoverlap()
1701 start = lock->lf_start; in lf_findoverlap()
1702 end = lock->lf_end; in lf_findoverlap()
1704 while (*overlap) { in lf_findoverlap()
1705 lf = *overlap; in lf_findoverlap()
1706 if (lf->lf_start > end) in lf_findoverlap()
1708 if (((type & SELF) && lf->lf_owner != lock->lf_owner) || in lf_findoverlap()
1709 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) { in lf_findoverlap()
1710 *overlap = LIST_NEXT(lf, lf_link); in lf_findoverlap()
1718 * OK, check for overlap in lf_findoverlap()
1721 * 0) no overlap in lf_findoverlap()
1722 * 1) overlap == lock in lf_findoverlap()
1723 * 2) overlap contains lock in lf_findoverlap()
1724 * 3) lock contains overlap in lf_findoverlap()
1725 * 4) overlap starts before lock in lf_findoverlap()
1726 * 5) overlap ends after lock in lf_findoverlap()
1728 if (start > lf->lf_end) { in lf_findoverlap()
1732 printf("no overlap\n"); in lf_findoverlap()
1734 *overlap = LIST_NEXT(lf, lf_link); in lf_findoverlap()
1737 if (lf->lf_start == start && lf->lf_end == end) { in lf_findoverlap()
1741 printf("overlap == lock\n"); in lf_findoverlap()
1746 if (lf->lf_start <= start && lf->lf_end >= end) { in lf_findoverlap()
1750 printf("overlap contains lock\n"); in lf_findoverlap()
1755 if (start <= lf->lf_start && end >= lf->lf_end) { in lf_findoverlap()
1759 printf("lock contains overlap\n"); in lf_findoverlap()
1764 if (lf->lf_start < start && lf->lf_end >= start) { in lf_findoverlap()
1768 printf("overlap starts before lock\n"); in lf_findoverlap()
1773 if (lf->lf_start > start && lf->lf_end > end) { in lf_findoverlap()
1777 printf("overlap ends after lock\n"); in lf_findoverlap()
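
The six cases reduce to a few boundary comparisons once disjoint ranges are excluded. A compact restatement of the classification (a sketch, not the kernel's control flow):

/* Classify candidate [ls..le] against the new lock [s..e]; ranges are
 * inclusive.  E.g. classify(40, 60, 50, 100) == 4: the candidate
 * starts before the lock and ends inside it. */
static int
classify(long ls, long le, long s, long e)
{
        if (ls > e || le < s)
                return (0);     /* no overlap */
        if (ls == s && le == e)
                return (1);     /* overlap == lock */
        if (ls <= s && le >= e)
                return (2);     /* overlap contains lock */
        if (s <= ls && e >= le)
                return (3);     /* lock contains overlap */
        if (ls < s)
                return (4);     /* overlap starts before lock */
        return (5);             /* overlap ends after lock */
}
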
1810 if (lock1->lf_start == lock2->lf_start) { in lf_split()
1811 lf_set_start(state, lock1, lock2->lf_end + 1, granted); in lf_split()
1814 if (lock1->lf_end == lock2->lf_end) { in lf_split()
1815 lf_set_end(state, lock1, lock2->lf_start - 1, granted); in lf_split()
1822 splitlock = lf_alloc_lock(lock1->lf_owner); in lf_split()
1824 splitlock->lf_refs = 1; in lf_split()
1825 if (splitlock->lf_flags & F_REMOTE) in lf_split()
1826 vref(splitlock->lf_vnode); in lf_split()
1835 splitlock->lf_start = lock2->lf_end + 1; in lf_split()
1836 LIST_INIT(&splitlock->lf_outedges); in lf_split()
1837 LIST_INIT(&splitlock->lf_inedges); in lf_split()
1840 lf_set_end(state, lock1, lock2->lf_start - 1, granted); in lf_split()
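
For case 2, where the new range sits strictly inside an existing lock, only the boundary arithmetic is shown in this sketch; the list insertion, edge bookkeeping, and F_REMOTE vnode reference from the lines above are omitted:

/* Split lock1 [l1s..l1e] around lock2 [l2s..l2e], lock2 strictly
 * inside: lock1 keeps the left fragment, a new entry takes the right.
 * E.g. [0..100] split by [40..60] becomes [0..39] plus [61..100]. */
static void
split_ranges(long l1s, long l1e, long l2s, long l2e,
    long *lefts, long *lefte, long *rights, long *righte)
{
        *lefts = l1s;           /* lock1 keeps its start... */
        *lefte = l2s - 1;       /* ...but now ends just before lock2 */
        *rights = l2e + 1;      /* the new entry begins just after lock2 */
        *righte = l1e;          /* and inherits lock1's old end */
}
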
1875 sx_xlock(&ls->ls_lock); in lf_iteratelocks_sysid()
1876 LIST_FOREACH(lf, &ls->ls_active, lf_link) { in lf_iteratelocks_sysid()
1877 if (lf->lf_owner->lo_sysid != sysid) in lf_iteratelocks_sysid()
1882 ldesc->vp = lf->lf_vnode; in lf_iteratelocks_sysid()
1883 vref(ldesc->vp); in lf_iteratelocks_sysid()
1884 ldesc->fl.l_start = lf->lf_start; in lf_iteratelocks_sysid()
1885 if (lf->lf_end == OFF_MAX) in lf_iteratelocks_sysid()
1886 ldesc->fl.l_len = 0; in lf_iteratelocks_sysid()
1888 ldesc->fl.l_len = in lf_iteratelocks_sysid()
1889 lf->lf_end - lf->lf_start + 1; in lf_iteratelocks_sysid()
1890 ldesc->fl.l_whence = SEEK_SET; in lf_iteratelocks_sysid()
1891 ldesc->fl.l_type = F_UNLCK; in lf_iteratelocks_sysid()
1892 ldesc->fl.l_pid = lf->lf_owner->lo_pid; in lf_iteratelocks_sysid()
1893 ldesc->fl.l_sysid = sysid; in lf_iteratelocks_sysid()
1896 sx_xunlock(&ls->ls_lock); in lf_iteratelocks_sysid()
1909 error = fn(ldesc->vp, &ldesc->fl, arg); in lf_iteratelocks_sysid()
1910 vrele(ldesc->vp); in lf_iteratelocks_sysid()
1936 ls = vp->v_lockf; in lf_iteratelocks_vnode()
1941 MPASS(ls->ls_threads >= 0); in lf_iteratelocks_vnode()
1942 ls->ls_threads++; in lf_iteratelocks_vnode()
1945 sx_xlock(&ls->ls_lock); in lf_iteratelocks_vnode()
1946 LIST_FOREACH(lf, &ls->ls_active, lf_link) { in lf_iteratelocks_vnode()
1949 ldesc->vp = lf->lf_vnode; in lf_iteratelocks_vnode()
1950 vref(ldesc->vp); in lf_iteratelocks_vnode()
1951 ldesc->fl.l_start = lf->lf_start; in lf_iteratelocks_vnode()
1952 if (lf->lf_end == OFF_MAX) in lf_iteratelocks_vnode()
1953 ldesc->fl.l_len = 0; in lf_iteratelocks_vnode()
1955 ldesc->fl.l_len = in lf_iteratelocks_vnode()
1956 lf->lf_end - lf->lf_start + 1; in lf_iteratelocks_vnode()
1957 ldesc->fl.l_whence = SEEK_SET; in lf_iteratelocks_vnode()
1958 ldesc->fl.l_type = F_UNLCK; in lf_iteratelocks_vnode()
1959 ldesc->fl.l_pid = lf->lf_owner->lo_pid; in lf_iteratelocks_vnode()
1960 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid; in lf_iteratelocks_vnode()
1963 sx_xunlock(&ls->ls_lock); in lf_iteratelocks_vnode()
1965 MPASS(ls->ls_threads > 0); in lf_iteratelocks_vnode()
1966 ls->ls_threads--; in lf_iteratelocks_vnode()
1979 error = fn(ldesc->vp, &ldesc->fl, arg); in lf_iteratelocks_vnode()
1980 vrele(ldesc->vp); in lf_iteratelocks_vnode()
2014 if (lo->lo_sysid == sysid) in lf_countlocks()
2015 count += lo->lo_refs; in lf_countlocks()
2025 * Return non-zero if y is reachable from x using a brute force
2026 * search. If reachable and path is non-null, return the route taken
2041 LIST_FOREACH(e, &x->v_outedges, e_outlink) { in graph_reaches()
2042 if (graph_reaches(e->e_to, y, path)) { in graph_reaches()
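
graph_reaches() can recurse without a visited set because the owner graph is kept acyclic by construction. A stripped-down sketch that drops the path-recording argument:

#include <stdbool.h>
#include <stddef.h>

struct vertex;
struct edge {
        struct vertex *to;
        struct edge   *next_out;
};
struct vertex {
        struct edge *out;       /* head of out-edge list */
};

/* Depth-first reachability over out-edges; the real function also
 * appends each vertex on a successful path to its 'path' argument. */
static bool
reaches(struct vertex *x, struct vertex *y)
{
        struct edge *e;

        if (x == y)
                return (true);
        for (e = x->out; e != NULL; e = e->next_out)
                if (reaches(e->to, y))
                        return (true);
        return (false);
}
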
2053 * v_order are correct. If checkorder is non-zero, check no vertex can
2061 for (i = 0; i < g->g_size; i++) { in graph_check()
2062 if (!g->g_vertices[i]->v_owner) in graph_check()
2064 KASSERT(g->g_vertices[i]->v_order == i, in graph_check()
2068 if (!g->g_vertices[j]->v_owner) in graph_check()
2070 KASSERT(!graph_reaches(g->g_vertices[i], in graph_check()
2071 g->g_vertices[j], NULL), in graph_check()
2085 printf("%d:", v->v_order); in graph_print_vertices()
2086 lf_print_owner(v->v_owner); in graph_print_vertices()
2096 * Calculate the sub-set of vertices v from the affected region [y..x]
2097 * where v is reachable from y. Return -1 if a loop was detected
2113 * has an out-edge to and that is within the affected region in graph_delta_forward()
2121 gen = g->g_gen; in graph_delta_forward()
2123 LIST_FOREACH(e, &v->v_outedges, e_outlink) { in graph_delta_forward()
2124 if (e->e_to == x) in graph_delta_forward()
2125 return -1; in graph_delta_forward()
2126 if (e->e_to->v_order < x->v_order in graph_delta_forward()
2127 && e->e_to->v_gen != gen) { in graph_delta_forward()
2128 e->e_to->v_gen = gen; in graph_delta_forward()
2129 TAILQ_INSERT_TAIL(delta, e->e_to, v_link); in graph_delta_forward()
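
The forward pass is a breadth-first walk that uses the delta list itself as its queue and a generation counter (v_gen versus g_gen) instead of clearing visit marks. A sketch with fixed arrays standing in for the TAILQ:

#include <stdint.h>

enum { MAXV = 64 };             /* sketch-sized graph */

struct vtx {
        int         order;      /* topological order */
        uint32_t    gen;        /* last generation that visited us */
        int         nout;
        struct vtx *out[MAXV];  /* out-neighbours */
};

/* Collect into delta[] (capacity MAXV) every vertex reachable from y
 * whose order is below x's; return the count, or -1 if x itself is
 * reachable, meaning the proposed edge x->y would close a cycle and
 * the caller should report deadlock. */
static int
delta_forward(struct vtx *x, struct vtx *y, uint32_t gen, struct vtx **delta)
{
        int n = 0, i, j;

        y->gen = gen;
        delta[n++] = y;
        for (i = 0; i < n; i++) {       /* delta doubles as the queue */
                struct vtx *v = delta[i];
                for (j = 0; j < v->nout; j++) {
                        struct vtx *w = v->out[j];
                        if (w == x)
                                return (-1);    /* cycle: deadlock */
                        if (w->order < x->order && w->gen != gen) {
                                w->gen = gen;   /* mark without clearing */
                                delta[n++] = w;
                        }
                }
        }
        return (n);
}
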
2140 * Calculate the sub-set of vertices v from the affected region [y..x]
2155 * has an in-edge from and that is within the affected region in graph_delta_backward()
2162 gen = g->g_gen; in graph_delta_backward()
2164 LIST_FOREACH(e, &v->v_inedges, e_inlink) { in graph_delta_backward()
2165 if (e->e_from->v_order > y->v_order in graph_delta_backward()
2166 && e->e_from->v_gen != gen) { in graph_delta_backward()
2167 e->e_from->v_gen = gen; in graph_delta_backward()
2168 TAILQ_INSERT_HEAD(delta, e->e_from, v_link); in graph_delta_backward()
2186 i > 0 && indices[i - 1] > v->v_order; i--) in graph_add_indices()
2188 for (j = n - 1; j >= i; j--) in graph_add_indices()
2190 indices[i] = v->v_order; in graph_add_indices()
2206 if (!vlowest || v->v_order < vlowest->v_order) in graph_assign_indices()
2210 vlowest->v_order = indices[nextunused]; in graph_assign_indices()
2211 g->g_vertices[vlowest->v_order] = vlowest; in graph_assign_indices()
2230 LIST_FOREACH(e, &x->v_outedges, e_outlink) { in graph_add_edge()
2231 if (e->e_to == y) { in graph_add_edge()
2232 e->e_refs++; in graph_add_edge()
2239 printf("adding edge %d:", x->v_order); in graph_add_edge()
2240 lf_print_owner(x->v_owner); in graph_add_edge()
2241 printf(" -> %d:", y->v_order); in graph_add_edge()
2242 lf_print_owner(y->v_owner); in graph_add_edge()
2246 if (y->v_order < x->v_order) { in graph_add_edge()
2253 * already. We re-order the graph so that each vertex in graph_add_edge()
2260 g->g_gen++; in graph_add_edge()
2261 if (g->g_gen == 0) { in graph_add_edge()
2265 for (vi = 0; vi < g->g_size; vi++) { in graph_add_edge()
2266 g->g_vertices[vi]->v_gen = 0; in graph_add_edge()
2268 g->g_gen++; in graph_add_edge()
2286 printf("re-ordering graph vertices\n"); in graph_add_edge()
2303 * order values) that we may use, then we re-assign in graph_add_edge()
2306 * may be partially disordered - we perform an in graph_add_edge()
2309 indices = g->g_indexbuf; in graph_add_edge()
2315 * ordering of deltaF and deltaB when re-assigning in graph_add_edge()
2329 g->g_vertices[indices[i]], v_link); in graph_add_edge()
2336 KASSERT(x->v_order < y->v_order, ("Failed to re-order graph")); in graph_add_edge()
2346 LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink); in graph_add_edge()
2347 LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink); in graph_add_edge()
2348 e->e_refs = 1; in graph_add_edge()
2349 e->e_from = x; in graph_add_edge()
2350 e->e_to = y; in graph_add_edge()
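
The re-ordering collects the order values held by deltaB (ancestors of x) and deltaF (descendants of y), sorts that pool, and deals it back with deltaB first, re-establishing x->v_order < y->v_order. In miniature, assuming each delta array is already in its internal relative order as graph_assign_indices() arranges:

#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
        return (*(const int *)a - *(const int *)b);
}

/* Reassign order values: the combined pool of old orders is sorted and
 * handed back first to deltaB, then to deltaF, so every ancestor of x
 * ends up ordered before every descendant of y.  E.g. deltaB holding
 * {3, 7} and deltaF {5, 9} share the pool {3, 5, 7, 9}: deltaB takes
 * 3 and 5, deltaF takes 7 and 9.  (Sketch capacity only; the real code
 * sizes g_indexbuf with the graph.) */
static void
reassign_orders(int *db, int ndb, int *df, int ndf)
{
        int pool[256];
        int n = 0, i;

        for (i = 0; i < ndb; i++)
                pool[n++] = db[i];
        for (i = 0; i < ndf; i++)
                pool[n++] = df[i];
        qsort(pool, n, sizeof(int), cmp_int);
        for (i = 0; i < ndb; i++)
                db[i] = pool[i];        /* ancestors take the low slots */
        for (i = 0; i < ndf; i++)
                df[i] = pool[ndb + i];  /* descendants take the high slots */
}
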
2356 * Remove an edge x->y from the graph.
2366 LIST_FOREACH(e, &x->v_outedges, e_outlink) { in graph_remove_edge()
2367 if (e->e_to == y) in graph_remove_edge()
2370 KASSERT(e, ("Removing non-existent edge from deadlock graph")); in graph_remove_edge()
2372 e->e_refs--; in graph_remove_edge()
2373 if (e->e_refs == 0) { in graph_remove_edge()
2376 printf("removing edge %d:", x->v_order); in graph_remove_edge()
2377 lf_print_owner(x->v_owner); in graph_remove_edge()
2378 printf(" -> %d:", y->v_order); in graph_remove_edge()
2379 lf_print_owner(y->v_owner); in graph_remove_edge()
2401 if (g->g_size == g->g_space) { in graph_alloc_vertex()
2402 g->g_vertices = realloc(g->g_vertices, in graph_alloc_vertex()
2403 2 * g->g_space * sizeof(struct owner_vertex *), in graph_alloc_vertex()
2405 free(g->g_indexbuf, M_LOCKF); in graph_alloc_vertex()
2406 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int), in graph_alloc_vertex()
2408 g->g_space = 2 * g->g_space; in graph_alloc_vertex()
2410 v->v_order = g->g_size; in graph_alloc_vertex()
2411 v->v_gen = g->g_gen; in graph_alloc_vertex()
2412 g->g_vertices[g->g_size] = v; in graph_alloc_vertex()
2413 g->g_size++; in graph_alloc_vertex()
2415 LIST_INIT(&v->v_outedges); in graph_alloc_vertex()
2416 LIST_INIT(&v->v_inedges); in graph_alloc_vertex()
2417 v->v_owner = lo; in graph_alloc_vertex()
2430 KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges")); in graph_free_vertex()
2431 KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges")); in graph_free_vertex()
2437 for (i = v->v_order + 1; i < g->g_size; i++) { in graph_free_vertex()
2438 w = g->g_vertices[i]; in graph_free_vertex()
2439 w->v_order--; in graph_free_vertex()
2440 g->g_vertices[i - 1] = w; in graph_free_vertex()
2442 g->g_size--; in graph_free_vertex()
2451 g->g_vertices = malloc(10 * sizeof(struct owner_vertex *), in graph_init()
2453 g->g_size = 0; in graph_init()
2454 g->g_space = 10; in graph_init()
2455 g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK); in graph_init()
2456 g->g_gen = 0; in graph_init()
2483 sx_slock(&ls->ls_lock); in vfs_report_lockf()
2484 LIST_FOREACH(lf, &ls->ls_active, lf_link) { in vfs_report_lockf()
2485 vp = lf->lf_vnode; in vfs_report_lockf()
2486 if (VN_IS_DOOMED(vp) || vp->v_mount != mp) in vfs_report_lockf()
2491 klf->vp = vp; in vfs_report_lockf()
2492 klf->kl.kl_structsize = sizeof(struct kinfo_lockf); in vfs_report_lockf()
2493 klf->kl.kl_start = lf->lf_start; in vfs_report_lockf()
2494 klf->kl.kl_len = lf->lf_end == OFF_MAX ? 0 : in vfs_report_lockf()
2495 lf->lf_end - lf->lf_start + 1; in vfs_report_lockf()
2496 klf->kl.kl_rw = lf->lf_type == F_RDLCK ? in vfs_report_lockf()
2498 if (lf->lf_owner->lo_sysid != 0) { in vfs_report_lockf()
2499 klf->kl.kl_pid = lf->lf_owner->lo_pid; in vfs_report_lockf()
2500 klf->kl.kl_sysid = lf->lf_owner->lo_sysid; in vfs_report_lockf()
2501 klf->kl.kl_type = KLOCKF_TYPE_REMOTE; in vfs_report_lockf()
2502 } else if (lf->lf_owner->lo_pid == -1) { in vfs_report_lockf()
2503 klf->kl.kl_pid = -1; in vfs_report_lockf()
2504 klf->kl.kl_sysid = 0; in vfs_report_lockf()
2505 klf->kl.kl_type = KLOCKF_TYPE_FLOCK; in vfs_report_lockf()
2507 klf->kl.kl_pid = lf->lf_owner->lo_pid; in vfs_report_lockf()
2508 klf->kl.kl_sysid = 0; in vfs_report_lockf()
2509 klf->kl.kl_type = KLOCKF_TYPE_PID; in vfs_report_lockf()
2513 sx_sunlock(&ls->ls_lock); in vfs_report_lockf()
2518 ucred = curthread->td_ucred; in vfs_report_lockf()
2521 vp = klf->vp; in vfs_report_lockf()
2523 error = prison_canseemount(ucred, vp->v_mount); in vfs_report_lockf()
2528 klf->kl.kl_file_fsid = stt.st_dev; in vfs_report_lockf()
2529 klf->kl.kl_file_rdev = stt.st_rdev; in vfs_report_lockf()
2530 klf->kl.kl_file_fileid = stt.st_ino; in vfs_report_lockf()
2532 fullpath = "-"; in vfs_report_lockf()
2535 strlcpy(klf->kl.kl_path, fullpath, in vfs_report_lockf()
2536 sizeof(klf->kl.kl_path)); in vfs_report_lockf()
2538 if (sbuf_bcat(sb, &klf->kl, in vfs_report_lockf()
2539 klf->kl.kl_structsize) != 0) { in vfs_report_lockf()
2563 error = mp->mnt_op->vfs_report_lockf(mp, sb); in sysctl_kern_lockf_run()
2599 if (lo->lo_flags & F_REMOTE) { in lf_print_owner()
2601 lo->lo_pid, lo->lo_sysid); in lf_print_owner()
2602 } else if (lo->lo_flags & F_FLOCK) { in lf_print_owner()
2603 printf("file %p", lo->lo_id); in lf_print_owner()
2605 printf("local pid %d", lo->lo_pid); in lf_print_owner()
2617 lf_print_owner(lock->lf_owner); in lf_print()
2618 printf("\nvnode %p", lock->lf_vnode); in lf_print()
2619 VOP_PRINT(lock->lf_vnode); in lf_print()
2621 lock->lf_type == F_RDLCK ? "shared" : in lf_print()
2622 lock->lf_type == F_WRLCK ? "exclusive" : in lf_print()
2623 lock->lf_type == F_UNLCK ? "unlock" : "unknown", in lf_print()
2624 (intmax_t)lock->lf_start); in lf_print()
2625 if (lock->lf_end == OFF_MAX) in lf_print()
2628 printf("%jd", (intmax_t)lock->lf_end); in lf_print()
2629 if (!LIST_EMPTY(&lock->lf_outedges)) in lf_print()
2631 (void *)LIST_FIRST(&lock->lf_outedges)->le_to); in lf_print()
2642 printf("%s: Lock list for vnode %p:\n", tag, lock->lf_vnode); in lf_printlist()
2643 LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) { in lf_printlist()
2645 lf_print_owner(lock->lf_owner); in lf_printlist()
2647 lf->lf_type == F_RDLCK ? "shared" : in lf_printlist()
2648 lf->lf_type == F_WRLCK ? "exclusive" : in lf_printlist()
2649 lf->lf_type == F_UNLCK ? "unlock" : in lf_printlist()
2650 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end); in lf_printlist()
2651 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) { in lf_printlist()
2652 blk = e->le_to; in lf_printlist()
2654 lf_print_owner(blk->lf_owner); in lf_printlist()
2656 blk->lf_type == F_RDLCK ? "shared" : in lf_printlist()
2657 blk->lf_type == F_WRLCK ? "exclusive" : in lf_printlist()
2658 blk->lf_type == F_UNLCK ? "unlock" : in lf_printlist()
2659 "unknown", (intmax_t)blk->lf_start, in lf_printlist()
2660 (intmax_t)blk->lf_end); in lf_printlist()
2661 if (!LIST_EMPTY(&blk->lf_inedges)) in lf_printlist()