Lines Matching full:lock
70 #include <sys/lock.h>
172 * This structure is used to keep track of both local and remote lock
174 * the lock owner structure. Each possible lock owner (local proc for
179 * If a lock owner has a lock that blocks some other lock or a lock
180 * that is waiting for some other lock, it also has a vertex in the
196 pid_t lo_pid; /* (c) Process Id of the lock owner */
197 int lo_sysid; /* (c) System Id of the lock owner */
198 int lo_hash; /* (c) Used to lock the appropriate chain */
205 struct sx lock; member
220 * lock that prevents the lock from being granted and also to each
221 * older pending lock that would block them if it was active. The
225 * therefore they cannot create loops in the lock graph.
227 * The second graph is a global graph of lock owners. Each lock owner
230 * corresponding to owner of the new pending lock and the owner of the
231 * lock upon which it waits. In order to prevent deadlock, we only add
234 * The lock owner graph is topologically sorted, i.e. if a node has
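The comment above describes keeping the owner graph topologically sorted so that a request can be refused as a deadlock when recording the new wait edge would create a cycle. A minimal stand-alone sketch of that idea (not the kernel's actual graph code, which maintains an explicit order so most insertions avoid a full search) is a plain reachability check in the waits-for graph:

    /*
     * Hypothetical, simplified sketch of waits-for deadlock detection.
     * The real code additionally keeps the vertices in topological
     * order so that most edge insertions need no reachability search.
     */
    #include <stdbool.h>
    #include <errno.h>

    #define MAX_OWNERS 8                    /* illustration only */

    struct owner_vertex {
            bool    edge[MAX_OWNERS];       /* edge[i]: this owner waits on owner i */
    };

    static struct owner_vertex graph[MAX_OWNERS];

    /* Depth-first search: can 'target' be reached starting from 'from'? */
    static bool
    reaches(int from, int target, bool *visited)
    {
            if (from == target)
                    return (true);
            visited[from] = true;
            for (int i = 0; i < MAX_OWNERS; i++)
                    if (graph[from].edge[i] && !visited[i] &&
                        reaches(i, target, visited))
                            return (true);
            return (false);
    }

    /*
     * Record "owner x waits on owner y".  Refuse with EDEADLK if y can
     * already (transitively) reach x, since adding x->y would then
     * close a cycle in the waits-for graph.
     */
    static int
    owner_graph_add_edge(int x, int y)
    {
            bool visited[MAX_OWNERS] = { false };

            if (reaches(y, x, visited))
                    return (EDEADLK);
            graph[x].edge[y] = true;
            return (0);
    }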
259 struct lock_owner *v_owner; /* (c) corresponding lock owner */
282 sx_init(&lf_lock_states_lock, "lock states lock"); in lf_init()
286 sx_init(&lf_lock_owners[i].lock, "lock owners lock"); in lf_init()
290 sx_init(&lf_owner_graph_lock, "owner graph lock"); in lf_init()
296 * Generate a hash value for a lock owner.
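Only the comment line is matched here, so the actual hash inputs are not visible. A hedged sketch of such a hash, assuming the chain index is folded from the owner's pid and sysid and a hypothetical table size:

    #include <sys/types.h>
    #include <stdint.h>

    #define LOCK_OWNER_HASH_SIZE 256        /* assumed table size, illustration only */

    /*
     * Hypothetical sketch: fold the lock owner's identity (pid and, for
     * remote owners, system id) into a chain index.  The real function
     * may hash different fields depending on the lock flavour.
     */
    static int
    lock_owner_hash(pid_t pid, int sysid)
    {
            uint32_t h;

            h = (uint32_t)pid * 31 + (uint32_t)sysid;
            return (h % LOCK_OWNER_HASH_SIZE);
    }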
316 * Return true if a lock owner matches the details passed to
340 printf("Allocated lock %p\n", lf); in lf_alloc_lock()
343 sx_xlock(&lf_lock_owners[lo->lo_hash].lock); in lf_alloc_lock()
345 sx_xunlock(&lf_lock_owners[lo->lo_hash].lock); in lf_alloc_lock()
353 lf_free_lock(struct lockf_entry *lock) in lf_free_lock() argument
357 KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock)); in lf_free_lock()
358 if (--lock->lf_refs > 0) in lf_free_lock()
362 * reclaim the entry if this is the last lock in lf_free_lock()
365 struct lock_owner *lo = lock->lf_owner; in lf_free_lock()
367 KASSERT(LIST_EMPTY(&lock->lf_outedges), in lf_free_lock()
368 ("freeing lock with dependencies")); in lf_free_lock()
369 KASSERT(LIST_EMPTY(&lock->lf_inedges), in lf_free_lock()
370 ("freeing lock with dependants")); in lf_free_lock()
371 chainlock = &lf_lock_owners[lo->lo_hash].lock; in lf_free_lock()
373 KASSERT(lo->lo_refs > 0, ("lock owner refcount")); in lf_free_lock()
378 printf("lf_free_lock: freeing lock owner %p\n", in lf_free_lock()
391 printf("Freed lock owner %p\n", lo); in lf_free_lock()
396 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) { in lf_free_lock()
397 vrele(lock->lf_vnode); in lf_free_lock()
398 lock->lf_vnode = NULL; in lf_free_lock()
402 printf("Freed lock %p\n", lock); in lf_free_lock()
404 free(lock, M_LOCKF); in lf_free_lock()
417 struct lockf_entry *lock; in lf_advlockasync() local
428 * creating a lock owner for this one. in lf_advlockasync()
492 * Map our arguments to an existing lock owner or create one in lf_advlockasync()
496 sx_xlock(&lf_lock_owners[hash].lock); in lf_advlockasync()
502 * We initialise the lock with a reference in lf_advlockasync()
510 printf("Allocated lock owner %p\n", lo); in lf_advlockasync()
532 printf("lf_advlockasync: new lock owner %p ", lo); in lf_advlockasync()
541 * We have seen this lock owner before, increase its in lf_advlockasync()
547 sx_xunlock(&lf_lock_owners[hash].lock); in lf_advlockasync()
554 lock = lf_alloc_lock(NULL); in lf_advlockasync()
555 lock->lf_refs = 1; in lf_advlockasync()
556 lock->lf_start = start; in lf_advlockasync()
557 lock->lf_end = end; in lf_advlockasync()
558 lock->lf_owner = lo; in lf_advlockasync()
559 lock->lf_vnode = vp; in lf_advlockasync()
569 lock->lf_type = fl->l_type; in lf_advlockasync()
570 LIST_INIT(&lock->lf_outedges); in lf_advlockasync()
571 LIST_INIT(&lock->lf_inedges); in lf_advlockasync()
572 lock->lf_async_task = ap->a_task; in lf_advlockasync()
573 lock->lf_flags = ap->a_flags; in lf_advlockasync()
584 lf_free_lock(lock); in lf_advlockasync()
619 lf_free_lock(lock); in lf_advlockasync()
656 lf_free_lock(lock); in lf_advlockasync()
662 error = lf_setlock(state, lock, vp, ap->a_cookiep); in lf_advlockasync()
666 error = lf_clearlock(state, lock); in lf_advlockasync()
667 lf_free_lock(lock); in lf_advlockasync()
671 error = lf_getlock(state, lock, fl); in lf_advlockasync()
672 lf_free_lock(lock); in lf_advlockasync()
677 error = lf_cancel(state, lock, *ap->a_cookiep); in lf_advlockasync()
680 lf_free_lock(lock); in lf_advlockasync()
684 lf_free_lock(lock); in lf_advlockasync()
692 * lock list becoming disordered or containing mutually in lf_advlockasync()
696 LIST_FOREACH(lock, &state->ls_active, lf_link) { in lf_advlockasync()
698 if (LIST_NEXT(lock, lf_link)) in lf_advlockasync()
699 KASSERT((lock->lf_start in lf_advlockasync()
700 <= LIST_NEXT(lock, lf_link)->lf_start), in lf_advlockasync()
703 if (lock == lf) in lf_advlockasync()
705 KASSERT(!lf_blocks(lock, lf), in lf_advlockasync()
707 if (lock->lf_owner == lf->lf_owner) in lf_advlockasync()
708 KASSERT(!lf_overlaps(lock, lf), in lf_advlockasync()
712 LIST_FOREACH(lock, &state->ls_pending, lf_link) { in lf_advlockasync()
713 KASSERT(!LIST_EMPTY(&lock->lf_outedges), in lf_advlockasync()
714 ("pending lock which should be active")); in lf_advlockasync()
754 struct lockf_entry *lock, *nlock; in lf_purgelocks() local
783 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) { in lf_purgelocks()
784 LIST_REMOVE(lock, lf_link); in lf_purgelocks()
785 lf_remove_outgoing(lock); in lf_purgelocks()
786 lf_remove_incoming(lock); in lf_purgelocks()
789 * If it's an async lock, we can just free it in lf_purgelocks()
793 if (lock->lf_async_task) { in lf_purgelocks()
794 lf_free_lock(lock); in lf_purgelocks()
796 lock->lf_flags |= F_INTR; in lf_purgelocks()
797 wakeup(lock); in lf_purgelocks()
819 ("lock pending for %p", state)); in lf_purgelocks()
820 LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) { in lf_purgelocks()
821 LIST_REMOVE(lock, lf_link); in lf_purgelocks()
822 lf_free_lock(lock); in lf_purgelocks()
843 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
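A conventional way to implement that test (a sketch, not necessarily the exact body of lf_blocks()): two entries conflict only if they belong to different owners, their byte ranges overlap, and at least one of them is exclusive.

    #include <stdbool.h>
    #include <fcntl.h>          /* F_RDLCK, F_WRLCK, F_UNLCK */
    #include <sys/types.h>      /* off_t */

    /* Minimal stand-in holding just the fields the test needs. */
    struct lk {
            void    *owner;
            short    type;              /* F_RDLCK, F_WRLCK or F_UNLCK */
            off_t    start, end;        /* inclusive byte range */
    };

    static bool
    ranges_overlap(const struct lk *x, const struct lk *y)
    {
            return (x->start <= y->end && y->start <= x->end);
    }

    /*
     * Two locks block each other when they have different owners, their
     * ranges overlap, and at least one of them is an exclusive lock.
     */
    static bool
    locks_block(const struct lk *x, const struct lk *y)
    {
            return (x->owner != y->owner &&
                (x->type == F_WRLCK || y->type == F_WRLCK) &&
                ranges_overlap(x, y));
    }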
855 * Allocate a lock edge from the free list
865 * Free a lock edge.
875 * Ensure that the lock's owner has a corresponding vertex in the
879 lf_alloc_vertex(struct lockf_entry *lock) in lf_alloc_vertex() argument
883 if (!lock->lf_owner->lo_vertex) in lf_alloc_vertex()
884 lock->lf_owner->lo_vertex = in lf_alloc_vertex()
885 graph_alloc_vertex(g, lock->lf_owner); in lf_alloc_vertex()
889 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
901 KASSERT(e->le_to != y, ("adding lock edge twice")); in lf_add_edge()
925 * Remove an edge from the lock graph.
943 * Remove all out-going edges from lock x.
956 * Remove all in-coming edges from lock x.
970 * from lock to each blocking lock.
973 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock) in lf_add_outgoing() argument
983 if (overlap->lf_start > lock->lf_end) in lf_add_outgoing()
985 if (!lf_blocks(lock, overlap)) in lf_add_outgoing()
989 * We've found a blocking lock. Add the corresponding in lf_add_outgoing()
993 error = lf_add_edge(lock, overlap); in lf_add_outgoing()
1000 lf_remove_outgoing(lock); in lf_add_outgoing()
1010 * only happens if we are blocked by at least one active lock in lf_add_outgoing()
1014 if (!lf_blocks(lock, overlap)) in lf_add_outgoing()
1017 * We've found a blocking lock. Add the corresponding in lf_add_outgoing()
1021 error = lf_add_edge(lock, overlap); in lf_add_outgoing()
1028 lf_remove_outgoing(lock); in lf_add_outgoing()
1038 * edge from lock to each blocking lock.
1041 lf_add_incoming(struct lockf *state, struct lockf_entry *lock) in lf_add_incoming() argument
1053 if (!lf_blocks(lock, overlap)) in lf_add_incoming()
1057 * We've found a blocking lock. Add the corresponding in lf_add_incoming()
1061 error = lf_add_edge(overlap, lock); in lf_add_incoming()
1068 lf_remove_incoming(lock); in lf_add_incoming()
1077 * Insert lock into the active list, keeping list entries ordered by
1081 lf_insert_lock(struct lockf *state, struct lockf_entry *lock) in lf_insert_lock() argument
1086 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link); in lf_insert_lock()
1092 if (lf->lf_start > lock->lf_start) { in lf_insert_lock()
1093 LIST_INSERT_BEFORE(lf, lock, lf_link); in lf_insert_lock()
1098 LIST_INSERT_AFTER(lfprev, lock, lf_link); in lf_insert_lock()
1102 * Wake up a sleeping lock and remove it from the pending list now
1104 * arrange for the lock to be added to the active list, adjusting any
1129 * longer block. If 'all' is non-zero, the lock has been removed and
1135 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all, in lf_update_dependancies() argument
1141 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) { in lf_update_dependancies()
1143 if (all || !lf_blocks(lock, deplock)) { in lf_update_dependancies()
1156 * Set the start of an existing active lock, updating dependencies and
1160 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start, in lf_set_start() argument
1164 KASSERT(new_start >= lock->lf_start, ("can't increase lock")); in lf_set_start()
1165 lock->lf_start = new_start; in lf_set_start()
1166 LIST_REMOVE(lock, lf_link); in lf_set_start()
1167 lf_insert_lock(state, lock); in lf_set_start()
1168 lf_update_dependancies(state, lock, FALSE, granted); in lf_set_start()
1172 * Set the end of an existing active lock, updating dependencies and
1176 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end, in lf_set_end() argument
1180 KASSERT(new_end <= lock->lf_end, ("can't increase lock")); in lf_set_end()
1181 lock->lf_end = new_end; in lf_set_end()
1182 lf_update_dependancies(state, lock, FALSE, granted); in lf_set_end()
1186 * Add a lock to the active list, updating or removing any current
1191 * As a result of processing the new lock, we may unblock existing
1195 * Since the new lock already has its dependencies set up, we always
1197 * fragment the lock list in some pathological cases but it's probably in lf_advlockasync()
1201 lf_activate_lock(struct lockf *state, struct lockf_entry *lock) in lf_activate_lock() argument
1208 LIST_INSERT_HEAD(&granted, lock, lf_link); in lf_activate_lock()
1211 lock = LIST_FIRST(&granted); in lf_activate_lock()
1212 LIST_REMOVE(lock, lf_link); in lf_activate_lock()
1220 ovcase = lf_findoverlap(&overlap, lock, SELF); in lf_activate_lock()
1231 * 1) overlap == lock in lf_activate_lock()
1232 * 2) overlap contains lock in lf_activate_lock()
1233 * 3) lock contains overlap in lf_activate_lock()
1234 * 4) overlap starts before lock in lf_activate_lock()
1235 * 5) overlap ends after lock in lf_activate_lock()
1241 case 1: /* overlap == lock */ in lf_activate_lock()
1244 * dependants for the new lock, taking in lf_activate_lock()
1246 * or unlock. Remove the old lock. in lf_activate_lock()
1254 case 2: /* overlap contains lock */ in lf_activate_lock()
1256 * Just split the existing lock. in lf_activate_lock()
1258 lf_split(state, overlap, lock, &granted); in lf_activate_lock()
1261 case 3: /* lock contains overlap */ in lf_activate_lock()
1274 case 4: /* overlap starts before lock */ in lf_activate_lock()
1279 lf_set_end(state, overlap, lock->lf_start - 1, in lf_activate_lock()
1284 case 5: /* overlap ends after lock */ in lf_activate_lock()
1289 lf_set_start(state, overlap, lock->lf_end + 1, in lf_activate_lock()
1297 if (lock->lf_type != F_UNLCK) in lf_activate_lock()
1298 lf_print("lf_activate_lock: activated", lock); in lf_activate_lock()
1300 lf_print("lf_activate_lock: unlocked", lock); in lf_activate_lock()
1301 lf_printlist("lf_activate_lock", lock); in lf_activate_lock()
1304 if (lock->lf_type != F_UNLCK) in lf_activate_lock()
1305 lf_insert_lock(state, lock); in lf_activate_lock()
1310 * Cancel a pending lock request, either as a result of a signal or a
1311 * cancel request for an async lock.
1314 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock) in lf_cancel_lock() argument
1319 * Note it is theoretically possible that cancelling this lock in lf_cancel_lock()
1320 * may allow some other pending lock to become in lf_cancel_lock()
1325 * A: lock [0..0] succeeds in lf_cancel_lock()
1326 * B: lock [2..2] succeeds in lf_cancel_lock()
1327 * C: lock [1..2] blocked C->B in lf_cancel_lock()
1328 * D: lock [0..1] blocked C->B,D->A,D->C in lf_cancel_lock()
1333 LIST_REMOVE(lock, lf_link); in lf_cancel_lock()
1339 lf_remove_outgoing(lock); in lf_cancel_lock()
1343 * Removing in-coming edges may allow some other lock to in lf_cancel_lock()
1348 lf_update_dependancies(state, lock, TRUE, &granted); in lf_cancel_lock()
1349 lf_free_lock(lock); in lf_cancel_lock()
1355 lock = LIST_FIRST(&granted); in lf_cancel_lock()
1356 LIST_REMOVE(lock, lf_link); in lf_cancel_lock()
1357 lf_activate_lock(state, lock); in lf_cancel_lock()
1362 * Set a byte-range lock.
1365 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp, in lf_setlock() argument
1373 lf_print("lf_setlock", lock); in lf_setlock()
1380 if (lock->lf_type == F_WRLCK) in lf_setlock()
1382 if (!(lock->lf_flags & F_NOINTR)) in lf_setlock()
1385 * Scan lock list for this file looking for locks that would block us. in lf_setlock()
1387 if (lf_getblock(state, lock)) { in lf_setlock()
1391 if ((lock->lf_flags & F_WAIT) == 0 in lf_setlock()
1392 && lock->lf_async_task == NULL) { in lf_setlock()
1393 lf_free_lock(lock); in lf_setlock()
1401 * waiting for an exclusive lock. in lf_setlock()
1403 if ((lock->lf_flags & F_FLOCK) && in lf_setlock()
1404 lock->lf_type == F_WRLCK) { in lf_setlock()
1405 lock->lf_type = F_UNLCK; in lf_setlock()
1406 lf_activate_lock(state, lock); in lf_setlock()
1407 lock->lf_type = F_WRLCK; in lf_setlock()
1411 * We are blocked. Create edges to each blocking lock, in lf_setlock()
1417 error = lf_add_outgoing(state, lock); in lf_setlock()
1423 lf_print("lf_setlock: deadlock", lock); in lf_setlock()
1425 lf_free_lock(lock); in lf_setlock()
1433 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link); in lf_setlock()
1437 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) { in lf_setlock()
1444 if ((lock->lf_flags & F_WAIT) == 0) { in lf_setlock()
1448 * lock is released, allowing the caller to in lf_setlock()
1449 * make another attempt to take the lock. in lf_setlock()
1451 *cookiep = (void *) lock; in lf_setlock()
1456 lock->lf_refs++; in lf_setlock()
1458 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0); in lf_setlock()
1460 if (lf_free_lock(lock)) { in lf_setlock()
1468 * remove our lock graph edges) and/or by another in lf_setlock()
1469 * process releasing a lock (in which case our edges in lf_setlock()
1474 * removed our lock graph edges. in lf_setlock()
1483 * may now have incoming edges from some newer lock in lf_setlock()
1486 if (lock->lf_flags & F_INTR) { in lf_setlock()
1488 lf_free_lock(lock); in lf_setlock()
1491 if (LIST_EMPTY(&lock->lf_outedges)) { in lf_setlock()
1494 lf_cancel_lock(state, lock); in lf_setlock()
1499 lf_print("lf_setlock: granted", lock); in lf_setlock()
1505 * It looks like we are going to grant the lock. First add in lf_setlock()
1506 * edges from any currently pending lock that the new lock in lf_setlock()
1509 error = lf_add_incoming(state, lock); in lf_setlock()
1513 lf_print("lf_setlock: deadlock", lock); in lf_setlock()
1515 lf_free_lock(lock); in lf_setlock()
1520 * No blocks!! Add the lock. Note that we will in lf_setlock()
1524 lf_activate_lock(state, lock); in lf_setlock()
1531 * Remove a byte-range lock on an inode.
1533 * Generally, find the lock (or an overlap to that lock)
1558 * Check whether there is a blocking lock, and if so return its
1562 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) in lf_getlock() argument
1568 lf_print("lf_getlock", lock); in lf_getlock()
1571 if ((block = lf_getblock(state, lock))) { in lf_getlock()
1588 * Cancel an async lock request.
1591 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie) in lf_cancel() argument
1596 * We need to match this request with an existing lock in lf_cancel()
1602 * Double-check that this lock looks right in lf_cancel()
1606 if (!(reallock->lf_vnode == lock->lf_vnode in lf_cancel()
1607 && reallock->lf_start == lock->lf_start in lf_cancel()
1608 && reallock->lf_end == lock->lf_end)) { in lf_cancel()
1613 * Make sure this lock was async and then just in lf_cancel()
1625 * can free the lock (actually our caller does in lf_cancel()
1634 * We didn't find a matching lock - not much we can do here. in lf_cancel()
1641 * return the first blocking lock.
1644 lf_getblock(struct lockf *state, struct lockf_entry *lock) in lf_getblock() argument
1653 if (overlap->lf_start > lock->lf_end) in lf_getblock()
1655 if (!lf_blocks(lock, overlap)) in lf_getblock()
1663 * Walk the list of locks for an inode to find an overlapping lock (if
1667 * *overlap The place in the lock list to start looking
1668 * lock The lock which is being tested
1670 * owner as lock, or 'OTHER' to test only locks
1675 * 1) overlap == lock
1676 * 2) overlap contains lock
1677 * 3) lock contains overlap
1678 * 4) overlap starts before lock
1679 * 5) overlap ends after lock
1681 * If there is an overlapping lock, '*overlap' is set to point at the
1682 * overlapping lock.
1684 * NOTE: this returns only the FIRST overlapping lock. There
1688 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type) in lf_findoverlap() argument
1699 lf_print("lf_findoverlap: looking for overlap in", lock); in lf_findoverlap()
1701 start = lock->lf_start; in lf_findoverlap()
1702 end = lock->lf_end; in lf_findoverlap()
1708 if (((type & SELF) && lf->lf_owner != lock->lf_owner) || in lf_findoverlap()
1709 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) { in lf_findoverlap()
1722 * 1) overlap == lock in lf_findoverlap()
1723 * 2) overlap contains lock in lf_findoverlap()
1724 * 3) lock contains overlap in lf_findoverlap()
1725 * 4) overlap starts before lock in lf_findoverlap()
1726 * 5) overlap ends after lock in lf_findoverlap()
1741 printf("overlap == lock\n"); in lf_findoverlap()
1750 printf("overlap contains lock\n"); in lf_findoverlap()
1759 printf("lock contains overlap\n"); in lf_findoverlap()
1768 printf("overlap starts before lock\n"); in lf_findoverlap()
1777 printf("overlap ends after lock\n"); in lf_findoverlap()
1788 * Split the existing 'lock1', based on the extent of the lock
1789 * described by 'lock2'. The existing lock should cover 'lock2'
1819 * Make a new lock consisting of the last part of in lf_split()
1820 * the encompassing lock. in lf_split()
1832 * otherwise we may accidentally grant a pending lock that in lf_split()
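Ignoring allocation and graph bookkeeping, the split described above is pure range arithmetic: trim the encompassing lock to the part before 'lock2' and create a new entry for the part after it. A simplified sketch, assuming 'lock1' strictly covers 'lock2' (the real lf_split() clones a lockf_entry for the tail and re-links dependency edges):

    #include <sys/types.h>      /* off_t */

    struct range { off_t start, end; };     /* inclusive */

    /*
     * Split 'outer' around 'inner' (outer must strictly cover inner):
     * shrink 'outer' to the part before 'inner' and return the part
     * after it in '*tail'.
     */
    static void
    range_split(struct range *outer, const struct range *inner,
        struct range *tail)
    {
            tail->start = inner->end + 1;
            tail->end = outer->end;
            outer->end = inner->start - 1;
    }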
1866 * active lock lists to build a list of locks that need in lf_iteratelocks_sysid()
1901 * Call the iterator function for each lock in turn. If the in lf_iteratelocks_sysid()
1928 * active lock lists to build a list of locks that need in lf_iteratelocks_vnode()
1971 * Call the iterator function for each lock in turn. If the in lf_iteratelocks_vnode()
2012 sx_xlock(&lf_lock_owners[i].lock); in lf_countlocks()
2016 sx_xunlock(&lf_lock_owners[i].lock); in lf_countlocks()
2065 ("lock graph vertices disordered")); in graph_check()
2072 ("lock graph vertices disordered")); in graph_check()
2593 * Print description of a lock owner
2610 * Print out a lock.
2613 lf_print(char *tag, struct lockf_entry *lock) in lf_print() argument
2616 printf("%s: lock %p for ", tag, (void *)lock); in lf_print()
2617 lf_print_owner(lock->lf_owner); in lf_print()
2618 printf("\nvnode %p", lock->lf_vnode); in lf_print()
2619 VOP_PRINT(lock->lf_vnode); in lf_print()
2621 lock->lf_type == F_RDLCK ? "shared" : in lf_print()
2622 lock->lf_type == F_WRLCK ? "exclusive" : in lf_print()
2623 lock->lf_type == F_UNLCK ? "unlock" : "unknown", in lf_print()
2624 (intmax_t)lock->lf_start); in lf_print()
2625 if (lock->lf_end == OFF_MAX) in lf_print()
2628 printf("%jd", (intmax_t)lock->lf_end); in lf_print()
2629 if (!LIST_EMPTY(&lock->lf_outedges)) in lf_print()
2631 (void *)LIST_FIRST(&lock->lf_outedges)->le_to); in lf_print()
2637 lf_printlist(char *tag, struct lockf_entry *lock) in lf_printlist() argument
2642 printf("%s: Lock list for vnode %p:\n", tag, lock->lf_vnode); in lf_printlist()
2643 LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) { in lf_printlist()
2645 lf_print_owner(lock->lf_owner); in lf_printlist()