Lines matching refs:lock (FreeBSD sys/kern/kern_lockf.c)

205 	struct sx		lock;  member
286 sx_init(&lf_lock_owners[i].lock, "lock owners lock"); in lf_init()
343 sx_xlock(&lf_lock_owners[lo->lo_hash].lock); in lf_alloc_lock()
345 sx_xunlock(&lf_lock_owners[lo->lo_hash].lock); in lf_alloc_lock()
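Note: the `lock` member at line 205 is a per-chain sx(9) lock. lf_init() initializes one for each lf_lock_owners[] hash chain, and lf_alloc_lock() takes it exclusively around chain manipulation. A minimal userland sketch of the same pattern, with pthread mutexes standing in for sx locks and hypothetical names (LO_HASH_SIZE, owner_chain, chain_insert):

    #include <pthread.h>
    #include <sys/queue.h>

    #define LO_HASH_SIZE 256                    /* hypothetical chain count */

    struct lock_owner {
        LIST_ENTRY(lock_owner) lo_link;
        int lo_hash;                            /* index of the owning chain */
    };

    /* One lock per hash chain, mirroring lf_lock_owners[i].lock. */
    static struct owner_chain {
        pthread_mutex_t lock;
        LIST_HEAD(, lock_owner) head;
    } owner_chains[LO_HASH_SIZE];

    static void
    chains_init(void)                           /* cf. the loop in lf_init() */
    {
        for (int i = 0; i < LO_HASH_SIZE; i++) {
            pthread_mutex_init(&owner_chains[i].lock, NULL);
            LIST_INIT(&owner_chains[i].head);
        }
    }

    static void
    chain_insert(struct lock_owner *lo)
    {
        struct owner_chain *c = &owner_chains[lo->lo_hash];

        pthread_mutex_lock(&c->lock);           /* cf. sx_xlock() */
        LIST_INSERT_HEAD(&c->head, lo, lo_link);
        pthread_mutex_unlock(&c->lock);         /* cf. sx_xunlock() */
    }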
353 lf_free_lock(struct lockf_entry *lock) in lf_free_lock() argument
357 KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock)); in lf_free_lock()
358 if (--lock->lf_refs > 0) in lf_free_lock()
365 struct lock_owner *lo = lock->lf_owner; in lf_free_lock()
367 KASSERT(LIST_EMPTY(&lock->lf_outedges), in lf_free_lock()
369 KASSERT(LIST_EMPTY(&lock->lf_inedges), in lf_free_lock()
371 chainlock = &lf_lock_owners[lo->lo_hash].lock; in lf_free_lock()
396 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) { in lf_free_lock()
397 vrele(lock->lf_vnode); in lf_free_lock()
398 lock->lf_vnode = NULL; in lf_free_lock()
402 printf("Freed lock %p\n", lock); in lf_free_lock()
404 free(lock, M_LOCKF); in lf_free_lock()
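Note: lf_free_lock() (lines 353-404) is a reference-counted destructor. It asserts a positive count, returns early while other holders remain, and only on the last reference detaches from the owner (under the owner's chain lock), releases a remote vnode with vrele(), and frees the entry. The refcount core, as a self-contained userland sketch (struct entry is hypothetical):

    #include <assert.h>
    #include <stdlib.h>

    struct entry {
        int refs;                       /* cf. lf_refs */
    };

    /*
     * Drop one reference; free the entry when the last one goes.
     * Returns non-zero when the entry was actually freed, which is
     * how the sleeper in lf_setlock() (line 1460) detects that it
     * lost a race with lf_purgelocks().
     */
    static int
    entry_free(struct entry *e)
    {
        assert(e->refs > 0);            /* cf. the KASSERT at line 357 */
        if (--e->refs > 0)
            return (0);
        free(e);
        return (1);
    }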
417 struct lockf_entry *lock; in lf_advlockasync() local
496 sx_xlock(&lf_lock_owners[hash].lock); in lf_advlockasync()
547 sx_xunlock(&lf_lock_owners[hash].lock); in lf_advlockasync()
554 lock = lf_alloc_lock(NULL); in lf_advlockasync()
555 lock->lf_refs = 1; in lf_advlockasync()
556 lock->lf_start = start; in lf_advlockasync()
557 lock->lf_end = end; in lf_advlockasync()
558 lock->lf_owner = lo; in lf_advlockasync()
559 lock->lf_vnode = vp; in lf_advlockasync()
569 lock->lf_type = fl->l_type; in lf_advlockasync()
570 LIST_INIT(&lock->lf_outedges); in lf_advlockasync()
571 LIST_INIT(&lock->lf_inedges); in lf_advlockasync()
572 lock->lf_async_task = ap->a_task; in lf_advlockasync()
573 lock->lf_flags = ap->a_flags; in lf_advlockasync()
584 lf_free_lock(lock); in lf_advlockasync()
619 lf_free_lock(lock); in lf_advlockasync()
656 lf_free_lock(lock); in lf_advlockasync()
662 error = lf_setlock(state, lock, vp, ap->a_cookiep); in lf_advlockasync()
666 error = lf_clearlock(state, lock); in lf_advlockasync()
667 lf_free_lock(lock); in lf_advlockasync()
671 error = lf_getlock(state, lock, fl); in lf_advlockasync()
672 lf_free_lock(lock); in lf_advlockasync()
677 error = lf_cancel(state, lock, *ap->a_cookiep); in lf_advlockasync()
680 lf_free_lock(lock); in lf_advlockasync()
684 lf_free_lock(lock); in lf_advlockasync()
696 LIST_FOREACH(lock, &state->ls_active, lf_link) { in lf_advlockasync()
698 if (LIST_NEXT(lock, lf_link)) in lf_advlockasync()
699 KASSERT((lock->lf_start in lf_advlockasync()
700 <= LIST_NEXT(lock, lf_link)->lf_start), in lf_advlockasync()
703 if (lock == lf) in lf_advlockasync()
705 KASSERT(!lf_blocks(lock, lf), in lf_advlockasync()
707 if (lock->lf_owner == lf->lf_owner) in lf_advlockasync()
708 KASSERT(!lf_overlaps(lock, lf), in lf_advlockasync()
712 LIST_FOREACH(lock, &state->ls_pending, lf_link) { in lf_advlockasync()
713 KASSERT(!LIST_EMPTY(&lock->lf_outedges), in lf_advlockasync()
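Note: lines 417-713 show lf_advlockasync() building a lockf_entry from the caller's struct flock and dispatching on the requested operation; lf_setlock() consumes the entry, while every other path frees it, and the DIAGNOSTIC block at the end (lines 696-713) re-checks the active list invariants (sorted by lf_start, no blocking pairs, no same-owner overlaps, every pending entry keeping at least one out-edge). A compilable outline of the dispatch shape, with hypothetical stub declarations in place of the real helpers:

    #include <errno.h>
    #include <fcntl.h>          /* F_SETLK, F_UNLCK, F_GETLK */

    struct lstate;
    struct lentry;

    /* Hypothetical stubs standing in for the kernel lf_* routines. */
    int  lf_setlock_s(struct lstate *, struct lentry *);
    int  lf_clearlock_s(struct lstate *, struct lentry *);
    int  lf_getlock_s(struct lstate *, struct lentry *);
    void lf_free_s(struct lentry *);

    static int
    advlock_dispatch(struct lstate *st, struct lentry *e, int op)
    {
        int error;

        switch (op) {
        case F_SETLK:
            error = lf_setlock_s(st, e);    /* consumes e; may sleep */
            break;
        case F_UNLCK:
            error = lf_clearlock_s(st, e);
            lf_free_s(e);
            break;
        case F_GETLK:
            error = lf_getlock_s(st, e);
            lf_free_s(e);
            break;
        default:
            /* The real code also handles cancellation requests here. */
            lf_free_s(e);
            error = EINVAL;
            break;
        }
        return (error);
    }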
754 struct lockf_entry *lock, *nlock; in lf_purgelocks() local
783 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) { in lf_purgelocks()
784 LIST_REMOVE(lock, lf_link); in lf_purgelocks()
785 lf_remove_outgoing(lock); in lf_purgelocks()
786 lf_remove_incoming(lock); in lf_purgelocks()
793 if (lock->lf_async_task) { in lf_purgelocks()
794 lf_free_lock(lock); in lf_purgelocks()
796 lock->lf_flags |= F_INTR; in lf_purgelocks()
797 wakeup(lock); in lf_purgelocks()
820 LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) { in lf_purgelocks()
821 LIST_REMOVE(lock, lf_link); in lf_purgelocks()
822 lf_free_lock(lock); in lf_purgelocks()
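Note: lf_purgelocks() (lines 754-822) tears down a vnode's lock state. Each pending entry is unlinked from the list and the dependency graph, then either freed (asynchronous waiters are notified through their task) or flagged F_INTR and woken so the sleeper in lf_setlock() can observe the interruption; active entries are simply unlinked and freed. A userland sketch of the flag-and-wake half, using a condition variable in place of wakeup(9) (all names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  state_cv   = PTHREAD_COND_INITIALIZER;

    struct waiter {
        bool granted;
        bool interrupted;               /* cf. the F_INTR flag */
    };

    /* cf. lines 796-797: mark the sleeper interrupted, then wake it. */
    static void
    purge_waiter(struct waiter *w)
    {
        pthread_mutex_lock(&state_lock);
        w->interrupted = true;              /* lock->lf_flags |= F_INTR */
        pthread_cond_broadcast(&state_cv);  /* wakeup(lock) */
        pthread_mutex_unlock(&state_lock);
    }

    /* The sleeper re-checks the flag after waking, cf. line 1486. */
    static bool
    wait_for_grant(struct waiter *w)
    {
        bool ok;

        pthread_mutex_lock(&state_lock);
        while (!w->granted && !w->interrupted)
            pthread_cond_wait(&state_cv, &state_lock);
        ok = !w->interrupted;           /* false: caller returns EINTR */
        pthread_mutex_unlock(&state_lock);
        return (ok);
    }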
879 lf_alloc_vertex(struct lockf_entry *lock) in lf_alloc_vertex() argument
883 if (!lock->lf_owner->lo_vertex) in lf_alloc_vertex()
884 lock->lf_owner->lo_vertex = in lf_alloc_vertex()
885 graph_alloc_vertex(g, lock->lf_owner); in lf_alloc_vertex()
973 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock) in lf_add_outgoing() argument
983 if (overlap->lf_start > lock->lf_end) in lf_add_outgoing()
985 if (!lf_blocks(lock, overlap)) in lf_add_outgoing()
993 error = lf_add_edge(lock, overlap); in lf_add_outgoing()
1000 lf_remove_outgoing(lock); in lf_add_outgoing()
1014 if (!lf_blocks(lock, overlap)) in lf_add_outgoing()
1021 error = lf_add_edge(lock, overlap); in lf_add_outgoing()
1028 lf_remove_outgoing(lock); in lf_add_outgoing()
1041 lf_add_incoming(struct lockf *state, struct lockf_entry *lock) in lf_add_incoming() argument
1053 if (!lf_blocks(lock, overlap)) in lf_add_incoming()
1061 error = lf_add_edge(overlap, lock); in lf_add_incoming()
1068 lf_remove_incoming(lock); in lf_add_incoming()
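Note: lf_add_outgoing() (lines 973-1028) walks the active list adding one graph edge per entry that blocks the new lock; because the active list is sorted on lf_start, the scan stops as soon as overlap->lf_start passes lock->lf_end (line 983), and a failed lf_add_edge() (deadlock detected) unwinds every edge added so far. It then repeats over the unsorted pending list without the early break. lf_add_incoming() (lines 1041-1068) is the mirror image with the edge direction reversed. A sketch of the first loop, with hypothetical types and helper names:

    #include <stdbool.h>
    #include <sys/queue.h>

    struct ent {
        LIST_ENTRY(ent) link;
        long start, end;                /* cf. lf_start / lf_end */
    };
    LIST_HEAD(entlist, ent);

    /* Hypothetical stand-ins for lf_blocks(), lf_add_edge(),
     * lf_remove_outgoing(). */
    bool ent_blocks(struct ent *, struct ent *);
    int  edge_add(struct ent *from, struct ent *to);
    void edges_unwind(struct ent *);

    static int
    add_outgoing(struct entlist *active, struct ent *lock)
    {
        struct ent *overlap;
        int error;

        LIST_FOREACH(overlap, active, link) {
            if (overlap->start > lock->end)
                break;                  /* sorted: no later entry can block */
            if (!ent_blocks(lock, overlap))
                continue;
            error = edge_add(lock, overlap);
            if (error != 0) {
                /* Deadlock: unwind the edges added so far. */
                edges_unwind(lock);
                return (error);
            }
        }
        return (0);
    }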
1081 lf_insert_lock(struct lockf *state, struct lockf_entry *lock) in lf_insert_lock() argument
1086 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link); in lf_insert_lock()
1092 if (lf->lf_start > lock->lf_start) { in lf_insert_lock()
1093 LIST_INSERT_BEFORE(lf, lock, lf_link); in lf_insert_lock()
1098 LIST_INSERT_AFTER(lfprev, lock, lf_link); in lf_insert_lock()
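Note: lf_insert_lock() (lines 1081-1098) keeps the active list sorted by lf_start: it inserts before the first entry that starts later, falls back to inserting after the last entry, and handles the empty list with an insert at the head. A self-contained sketch of that sorted insert using the same <sys/queue.h> macros (type names are hypothetical):

    #include <sys/queue.h>

    struct ent {
        LIST_ENTRY(ent) link;
        long start;                     /* cf. lf_start */
    };
    LIST_HEAD(entlist, ent);

    static void
    insert_sorted(struct entlist *head, struct ent *lock)
    {
        struct ent *lf, *lfprev;

        if (LIST_EMPTY(head)) {         /* cf. line 1086 */
            LIST_INSERT_HEAD(head, lock, link);
            return;
        }
        lfprev = NULL;
        LIST_FOREACH(lf, head, link) {
            if (lf->start > lock->start) {      /* cf. line 1092 */
                LIST_INSERT_BEFORE(lf, lock, link);
                return;
            }
            lfprev = lf;
        }
        LIST_INSERT_AFTER(lfprev, lock, link);  /* cf. line 1098 */
    }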
1135 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all, in lf_update_dependancies() argument
1141 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) { in lf_update_dependancies()
1143 if (all || !lf_blocks(lock, deplock)) { in lf_update_dependancies()
1160 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start, in lf_set_start() argument
1164 KASSERT(new_start >= lock->lf_start, ("can't increase lock")); in lf_set_start()
1165 lock->lf_start = new_start; in lf_set_start()
1166 LIST_REMOVE(lock, lf_link); in lf_set_start()
1167 lf_insert_lock(state, lock); in lf_set_start()
1168 lf_update_dependancies(state, lock, FALSE, granted); in lf_set_start()
1176 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end, in lf_set_end() argument
1180 KASSERT(new_end <= lock->lf_end, ("can't increase lock")); in lf_set_end()
1181 lock->lf_end = new_end; in lf_set_end()
1182 lf_update_dependancies(state, lock, FALSE, granted); in lf_set_end()
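Note: lf_set_start() and lf_set_end() (lines 1160-1182) shrink a granted lock's range; the KASSERTs encode that shrinking is the only legal direction. Changing lf_start invalidates the sort key, so the entry is re-inserted, and both paths call lf_update_dependancies() to collect waiters that no longer conflict with the narrowed range onto a granted list. A sketch of the shrink-and-regrant step, reusing the sorted-insert sketch above (types and helpers hypothetical):

    #include <assert.h>
    #include <sys/queue.h>

    struct ent {
        LIST_ENTRY(ent) link;
        long start, end;
    };
    LIST_HEAD(entlist, ent);

    void insert_sorted(struct entlist *, struct ent *);
    /* cf. lf_update_dependancies(): move newly unblocked waiters
     * onto the granted list. */
    void update_deps(struct ent *, struct entlist *granted);

    /* cf. lf_set_start(): the sort key changed, so re-insert. */
    static void
    set_start(struct entlist *active, struct ent *lock, long new_start,
        struct entlist *granted)
    {
        assert(new_start >= lock->start);   /* range may only shrink */
        lock->start = new_start;
        LIST_REMOVE(lock, link);
        insert_sorted(active, lock);
        update_deps(lock, granted);
    }

    /* cf. lf_set_end(): the sort key is unchanged, so no re-insert. */
    static void
    set_end(struct ent *lock, long new_end, struct entlist *granted)
    {
        assert(new_end <= lock->end);
        lock->end = new_end;
        update_deps(lock, granted);
    }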
1201 lf_activate_lock(struct lockf *state, struct lockf_entry *lock) in lf_activate_lock() argument
1208 LIST_INSERT_HEAD(&granted, lock, lf_link); in lf_activate_lock()
1211 lock = LIST_FIRST(&granted); in lf_activate_lock()
1212 LIST_REMOVE(lock, lf_link); in lf_activate_lock()
1220 ovcase = lf_findoverlap(&overlap, lock, SELF); in lf_activate_lock()
1258 lf_split(state, overlap, lock, &granted); in lf_activate_lock()
1279 lf_set_end(state, overlap, lock->lf_start - 1, in lf_activate_lock()
1289 lf_set_start(state, overlap, lock->lf_end + 1, in lf_activate_lock()
1297 if (lock->lf_type != F_UNLCK) in lf_activate_lock()
1298 lf_print("lf_activate_lock: activated", lock); in lf_activate_lock()
1300 lf_print("lf_activate_lock: unlocked", lock); in lf_activate_lock()
1301 lf_printlist("lf_activate_lock", lock); in lf_activate_lock()
1304 if (lock->lf_type != F_UNLCK) in lf_activate_lock()
1305 lf_insert_lock(state, lock); in lf_activate_lock()
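Note: lf_activate_lock() (lines 1201-1305) merges the new lock into the active list by classifying each same-owner overlap found by lf_findoverlap(): a fully contained overlap is split around the new range (line 1258), while partial overlaps are trimmed with lf_set_end() or lf_set_start() (lines 1279, 1289). A sketch of the interval arithmetic behind the split case, with a hypothetical entry type:

    #include <stdlib.h>

    struct ent {
        long start, end;    /* closed range, cf. lf_start / lf_end */
    };

    /*
     * Carve lock2's range out of lock1 when lock2 lies strictly
     * inside it, cf. lf_split(): lock1 keeps the head
     * [start1, start2 - 1] and a new entry takes the tail
     * [end2 + 1, end1].  Returns NULL on allocation failure.
     */
    static struct ent *
    split(struct ent *lock1, const struct ent *lock2)
    {
        struct ent *tail = malloc(sizeof(*tail));

        if (tail == NULL)
            return (NULL);
        tail->start = lock2->end + 1;   /* cf. line 1289 */
        tail->end = lock1->end;
        lock1->end = lock2->start - 1;  /* cf. line 1279 */
        return (tail);
    }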
1314 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock) in lf_cancel_lock() argument
1333 LIST_REMOVE(lock, lf_link); in lf_cancel_lock()
1339 lf_remove_outgoing(lock); in lf_cancel_lock()
1348 lf_update_dependancies(state, lock, TRUE, &granted); in lf_cancel_lock()
1349 lf_free_lock(lock); in lf_cancel_lock()
1355 lock = LIST_FIRST(&granted); in lf_cancel_lock()
1356 LIST_REMOVE(lock, lf_link); in lf_cancel_lock()
1357 lf_activate_lock(state, lock); in lf_cancel_lock()
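Note: lf_cancel_lock() (lines 1314-1357) removes a pending request's outgoing edges, which may unblock other waiters; lf_update_dependancies() gathers those onto a granted list (with all=TRUE, line 1348), and the tail of the function drains that list, activating each entry in turn. A sketch of the drain idiom (names hypothetical):

    #include <sys/queue.h>

    struct ent {
        LIST_ENTRY(ent) link;
    };
    LIST_HEAD(entlist, ent);

    /* cf. lf_activate_lock(): activating one entry may in turn
     * unblock others (it runs its own granted loop internally). */
    void activate_one(struct ent *);

    /* cf. lines 1355-1357: pop and activate until the list drains. */
    static void
    drain_granted(struct entlist *granted)
    {
        struct ent *lock;

        while (!LIST_EMPTY(granted)) {
            lock = LIST_FIRST(granted);
            LIST_REMOVE(lock, link);
            activate_one(lock);
        }
    }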
1365 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp, in lf_setlock() argument
1373 lf_print("lf_setlock", lock); in lf_setlock()
1380 if (lock->lf_type == F_WRLCK) in lf_setlock()
1382 if (!(lock->lf_flags & F_NOINTR)) in lf_setlock()
1387 if (lf_getblock(state, lock)) { in lf_setlock()
1391 if ((lock->lf_flags & F_WAIT) == 0 in lf_setlock()
1392 && lock->lf_async_task == NULL) { in lf_setlock()
1393 lf_free_lock(lock); in lf_setlock()
1403 if ((lock->lf_flags & F_FLOCK) && in lf_setlock()
1404 lock->lf_type == F_WRLCK) { in lf_setlock()
1405 lock->lf_type = F_UNLCK; in lf_setlock()
1406 lf_activate_lock(state, lock); in lf_setlock()
1407 lock->lf_type = F_WRLCK; in lf_setlock()
1417 error = lf_add_outgoing(state, lock); in lf_setlock()
1423 lf_print("lf_setlock: deadlock", lock); in lf_setlock()
1425 lf_free_lock(lock); in lf_setlock()
1433 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link); in lf_setlock()
1437 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) { in lf_setlock()
1444 if ((lock->lf_flags & F_WAIT) == 0) { in lf_setlock()
1451 *cookiep = (void *) lock; in lf_setlock()
1456 lock->lf_refs++; in lf_setlock()
1458 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0); in lf_setlock()
1460 if (lf_free_lock(lock)) { in lf_setlock()
1486 if (lock->lf_flags & F_INTR) { in lf_setlock()
1488 lf_free_lock(lock); in lf_setlock()
1491 if (LIST_EMPTY(&lock->lf_outedges)) { in lf_setlock()
1494 lf_cancel_lock(state, lock); in lf_setlock()
1499 lf_print("lf_setlock: granted", lock); in lf_setlock()
1509 error = lf_add_incoming(state, lock); in lf_setlock()
1513 lf_print("lf_setlock: deadlock", lock); in lf_setlock()
1515 lf_free_lock(lock); in lf_setlock()
1524 lf_activate_lock(state, lock); in lf_setlock()
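Note: lf_setlock() (lines 1365-1524) is the blocking path. If lf_getblock() finds a conflict and the caller will not wait, the request fails with the entry freed; otherwise lf_add_outgoing() wires the request into the owner graph (failure means deadlock, hence the "lf_setlock: deadlock" print), the entry joins the pending list, and the thread sleeps on the entry via sx_sleep() until its out-edge list drains. After waking it must classify what happened: F_INTR means the state was purged (EINTR), remaining out-edges mean the sleep itself was interrupted and the request must be cancelled, and an empty out-edge list means the granter already moved the entry to the active list. A condensed sketch of that re-check, with hypothetical helpers:

    #include <errno.h>
    #include <stdbool.h>

    struct ent;

    bool ent_interrupted(struct ent *);     /* cf. lf_flags & F_INTR */
    bool ent_has_outedges(struct ent *);    /* cf. !LIST_EMPTY(&lf_outedges) */
    void cancel_pending(struct ent *);      /* cf. lf_cancel_lock() */

    /* cf. lines 1486-1494: classify the wakeup after sx_sleep(). */
    static int
    setlock_wakeup(struct ent *lock, int sleep_error)
    {
        if (ent_interrupted(lock))      /* woken by purge: report EINTR */
            return (EINTR);
        if (!ent_has_outedges(lock))    /* granter already activated us */
            return (0);
        cancel_pending(lock);           /* signal/timeout while blocked */
        return (sleep_error);
    }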
1562 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) in lf_getlock() argument
1568 lf_print("lf_getlock", lock); in lf_getlock()
1571 if ((block = lf_getblock(state, lock))) { in lf_getlock()
1591 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie) in lf_cancel() argument
1606 if (!(reallock->lf_vnode == lock->lf_vnode in lf_cancel()
1607 && reallock->lf_start == lock->lf_start in lf_cancel()
1608 && reallock->lf_end == lock->lf_end)) { in lf_cancel()
1644 lf_getblock(struct lockf *state, struct lockf_entry *lock) in lf_getblock() argument
1653 if (overlap->lf_start > lock->lf_end) in lf_getblock()
1655 if (!lf_blocks(lock, overlap)) in lf_getblock()
1688 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type) in lf_findoverlap() argument
1699 lf_print("lf_findoverlap: looking for overlap in", lock); in lf_findoverlap()
1701 start = lock->lf_start; in lf_findoverlap()
1702 end = lock->lf_end; in lf_findoverlap()
1708 if (((type & SELF) && lf->lf_owner != lock->lf_owner) || in lf_findoverlap()
1709 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) { in lf_findoverlap()
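Note: lf_getblock() (lines 1644-1655) and lf_findoverlap() (lines 1688-1709) both reduce to closed-interval arithmetic on [lf_start, lf_end]: two ranges overlap when each starts at or before the other ends, a scan of the sorted list can stop once a candidate's start exceeds the end of the range of interest, and the SELF/OTHERS filters select same-owner or different-owner entries. The predicates, as a small self-contained sketch (struct range is hypothetical):

    #include <stdbool.h>

    /* Closed range [start, end]; the kernel uses OFF_MAX for "to EOF". */
    struct range {
        long start, end;
        int  owner;
        bool exclusive;     /* F_WRLCK */
    };

    /* cf. lf_overlaps(): neither range ends before the other begins. */
    static bool
    overlaps(const struct range *a, const struct range *b)
    {
        return (a->start <= b->end && b->start <= a->end);
    }

    /* cf. lf_blocks(): different owners, at least one exclusive,
     * and the ranges overlap. */
    static bool
    blocks(const struct range *a, const struct range *b)
    {
        return (a->owner != b->owner &&
            (a->exclusive || b->exclusive) && overlaps(a, b));
    }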
2012 sx_xlock(&lf_lock_owners[i].lock); in lf_countlocks()
2016 sx_xunlock(&lf_lock_owners[i].lock); in lf_countlocks()
2613 lf_print(char *tag, struct lockf_entry *lock) in lf_print() argument
2616 printf("%s: lock %p for ", tag, (void *)lock); in lf_print()
2617 lf_print_owner(lock->lf_owner); in lf_print()
2618 printf("\nvnode %p", lock->lf_vnode); in lf_print()
2619 VOP_PRINT(lock->lf_vnode); in lf_print()
2621 lock->lf_type == F_RDLCK ? "shared" : in lf_print()
2622 lock->lf_type == F_WRLCK ? "exclusive" : in lf_print()
2623 lock->lf_type == F_UNLCK ? "unlock" : "unknown", in lf_print()
2624 (intmax_t)lock->lf_start); in lf_print()
2625 if (lock->lf_end == OFF_MAX) in lf_print()
2628 printf("%jd", (intmax_t)lock->lf_end); in lf_print()
2629 if (!LIST_EMPTY(&lock->lf_outedges)) in lf_print()
2631 (void *)LIST_FIRST(&lock->lf_outedges)->le_to); in lf_print()
2637 lf_printlist(char *tag, struct lockf_entry *lock) in lf_printlist() argument
2642 printf("%s: Lock list for vnode %p:\n", tag, lock->lf_vnode); in lf_printlist()
2643 LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) { in lf_printlist()
2645 lf_print_owner(lock->lf_owner); in lf_printlist()