Lines matching full:path in fs/bcachefs/btree_locking.c
24 struct btree_path *path; in bch2_btree_node_lock_counts() local
33 trans_for_each_path(trans, path, i) in bch2_btree_node_lock_counts()
34 if (path != skip && &path->l[level].b->c == b) { in bch2_btree_node_lock_counts()
35 int t = btree_node_locked_type(path, level); in bch2_btree_node_lock_counts()
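bch2_btree_node_lock_counts() tallies, across every path in the transaction, how the node b is locked at the given level, optionally skipping one path. Below is a minimal userspace sketch of the same counting idea; all of the types and names in it (path_sketch, count_node_locks, ...) are invented stand-ins for illustration, not the bcachefs API:

#include <stdio.h>

enum lock_type { UNLOCKED = -1, READ_LOCKED, INTENT_LOCKED, WRITE_LOCKED };

struct path_sketch {
	enum lock_type	held[4];	/* lock type held, per btree level */
	void		*node[4];	/* node pointer, per btree level */
};

struct lock_counts { int n[3]; };	/* read / intent / write tallies */

static struct lock_counts count_node_locks(struct path_sketch *paths, int nr,
					   struct path_sketch *skip,
					   void *b, unsigned level)
{
	struct lock_counts ret = { { 0 } };

	for (int i = 0; i < nr; i++) {
		struct path_sketch *p = &paths[i];

		if (p != skip && p->node[level] == b) {
			int t = p->held[level];

			if (t != UNLOCKED)
				ret.n[t]++;
		}
	}
	return ret;
}

int main(void)
{
	int node;	/* stands in for a btree node */
	struct path_sketch paths[2] = {
		{ .held = { READ_LOCKED   }, .node = { &node } },
		{ .held = { INTENT_LOCKED }, .node = { &node } },
	};
	struct lock_counts c = count_node_locks(paths, 2, NULL, &node, 0);

	printf("read=%d intent=%d write=%d\n", c.n[0], c.n[1], c.n[2]);
	return 0;
}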
47 struct btree_path *path, struct btree *b) in bch2_btree_node_unlock_write() argument
49 bch2_btree_node_unlock_write_inlined(trans, path, b); in bch2_btree_node_unlock_write()
325 struct btree_path *path = paths + path_idx; in bch2_check_for_deadlock() local
326 if (!path->nodes_locked) in bch2_check_for_deadlock()
338 int lock_held = btree_node_locked_type(path, top->level); in bch2_check_for_deadlock()
343 b = &READ_ONCE(path->l[top->level].b)->c; in bch2_check_for_deadlock()
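bch2_check_for_deadlock() is the cycle detector for the lock graph: for a blocked transaction it follows, path by path and level by level, the chain of lock holders it is waiting on, and aborts someone if the chain loops back on itself. A toy model of just the cycle-following step, with structure and names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TRANS 8

/*
 * waits_on[i] is the index of the transaction holding the lock that
 * transaction i is blocked on, or -1 if i is not blocked.
 */
static bool deadlocked(int start, const int *waits_on)
{
	bool seen[MAX_TRANS] = { false };
	int i = start;

	while (waits_on[i] >= 0) {
		if (seen[i])
			return true;	/* revisited a transaction: cycle */
		seen[i] = true;
		i = waits_on[i];
	}
	return false;			/* chain ended: no deadlock */
}

int main(void)
{
	/* 0 waits on 1, 1 waits on 2, 2 waits on 0: a cycle */
	int waits_on[MAX_TRANS] = { 1, 2, 0, -1, -1, -1, -1, -1 };

	printf("deadlock: %s\n", deadlocked(0, waits_on) ? "yes" : "no");
	return 0;
}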
417 int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path, in __bch2_btree_node_lock_write() argument
436 mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED); in __bch2_btree_node_lock_write()
442 struct btree_path *path, in bch2_btree_node_lock_write_nofail() argument
445 int ret = __btree_node_lock_write(trans, path, b, true); in bch2_btree_node_lock_write_nofail()
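All of these build on SIX locks, which have three states: shared (read), intent, and exclusive (write). A path that may modify a node takes an intent lock, which coexists with readers but excludes other intent holders, and escalates to the write state only for the actual in-memory update; dropping the write state (as in bch2_btree_node_unlock_write() above) falls back to intent rather than releasing the node. The _nofail variant asserts that the write-lock attempt cannot fail instead of returning an error.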
452 struct btree_path *path, in btree_path_get_locks() argument
456 unsigned l = path->level; in btree_path_get_locks()
460 if (!btree_path_node(path, l)) in btree_path_get_locks()
464 ? bch2_btree_node_upgrade(trans, path, l) in btree_path_get_locks()
465 : bch2_btree_node_relock(trans, path, l))) { in btree_path_get_locks()
470 f->b = path->l[l].b; in btree_path_get_locks()
475 } while (l < path->locks_want); in btree_path_get_locks()
483 __bch2_btree_path_unlock(trans, path); in btree_path_get_locks()
484 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); in btree_path_get_locks()
487 path->l[fail_idx].b = upgrade in btree_path_get_locks()
494 if (path->uptodate == BTREE_ITER_NEED_RELOCK) in btree_path_get_locks()
495 path->uptodate = BTREE_ITER_UPTODATE; in btree_path_get_locks()
497 return path->uptodate < BTREE_ITER_NEED_RELOCK; in btree_path_get_locks()
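btree_path_get_locks() is the workhorse behind both relock and upgrade: starting at path->level, it walks upward taking each level until locks_want is satisfied; the first failure records which level failed (and, in the real code, whether it was an upgrade or a relock failure), drops every lock the path holds, and flags the path for re-traversal. A compressed sketch of that control flow, using simplified invented types:

#include <stdbool.h>
#include <stdio.h>

enum { UPTODATE, NEED_RELOCK, NEED_TRAVERSE };

struct path_sketch {
	unsigned	level, locks_want;
	int		uptodate;
	bool		node_present[8];	/* a node exists at this level */
	bool		contended[8];		/* (re)locking it would fail */
	bool		locked[8];
};

static bool path_get_locks(struct path_sketch *p, unsigned *fail_level)
{
	unsigned l = p->level;

	do {
		if (!p->node_present[l])
			break;			/* above the root */

		if (p->contended[l]) {
			*fail_level = l;	/* remember what failed */
			for (unsigned i = 0; i < 8; i++)
				p->locked[i] = false;
			p->uptodate = NEED_TRAVERSE;
			return false;
		}

		p->locked[l] = true;
		l++;
	} while (l < p->locks_want);

	if (p->uptodate == NEED_RELOCK)
		p->uptodate = UPTODATE;

	return p->uptodate < NEED_RELOCK;
}

int main(void)
{
	struct path_sketch p = {
		.locks_want	= 2,
		.uptodate	= NEED_RELOCK,
		.node_present	= { true, true },
	};
	unsigned fail_level;

	printf("relocked: %d\n", path_get_locks(&p, &fail_level));
	return 0;
}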
501 struct btree_path *path, unsigned level, in __bch2_btree_node_relock() argument
504 struct btree *b = btree_path_node(path, level); in __bch2_btree_node_relock()
505 int want = __btree_lock_want(path, level); in __bch2_btree_node_relock()
510 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) || in __bch2_btree_node_relock()
511 (btree_node_lock_seq_matches(path, b, level) && in __bch2_btree_node_relock()
513 mark_btree_node_locked(trans, path, level, want); in __bch2_btree_node_relock()
518 trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level); in __bch2_btree_node_relock()
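The relock fast path hinges on lock sequence numbers: each path remembers the node's sequence from when it last held the lock, and six_relock_type() re-acquires without blocking only if that sequence is unchanged, i.e. nobody write-locked the node in the meantime. A toy model of that check (names invented; the real primitive is the kernel's six lock):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_lock {
	uint32_t	seq;		/* bumped on every write cycle */
	int		readers;
};

/* succeed only if nothing write-locked the node since seq was saved */
static bool toy_relock_read(struct toy_lock *l, uint32_t want_seq)
{
	if (l->seq != want_seq)
		return false;
	l->readers++;
	return true;
}

int main(void)
{
	struct toy_lock l = { .seq = 42 };

	printf("%d\n", toy_relock_read(&l, 42));	/* 1: unchanged */
	l.seq++;					/* node was modified */
	printf("%d\n", toy_relock_read(&l, 42));	/* 0: stale, re-traverse */
	return 0;
}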
525 struct btree_path *path, unsigned level) in bch2_btree_node_upgrade() argument
527 struct btree *b = path->l[level].b; in bch2_btree_node_upgrade()
529 if (!is_btree_node(path, level)) in bch2_btree_node_upgrade()
532 switch (btree_lock_want(path, level)) { in bch2_btree_node_upgrade()
534 BUG_ON(btree_node_locked(path, level)); in bch2_btree_node_upgrade()
537 BUG_ON(btree_node_intent_locked(path, level)); in bch2_btree_node_upgrade()
538 return bch2_btree_node_relock(trans, path, level); in bch2_btree_node_upgrade()
545 if (btree_node_intent_locked(path, level)) in bch2_btree_node_upgrade()
551 if (btree_node_locked(path, level) in bch2_btree_node_upgrade()
553 : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq)) in bch2_btree_node_upgrade()
556 if (btree_node_lock_seq_matches(path, b, level) && in bch2_btree_node_upgrade()
558 btree_node_unlock(trans, path, level); in bch2_btree_node_upgrade()
562 trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level); in bch2_btree_node_upgrade()
565 mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED); in bch2_btree_node_upgrade()
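bch2_btree_node_upgrade() works through progressively more expensive options: a plain relock if the level only wants a read lock, six_lock_tryupgrade() if a read lock is currently held, a direct relock at SIX_LOCK_intent via the saved sequence number if not, and finally, if the sequence still matches, taking over a lock reference already held elsewhere in the transaction and dropping its own. Only when all of these fail does it count the failure and leave the caller to restart.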
569 /* Btree path locking: */
575 struct btree_path *path) in bch2_btree_path_relock_intent() argument
579 for (l = path->level; in bch2_btree_path_relock_intent()
580 l < path->locks_want && btree_path_node(path, l); in bch2_btree_path_relock_intent()
582 if (!bch2_btree_node_relock(trans, path, l)) { in bch2_btree_path_relock_intent()
583 __bch2_btree_path_unlock(trans, path); in bch2_btree_path_relock_intent()
584 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); in bch2_btree_path_relock_intent()
585 trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path); in bch2_btree_path_relock_intent()
594 bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path) in bch2_btree_path_relock_norestart() argument
598 bool ret = btree_path_get_locks(trans, path, false, &f); in bch2_btree_path_relock_norestart()
604 struct btree_path *path, unsigned long trace_ip) in __bch2_btree_path_relock() argument
606 if (!bch2_btree_path_relock_norestart(trans, path)) { in __bch2_btree_path_relock()
607 trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path); in __bch2_btree_path_relock()
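The _norestart/plain split above is a recurring pattern in this file: bch2_btree_path_relock_norestart() merely reports failure to the caller, while __bch2_btree_path_relock() converts that failure into a transaction restart, so ordinary callers never proceed with a half-relocked path.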
615 struct btree_path *path, in bch2_btree_path_upgrade_noupgrade_sibs() argument
619 EBUG_ON(path->locks_want >= new_locks_want); in bch2_btree_path_upgrade_noupgrade_sibs()
621 path->locks_want = new_locks_want; in bch2_btree_path_upgrade_noupgrade_sibs()
623 bool ret = btree_path_get_locks(trans, path, true, f); in bch2_btree_path_upgrade_noupgrade_sibs()
629 struct btree_path *path, in __bch2_btree_path_upgrade() argument
633 bool ret = bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f); in __bch2_btree_path_upgrade()
656 if (!path->cached && !trans->in_traverse_all) { in __bch2_btree_path_upgrade()
661 if (linked != path && in __bch2_btree_path_upgrade()
662 linked->cached == path->cached && in __bch2_btree_path_upgrade()
663 linked->btree_id == path->btree_id && in __bch2_btree_path_upgrade()
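Note that __bch2_btree_path_upgrade() does not stop at its own path: unless the path is cached or a traverse_all is in progress, it also raises locks_want on the transaction's other paths into the same btree, so that those paths pick up matching intent locks on their next traversal instead of repeatedly conflicting with this one.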
675 struct btree_path *path, in __bch2_btree_path_downgrade() argument
678 unsigned l, old_locks_want = path->locks_want; in __bch2_btree_path_downgrade()
683 EBUG_ON(path->locks_want < new_locks_want); in __bch2_btree_path_downgrade()
685 path->locks_want = new_locks_want; in __bch2_btree_path_downgrade()
687 while (path->nodes_locked && in __bch2_btree_path_downgrade()
688 (l = btree_path_highest_level_locked(path)) >= path->locks_want) { in __bch2_btree_path_downgrade()
689 if (l > path->level) { in __bch2_btree_path_downgrade()
690 btree_node_unlock(trans, path, l); in __bch2_btree_path_downgrade()
692 if (btree_node_intent_locked(path, l)) { in __bch2_btree_path_downgrade()
693 six_lock_downgrade(&path->l[l].b->c.lock); in __bch2_btree_path_downgrade()
694 mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED); in __bch2_btree_path_downgrade()
700 bch2_btree_path_verify_locks(path); in __bch2_btree_path_downgrade()
702 trace_path_downgrade(trans, _RET_IP_, path, old_locks_want); in __bch2_btree_path_downgrade()
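__bch2_btree_path_downgrade() is the mirror image of upgrade: once locks_want has been lowered, every lock held on a level at or above the new want is either dropped outright (levels above path->level) or, at path->level itself, demoted in place from intent to read via six_lock_downgrade(). A compact sketch of that loop, again with simplified invented types:

#include <stdio.h>

enum { UNLOCKED, READ_LOCKED, INTENT_LOCKED };

struct path_sketch {
	unsigned	level, locks_want, nr_locked;
	int		held[8];
};

static unsigned highest_level_locked(struct path_sketch *p)
{
	unsigned l = 7;

	while (l && p->held[l] == UNLOCKED)
		l--;
	return l;
}

static void path_downgrade(struct path_sketch *p, unsigned new_want)
{
	unsigned l;

	p->locks_want = new_want;

	while (p->nr_locked &&
	       (l = highest_level_locked(p)) >= p->locks_want) {
		if (l > p->level) {
			p->held[l] = UNLOCKED;		/* unlock entirely */
			p->nr_locked--;
		} else {
			if (p->held[l] == INTENT_LOCKED)
				p->held[l] = READ_LOCKED; /* six_lock_downgrade() */
			break;
		}
	}
}

int main(void)
{
	struct path_sketch p = {
		.level = 1, .locks_want = 3, .nr_locked = 2,
		.held = { [1] = INTENT_LOCKED, [2] = INTENT_LOCKED },
	};

	path_downgrade(&p, 1);
	printf("l1=%d l2=%d\n", p.held[1], p.held[2]);	/* l1=1 (read), l2=0 */
	return 0;
}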
709 struct btree_path *path; in bch2_trans_downgrade() local
715 trans_for_each_path(trans, path, i) in bch2_trans_downgrade()
716 if (path->ref) in bch2_trans_downgrade()
717 bch2_btree_path_downgrade(trans, path); in bch2_trans_downgrade()
722 struct btree_path *path; in __bch2_trans_unlock() local
725 trans_for_each_path(trans, path, i) in __bch2_trans_unlock()
726 __bch2_btree_path_unlock(trans, path); in __bch2_trans_unlock()
729 static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path, in bch2_trans_relock_fail() argument
738 bch2_bpos_to_text(&buf, path->pos); in bch2_trans_relock_fail()
739 prt_printf(&buf, " l=%u seq=%u node seq=", f->l, path->l[f->l].lock_seq); in bch2_trans_relock_fail()
773 struct btree_path *path; in __bch2_trans_relock() local
776 trans_for_each_path(trans, path, i) { in __bch2_trans_relock()
779 if (path->should_be_locked && in __bch2_trans_relock()
780 !btree_path_get_locks(trans, path, false, &f)) in __bch2_trans_relock()
781 return bch2_trans_relock_fail(trans, path, &f, trace); in __bch2_trans_relock()
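__bch2_trans_relock() is then just the per-path loop: every path flagged should_be_locked must fully relock through btree_path_get_locks(), and the first one that cannot bails out through the __cold failure helper above, which reports the position, the level that failed, and the stale versus current lock sequence numbers.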
822 struct btree_path *path; in bch2_trans_unlock_write() local
825 trans_for_each_path(trans, path, i) in bch2_trans_unlock_write()
827 if (btree_node_write_locked(path, l)) in bch2_trans_unlock_write()
828 bch2_btree_node_unlock_write(trans, path, path->l[l].b); in bch2_trans_unlock_write()
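bch2_trans_unlock_write() sweeps every level of every path and releases only the write locks, each of which falls back to its underlying intent lock; the nodes stay held by the transaction while becoming readable to others again.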
845 void bch2_btree_path_verify_locks(struct btree_path *path) in bch2_btree_path_verify_locks() argument
848 * A path may be uptodate and yet have nothing locked if and only if in bch2_btree_path_verify_locks()
849 * there is no node at path->level, which generally means we were in bch2_btree_path_verify_locks()
852 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE && in bch2_btree_path_verify_locks()
853 btree_path_node(path, path->level) && in bch2_btree_path_verify_locks()
854 !path->nodes_locked); in bch2_btree_path_verify_locks()
856 if (!path->nodes_locked) in bch2_btree_path_verify_locks()
860 int want = btree_lock_want(path, l); in bch2_btree_path_verify_locks()
861 int have = btree_node_locked_type(path, l); in bch2_btree_path_verify_locks()
863 BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED); in bch2_btree_path_verify_locks()
865 BUG_ON(is_btree_node(path, l) && in bch2_btree_path_verify_locks()
870 BUG_ON(btree_node_locked(path, l) && in bch2_btree_path_verify_locks()
871 path->l[l].lock_seq != six_lock_seq(&path->l[l].b->c.lock)); in bch2_btree_path_verify_locks()
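The verifier states the locking invariants as BUG_ON()s: an uptodate path with a node at its level must hold at least one lock; at every level, a lock may only be recorded where a real btree node exists, and the held type must satisfy what btree_lock_want() asks for; and any held lock's saved lock_seq must still equal the six lock's current sequence.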
877 struct btree_path *path; in bch2_trans_locked() local
880 trans_for_each_path(trans, path, i) in bch2_trans_locked()
881 if (path->nodes_locked) in bch2_trans_locked()
893 struct btree_path *path; in bch2_trans_verify_locks() local
896 trans_for_each_path(trans, path, i) in bch2_trans_verify_locks()
897 bch2_btree_path_verify_locks(path); in bch2_trans_verify_locks()