Lines matching the identifier "trans" (full-word search) in fs/bcachefs/btree_update_interior.c
45 int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) in bch2_btree_node_check_topology() argument
47 struct bch_fs *c = trans->c; in bch2_btree_node_check_topology()
65 need_fsck_err(trans, btree_root_bad_min_key, in bch2_btree_node_check_topology()
73 need_fsck_err(trans, btree_root_bad_max_key, in bch2_btree_node_check_topology()
84 bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b); in bch2_btree_node_check_topology()
109 need_fsck_err(trans, btree_node_topology_bad_min_key, "%s", buf.buf); in bch2_btree_node_check_topology()
126 need_fsck_err(trans, btree_node_topology_empty_interior_node, "%s", buf.buf); in bch2_btree_node_check_topology()
139 need_fsck_err(trans, btree_node_topology_bad_max_key, "%s", buf.buf); in bch2_btree_node_check_topology()
225 static void __btree_node_free(struct btree_trans *trans, struct btree *b) in __btree_node_free() argument
227 struct bch_fs *c = trans->c; in __btree_node_free()
229 trace_and_count(c, btree_node_free, trans, b); in __btree_node_free()
242 static void bch2_btree_node_free_inmem(struct btree_trans *trans, in bch2_btree_node_free_inmem() argument
246 struct bch_fs *c = trans->c; in bch2_btree_node_free_inmem()
249 bch2_btree_node_lock_write_nofail(trans, path, &b->c); in bch2_btree_node_free_inmem()
251 __btree_node_free(trans, b); in bch2_btree_node_free_inmem()
260 trans_for_each_path(trans, path, i) in bch2_btree_node_free_inmem()
262 btree_node_unlock(trans, path, level); in bch2_btree_node_free_inmem()
268 struct btree_trans *trans, in bch2_btree_node_free_never_used() argument
296 trans_for_each_path(trans, path, i) in bch2_btree_node_free_never_used()
298 btree_node_unlock(trans, path, level); in bch2_btree_node_free_never_used()
303 static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, in __bch2_btree_node_alloc() argument
309 struct bch_fs *c = trans->c; in __bch2_btree_node_alloc()
321 b = bch2_btree_node_mem_alloc(trans, interior_node); in __bch2_btree_node_alloc()
339 ret = bch2_alloc_sectors_start_trans(trans, in __bch2_btree_node_alloc()
382 struct btree_trans *trans, in bch2_btree_node_alloc() argument
395 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); in bch2_btree_node_alloc()
396 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); in bch2_btree_node_alloc()
429 trace_and_count(c, btree_node_alloc, trans, b); in bch2_btree_node_alloc()
448 struct btree_trans *trans, in bch2_btree_node_alloc_replacement() argument
451 struct btree *n = bch2_btree_node_alloc(as, trans, b->c.level); in bch2_btree_node_alloc_replacement()
476 struct btree_trans *trans, unsigned level) in __btree_root_alloc() argument
478 struct btree *b = bch2_btree_node_alloc(as, trans, level); in __btree_root_alloc()
490 static void bch2_btree_reserve_put(struct btree_update *as, struct btree_trans *trans) in bch2_btree_reserve_put() argument
517 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); in bch2_btree_reserve_put()
518 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); in bch2_btree_reserve_put()
519 __btree_node_free(trans, b); in bch2_btree_reserve_put()
525 static int bch2_btree_reserve_get(struct btree_trans *trans, in bch2_btree_reserve_get() argument
541 ret = bch2_btree_cache_cannibalize_lock(trans, cl); in bch2_btree_reserve_get()
549 b = __bch2_btree_node_alloc(trans, &as->disk_res, cl, in bch2_btree_reserve_get()
560 bch2_btree_cache_cannibalize_unlock(trans); in bch2_btree_reserve_get()
566 static void bch2_btree_update_free(struct btree_update *as, struct btree_trans *trans) in bch2_btree_update_free() argument
577 bch2_btree_reserve_put(as, trans); in bch2_btree_update_free()
636 static int btree_update_nodes_written_trans(struct btree_trans *trans, in btree_update_nodes_written_trans() argument
639 struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, as->journal_u64s); in btree_update_nodes_written_trans()
646 trans->journal_pin = &as->journal; in btree_update_nodes_written_trans()
651 ret = bch2_key_trigger_old(trans, as->btree_id, level, bkey_i_to_s_c(k), in btree_update_nodes_written_trans()
660 ret = bch2_key_trigger_new(trans, as->btree_id, level, bkey_i_to_s(k), in btree_update_nodes_written_trans()
673 struct btree_trans *trans = bch2_trans_get(c); in btree_update_nodes_written() local
701 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); in btree_update_nodes_written()
723 ret = commit_do(trans, &as->disk_res, &journal_seq, in btree_update_nodes_written()
728 btree_update_nodes_written_trans(trans, as)); in btree_update_nodes_written()
729 bch2_trans_unlock(trans); in btree_update_nodes_written()
743 bch2_trans_unlock(trans); in btree_update_nodes_written()
744 bch2_trans_begin(trans); in btree_update_nodes_written()
765 btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans, in btree_update_nodes_written()
767 struct btree_path *path = trans->paths + path_idx; in btree_update_nodes_written()
768 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); in btree_update_nodes_written()
769 mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED); in btree_update_nodes_written()
773 bch2_btree_node_lock_write_nofail(trans, path, &b->c); in btree_update_nodes_written()
813 btree_node_unlock(trans, path, b->c.level); in btree_update_nodes_written()
814 bch2_path_put(trans, path_idx, true); in btree_update_nodes_written()
832 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); in btree_update_nodes_written()
840 bch2_btree_update_free(as, trans); in btree_update_nodes_written()
841 bch2_trans_put(trans); in btree_update_nodes_written()
1115 static void bch2_btree_update_done(struct btree_update *as, struct btree_trans *trans) in bch2_btree_update_done() argument
1126 bch2_btree_reserve_put(as, trans); in bch2_btree_update_done()
1136 bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, in bch2_btree_update_start() argument
1139 struct bch_fs *c = trans->c; in bch2_btree_update_start()
1148 u32 restart_count = trans->restart_count; in bch2_btree_update_start()
1165 ret = drop_locks_do(trans, in bch2_btree_update_start()
1175 ret = bch2_btree_path_upgrade(trans, path, level_end + 1); in bch2_btree_update_start()
1198 ret = drop_locks_do(trans, (down_read(&c->gc_lock), 0)); in bch2_btree_update_start()
1247 ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, NULL); in bch2_btree_update_start()
1266 ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, &cl); in bch2_btree_update_start()
1268 bch2_trans_unlock(trans); in bch2_btree_update_start()
1274 trace_and_count(c, btree_reserve_get_fail, trans->fn, in bch2_btree_update_start()
1279 ret = bch2_trans_relock(trans); in bch2_btree_update_start()
1283 bch2_trans_verify_not_restarted(trans, restart_count); in bch2_btree_update_start()
1286 bch2_btree_update_free(as, trans); in bch2_btree_update_start()
1311 struct btree_trans *trans, in bch2_btree_set_root() argument
1318 trace_and_count(c, btree_node_set_root, trans, b); in bch2_btree_set_root()
1327 bch2_btree_node_lock_write_nofail(trans, path, &old->c); in bch2_btree_set_root()
1329 int ret = bch2_btree_node_lock_write(trans, path, &old->c); in bch2_btree_set_root()
1345 bch2_btree_node_unlock_write(trans, path, old); in bch2_btree_set_root()
1352 struct btree_trans *trans, in bch2_insert_fixup_btree_ptr() argument
1389 bch2_btree_bset_insert_key(trans, path, b, node_iter, insert); in bch2_insert_fixup_btree_ptr()
1406 struct btree_trans *trans, in bch2_btree_insert_keys_interior() argument
1427 bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert); in bch2_btree_insert_keys_interior()
1446 struct btree_trans *trans, in __btree_split_node() argument
1484 (bch2_key_deleted_in_journal(trans, b->c.btree_id, b->c.level, uk.p) || in __btree_split_node()
1548 BUG_ON(bch2_btree_node_check_topology(trans, n[i])); in __btree_split_node()
1564 struct btree_trans *trans, in btree_split_insert_keys() argument
1569 struct btree_path *path = trans->paths + path_idx; in btree_split_insert_keys()
1577 bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys); in btree_split_insert_keys()
1579 BUG_ON(bch2_btree_node_check_topology(trans, b)); in btree_split_insert_keys()
1583 static int btree_split(struct btree_update *as, struct btree_trans *trans, in btree_split() argument
1588 struct btree *parent = btree_node_parent(trans->paths + path, b); in btree_split()
1596 BUG_ON(parent && !btree_node_intent_locked(trans->paths + path, b->c.level + 1)); in btree_split()
1598 ret = bch2_btree_node_check_topology(trans, b); in btree_split()
1607 trace_and_count(c, btree_node_split, trans, b); in btree_split()
1609 n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level); in btree_split()
1610 n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level); in btree_split()
1612 __btree_split_node(as, trans, b, n, keys); in btree_split()
1615 btree_split_insert_keys(as, trans, path, n1, keys); in btree_split()
1616 btree_split_insert_keys(as, trans, path, n2, keys); in btree_split()
1628 path1 = bch2_path_get_unlocked_mut(trans, as->btree_id, n1->c.level, n1->key.k.p); in btree_split()
1630 mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED); in btree_split()
1631 bch2_btree_path_level_init(trans, trans->paths + path1, n1); in btree_split()
1633 path2 = bch2_path_get_unlocked_mut(trans, as->btree_id, n2->c.level, n2->key.k.p); in btree_split()
1635 mark_btree_node_locked(trans, trans->paths + path2, n2->c.level, BTREE_NODE_INTENT_LOCKED); in btree_split()
1636 bch2_btree_path_level_init(trans, trans->paths + path2, n2); in btree_split()
1648 n3 = __btree_root_alloc(as, trans, b->c.level + 1); in btree_split()
1653 trans->paths[path2].locks_want++; in btree_split()
1654 BUG_ON(btree_node_locked(trans->paths + path2, n3->c.level)); in btree_split()
1656 mark_btree_node_locked(trans, trans->paths + path2, n3->c.level, BTREE_NODE_INTENT_LOCKED); in btree_split()
1657 bch2_btree_path_level_init(trans, trans->paths + path2, n3); in btree_split()
1662 btree_split_insert_keys(as, trans, path, n3, &as->parent_keys); in btree_split()
1665 trace_and_count(c, btree_node_compact, trans, b); in btree_split()
1667 n1 = bch2_btree_node_alloc_replacement(as, trans, b); in btree_split()
1670 btree_split_insert_keys(as, trans, path, n1, keys); in btree_split()
1678 path1 = bch2_path_get_unlocked_mut(trans, as->btree_id, n1->c.level, n1->key.k.p); in btree_split()
1680 mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED); in btree_split()
1681 bch2_btree_path_level_init(trans, trans->paths + path1, n1); in btree_split()
1691 ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys); in btree_split()
1693 ret = bch2_btree_set_root(as, trans, trans->paths + path, n3, false); in btree_split()
1696 ret = bch2_btree_set_root(as, trans, trans->paths + path, n1, false); in btree_split()
1719 bch2_btree_node_free_inmem(trans, trans->paths + path, b); in btree_split()
1722 bch2_trans_node_add(trans, trans->paths + path, n3); in btree_split()
1724 bch2_trans_node_add(trans, trans->paths + path2, n2); in btree_split()
1725 bch2_trans_node_add(trans, trans->paths + path1, n1); in btree_split()
1734 __bch2_btree_path_unlock(trans, trans->paths + path2); in btree_split()
1735 bch2_path_put(trans, path2, true); in btree_split()
1738 __bch2_btree_path_unlock(trans, trans->paths + path1); in btree_split()
1739 bch2_path_put(trans, path1, true); in btree_split()
1742 bch2_trans_verify_locks(trans); in btree_split()
1751 bch2_btree_node_free_never_used(as, trans, n3); in btree_split()
1753 bch2_btree_node_free_never_used(as, trans, n2); in btree_split()
1754 bch2_btree_node_free_never_used(as, trans, n1); in btree_split()
1762 * @trans: btree_trans object
1773 static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *trans, in bch2_btree_insert_node() argument
1778 struct btree_path *path = trans->paths + path_idx, *linked; in bch2_btree_insert_node()
1791 ret = bch2_btree_node_lock_write(trans, path, &b->c); in bch2_btree_insert_node()
1795 bch2_btree_node_prep_for_write(trans, path, b); in bch2_btree_insert_node()
1798 bch2_btree_node_unlock_write(trans, path, b); in bch2_btree_insert_node()
1802 ret = bch2_btree_node_check_topology(trans, b); in bch2_btree_insert_node()
1804 bch2_btree_node_unlock_write(trans, path, b); in bch2_btree_insert_node()
1808 bch2_btree_insert_keys_interior(as, trans, path, b, in bch2_btree_insert_node()
1811 trans_for_each_path_with_node(trans, b, linked, i) in bch2_btree_insert_node()
1814 bch2_trans_verify_paths(trans); in bch2_btree_insert_node()
1826 bch2_trans_node_reinit_iter(trans, b); in bch2_btree_insert_node()
1829 bch2_btree_node_unlock_write(trans, path, b); in bch2_btree_insert_node()
1831 BUG_ON(bch2_btree_node_check_topology(trans, b)); in bch2_btree_insert_node()
1839 trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_, b); in bch2_btree_insert_node()
1840 return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race); in bch2_btree_insert_node()
1843 return btree_split(as, trans, path_idx, b, keys); in bch2_btree_insert_node()
1846 int bch2_btree_split_leaf(struct btree_trans *trans, in bch2_btree_split_leaf() argument
1851 struct btree *b = path_l(trans->paths + path)->b; in bch2_btree_split_leaf()
1856 as = bch2_btree_update_start(trans, trans->paths + path, in bch2_btree_split_leaf()
1857 trans->paths[path].level, in bch2_btree_split_leaf()
1862 ret = btree_split(as, trans, path, b, NULL); in bch2_btree_split_leaf()
1864 bch2_btree_update_free(as, trans); in bch2_btree_split_leaf()
1868 bch2_btree_update_done(as, trans); in bch2_btree_split_leaf()
1870 for (l = trans->paths[path].level + 1; in bch2_btree_split_leaf()
1871 btree_node_intent_locked(&trans->paths[path], l) && !ret; in bch2_btree_split_leaf()
1873 ret = bch2_foreground_maybe_merge(trans, path, l, flags); in bch2_btree_split_leaf()
1878 static void __btree_increase_depth(struct btree_update *as, struct btree_trans *trans, in __btree_increase_depth() argument
1882 struct btree_path *path = trans->paths + path_idx; in __btree_increase_depth()
1887 n = __btree_root_alloc(as, trans, b->c.level + 1); in __btree_increase_depth()
1895 mark_btree_node_locked(trans, path, n->c.level, BTREE_NODE_INTENT_LOCKED); in __btree_increase_depth()
1896 bch2_btree_path_level_init(trans, path, n); in __btree_increase_depth()
1902 btree_split_insert_keys(as, trans, path_idx, n, &as->parent_keys); in __btree_increase_depth()
1904 int ret = bch2_btree_set_root(as, trans, path, n, true); in __btree_increase_depth()
1909 bch2_trans_node_add(trans, path, n); in __btree_increase_depth()
1916 bch2_trans_verify_locks(trans); in __btree_increase_depth()
1919 int bch2_btree_increase_depth(struct btree_trans *trans, btree_path_idx_t path, unsigned flags) in bch2_btree_increase_depth() argument
1921 struct bch_fs *c = trans->c; in bch2_btree_increase_depth()
1922 struct btree *b = bch2_btree_id_root(c, trans->paths[path].btree_id)->b; in bch2_btree_increase_depth()
1925 return bch2_btree_split_leaf(trans, path, flags); in bch2_btree_increase_depth()
1928 bch2_btree_update_start(trans, trans->paths + path, b->c.level, true, flags); in bch2_btree_increase_depth()
1932 __btree_increase_depth(as, trans, path); in bch2_btree_increase_depth()
1933 bch2_btree_update_done(as, trans); in bch2_btree_increase_depth()
1937 int __bch2_foreground_maybe_merge(struct btree_trans *trans, in __bch2_foreground_maybe_merge() argument
1943 struct bch_fs *c = trans->c; in __bch2_foreground_maybe_merge()
1951 enum btree_id btree = trans->paths[path].btree_id; in __bch2_foreground_maybe_merge()
1956 bch2_trans_verify_not_in_restart(trans); in __bch2_foreground_maybe_merge()
1957 bch2_trans_verify_not_unlocked(trans); in __bch2_foreground_maybe_merge()
1958 BUG_ON(!trans->paths[path].should_be_locked); in __bch2_foreground_maybe_merge()
1959 BUG_ON(!btree_node_locked(&trans->paths[path], level)); in __bch2_foreground_maybe_merge()
1977 b = trans->paths[path].l[level].b; in __bch2_foreground_maybe_merge()
1989 sib_path = bch2_path_get(trans, btree, sib_pos, in __bch2_foreground_maybe_merge()
1991 ret = bch2_btree_path_traverse(trans, sib_path, false); in __bch2_foreground_maybe_merge()
1995 btree_path_set_should_be_locked(trans, trans->paths + sib_path); in __bch2_foreground_maybe_merge()
1997 m = trans->paths[sib_path].l[level].b; in __bch2_foreground_maybe_merge()
1999 if (btree_node_parent(trans->paths + path, b) != in __bch2_foreground_maybe_merge()
2000 btree_node_parent(trans->paths + sib_path, m)) { in __bch2_foreground_maybe_merge()
2052 parent = btree_node_parent(trans->paths + path, b); in __bch2_foreground_maybe_merge()
2053 as = bch2_btree_update_start(trans, trans->paths + path, level, false, in __bch2_foreground_maybe_merge()
2059 trace_and_count(c, btree_node_merge, trans, b); in __bch2_foreground_maybe_merge()
2064 n = bch2_btree_node_alloc(as, trans, b->c.level); in __bch2_foreground_maybe_merge()
2083 new_path = bch2_path_get_unlocked_mut(trans, btree, n->c.level, n->key.k.p); in __bch2_foreground_maybe_merge()
2085 mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED); in __bch2_foreground_maybe_merge()
2086 bch2_btree_path_level_init(trans, trans->paths + new_path, n); in __bch2_foreground_maybe_merge()
2093 bch2_trans_verify_paths(trans); in __bch2_foreground_maybe_merge()
2095 ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys); in __bch2_foreground_maybe_merge()
2099 bch2_trans_verify_paths(trans); in __bch2_foreground_maybe_merge()
2104 bch2_btree_node_free_inmem(trans, trans->paths + path, b); in __bch2_foreground_maybe_merge()
2105 bch2_btree_node_free_inmem(trans, trans->paths + sib_path, m); in __bch2_foreground_maybe_merge()
2107 bch2_trans_node_add(trans, trans->paths + path, n); in __bch2_foreground_maybe_merge()
2109 bch2_trans_verify_paths(trans); in __bch2_foreground_maybe_merge()
2113 bch2_btree_update_done(as, trans); in __bch2_foreground_maybe_merge()
2119 bch2_path_put(trans, new_path, true); in __bch2_foreground_maybe_merge()
2120 bch2_path_put(trans, sib_path, true); in __bch2_foreground_maybe_merge()
2121 bch2_trans_verify_locks(trans); in __bch2_foreground_maybe_merge()
2125 ret = bch2_trans_relock(trans); in __bch2_foreground_maybe_merge()
2128 bch2_btree_node_free_never_used(as, trans, n); in __bch2_foreground_maybe_merge()
2129 bch2_btree_update_free(as, trans); in __bch2_foreground_maybe_merge()
2133 int bch2_btree_node_rewrite(struct btree_trans *trans, in bch2_btree_node_rewrite() argument
2138 struct bch_fs *c = trans->c; in bch2_btree_node_rewrite()
2146 struct btree_path *path = btree_iter_path(trans, iter); in bch2_btree_node_rewrite()
2148 as = bch2_btree_update_start(trans, path, b->c.level, false, flags); in bch2_btree_node_rewrite()
2155 n = bch2_btree_node_alloc_replacement(as, trans, b); in bch2_btree_node_rewrite()
2161 new_path = bch2_path_get_unlocked_mut(trans, iter->btree_id, n->c.level, n->key.k.p); in bch2_btree_node_rewrite()
2163 mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED); in bch2_btree_node_rewrite()
2164 bch2_btree_path_level_init(trans, trans->paths + new_path, n); in bch2_btree_node_rewrite()
2166 trace_and_count(c, btree_node_rewrite, trans, b); in bch2_btree_node_rewrite()
2170 ret = bch2_btree_insert_node(as, trans, iter->path, parent, &as->parent_keys); in bch2_btree_node_rewrite()
2172 ret = bch2_btree_set_root(as, trans, btree_iter_path(trans, iter), n, false); in bch2_btree_node_rewrite()
2181 bch2_btree_node_free_inmem(trans, btree_iter_path(trans, iter), b); in bch2_btree_node_rewrite()
2183 bch2_trans_node_add(trans, trans->paths + iter->path, n); in bch2_btree_node_rewrite()
2186 bch2_btree_update_done(as, trans); in bch2_btree_node_rewrite()
2189 bch2_path_put(trans, new_path, true); in bch2_btree_node_rewrite()
2190 bch2_trans_downgrade(trans); in bch2_btree_node_rewrite()
2193 bch2_btree_node_free_never_used(as, trans, n); in bch2_btree_node_rewrite()
2194 bch2_btree_update_free(as, trans); in bch2_btree_node_rewrite()
2208 static int async_btree_node_rewrite_trans(struct btree_trans *trans, in async_btree_node_rewrite_trans() argument
2211 struct bch_fs *c = trans->c; in async_btree_node_rewrite_trans()
2216 bch2_trans_node_iter_init(trans, &iter, a->btree_id, a->pos, in async_btree_node_rewrite_trans()
2236 ret = bch2_btree_node_rewrite(trans, &iter, b, 0); in async_btree_node_rewrite_trans()
2238 bch2_trans_iter_exit(trans, &iter); in async_btree_node_rewrite_trans()
2249 int ret = bch2_trans_do(c, async_btree_node_rewrite_trans(trans, a)); in async_btree_node_rewrite_work()
2327 static int __bch2_btree_node_update_key(struct btree_trans *trans, in __bch2_btree_node_update_key() argument
2334 struct bch_fs *c = trans->c; in __bch2_btree_node_update_key()
2340 ret = bch2_key_trigger_old(trans, b->c.btree_id, b->c.level + 1, in __bch2_btree_node_update_key()
2343 bch2_key_trigger_new(trans, b->c.btree_id, b->c.level + 1, in __bch2_btree_node_update_key()
2357 parent = btree_node_parent(btree_iter_path(trans, iter), b); in __bch2_btree_node_update_key()
2361 iter2.path = bch2_btree_path_make_mut(trans, iter2.path, in __bch2_btree_node_update_key()
2365 struct btree_path *path2 = btree_iter_path(trans, &iter2); in __bch2_btree_node_update_key()
2369 btree_path_set_level_up(trans, path2); in __bch2_btree_node_update_key()
2371 trans->paths_sorted = false; in __bch2_btree_node_update_key()
2374 bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_norun); in __bch2_btree_node_update_key()
2380 struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, in __bch2_btree_node_update_key()
2392 ret = bch2_trans_commit(trans, NULL, NULL, commit_flags); in __bch2_btree_node_update_key()
2396 bch2_btree_node_lock_write_nofail(trans, btree_iter_path(trans, iter), &b->c); in __bch2_btree_node_update_key()
2412 bch2_btree_node_unlock_write(trans, btree_iter_path(trans, iter), b); in __bch2_btree_node_update_key()
2414 bch2_trans_iter_exit(trans, &iter2); in __bch2_btree_node_update_key()
2425 int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter, in bch2_btree_node_update_key() argument
2429 struct bch_fs *c = trans->c; in bch2_btree_node_update_key()
2431 struct btree_path *path = btree_iter_path(trans, iter); in bch2_btree_node_update_key()
2435 ret = bch2_btree_path_upgrade(trans, path, b->c.level + 1); in bch2_btree_node_update_key()
2446 ret = bch2_btree_cache_cannibalize_lock(trans, &cl); in bch2_btree_node_update_key()
2448 ret = drop_locks_do(trans, (closure_sync(&cl), 0)); in bch2_btree_node_update_key()
2453 new_hash = bch2_btree_node_mem_alloc(trans, false); in bch2_btree_node_update_key()
2460 ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, new_key, in bch2_btree_node_update_key()
2468 bch2_btree_cache_cannibalize_unlock(trans); in bch2_btree_node_update_key()
2472 int bch2_btree_node_update_key_get_iter(struct btree_trans *trans, in bch2_btree_node_update_key_get_iter() argument
2479 bch2_trans_node_iter_init(trans, &iter, b->c.btree_id, b->key.k.p, in bch2_btree_node_update_key_get_iter()
2487 if (btree_iter_path(trans, &iter)->l[b->c.level].b != b) { in bch2_btree_node_update_key_get_iter()
2498 ret = bch2_btree_node_update_key(trans, &iter, b, new_key, in bch2_btree_node_update_key_get_iter()
2501 bch2_trans_iter_exit(trans, &iter); in bch2_btree_node_update_key_get_iter()
2518 int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id, unsigned level) in bch2_btree_root_alloc_fake_trans() argument
2520 struct bch_fs *c = trans->c; in bch2_btree_root_alloc_fake_trans()
2528 ret = bch2_btree_cache_cannibalize_lock(trans, &cl); in bch2_btree_root_alloc_fake_trans()
2532 b = bch2_btree_node_mem_alloc(trans, false); in bch2_btree_root_alloc_fake_trans()
2533 bch2_btree_cache_cannibalize_unlock(trans); in bch2_btree_root_alloc_fake_trans()
2570 bch2_trans_run(c, lockrestart_do(trans, bch2_btree_root_alloc_fake_trans(trans, id, level))); in bch2_btree_root_alloc_fake()