Lines Matching +full:iref +full:- +full:level

1 // SPDX-License-Identifier: GPL-2.0
13 #include "tree-log.h"
14 #include "disk-io.h"
19 #include "block-group.h"
20 #include "space-info.h"
21 #include "inode-item.h"
24 #include "extent-tree.h"
25 #include "root-tree.h"
26 #include "dir-item.h"
27 #include "file-item.h"
30 #include "tree-checker.h"
51 * ---> record transid of last unlink/rename per directory
64 * log. ---> check inode while renaming/linking.
68 * ---> check inode and old parent dir during rename
74 * of zero and redo the rm -rf
78 * rm -rf f1/foo
82 * called on f1, only its parent dir. After a crash the rm -rf must
124 * extent tree an 4x-6x higher write load than ext3.
132 * After a crash, items are copied out of the log-tree back into the
134 * allocation tree, and the log-tree freed.
169 struct btrfs_fs_info *fs_info = root->fs_info; in start_log_trans()
170 struct btrfs_root *tree_root = fs_info->tree_root; in start_log_trans()
179 if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state)) { in start_log_trans()
180 mutex_lock(&tree_root->log_mutex); in start_log_trans()
181 if (!fs_info->log_root_tree) { in start_log_trans()
184 set_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state); in start_log_trans()
188 mutex_unlock(&tree_root->log_mutex); in start_log_trans()
193 mutex_lock(&root->log_mutex); in start_log_trans()
196 if (root->log_root) { in start_log_trans()
197 int index = (root->log_transid + 1) % 2; in start_log_trans()
204 if (zoned && atomic_read(&root->log_commit[index])) { in start_log_trans()
205 wait_log_commit(root, root->log_transid - 1); in start_log_trans()
209 if (!root->log_start_pid) { in start_log_trans()
210 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); in start_log_trans()
211 root->log_start_pid = current->pid; in start_log_trans()
212 } else if (root->log_start_pid != current->pid) { in start_log_trans()
213 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); in start_log_trans()
217 * This means fs_info->log_root_tree was already created in start_log_trans()
231 set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); in start_log_trans()
232 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); in start_log_trans()
233 root->log_start_pid = current->pid; in start_log_trans()
236 atomic_inc(&root->log_writers); in start_log_trans()
237 if (!ctx->logging_new_name) { in start_log_trans()
238 int index = root->log_transid % 2; in start_log_trans()
239 list_add_tail(&ctx->list, &root->log_ctxs[index]); in start_log_trans()
240 ctx->log_transid = root->log_transid; in start_log_trans()
244 mutex_unlock(&root->log_mutex); in start_log_trans()
250 * to join, or returns -ENOENT if there were not transactions
255 const bool zoned = btrfs_is_zoned(root->fs_info); in join_running_log_trans()
256 int ret = -ENOENT; in join_running_log_trans()
258 if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state)) in join_running_log_trans()
261 mutex_lock(&root->log_mutex); in join_running_log_trans()
263 if (root->log_root) { in join_running_log_trans()
264 int index = (root->log_transid + 1) % 2; in join_running_log_trans()
267 if (zoned && atomic_read(&root->log_commit[index])) { in join_running_log_trans()
268 wait_log_commit(root, root->log_transid - 1); in join_running_log_trans()
271 atomic_inc(&root->log_writers); in join_running_log_trans()
273 mutex_unlock(&root->log_mutex); in join_running_log_trans()
284 atomic_inc(&root->log_writers); in btrfs_pin_log_trans()
293 if (atomic_dec_and_test(&root->log_writers)) { in btrfs_end_log_trans()
295 cond_wake_up_nomb(&root->log_writer_wait); in btrfs_end_log_trans()
338 struct walk_control *wc, u64 gen, int level);
346 struct walk_control *wc, u64 gen, int level) in process_one_buffer() argument
348 struct btrfs_fs_info *fs_info = log->fs_info; in process_one_buffer()
357 .level = level, in process_one_buffer()
366 if (wc->pin) { in process_one_buffer()
367 ret = btrfs_pin_extent_for_log_replay(wc->trans, eb); in process_one_buffer()
404 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY; in overwrite_item()
426 u32 dst_size = btrfs_item_size(path->nodes[0], in overwrite_item()
427 path->slots[0]); in overwrite_item()
441 return -ENOMEM; in overwrite_item()
446 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); in overwrite_item()
447 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr, in overwrite_item()
473 item = btrfs_item_ptr(path->nodes[0], path->slots[0], in overwrite_item()
475 nbytes = btrfs_inode_nbytes(path->nodes[0], item); in overwrite_item()
512 path->skip_release_on_error = 1; in overwrite_item()
515 path->skip_release_on_error = 0; in overwrite_item()
518 if (ret == -EEXIST || ret == -EOVERFLOW) { in overwrite_item()
520 found_size = btrfs_item_size(path->nodes[0], in overwrite_item()
521 path->slots[0]); in overwrite_item()
525 btrfs_extend_item(trans, path, item_size - found_size); in overwrite_item()
529 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], in overwrite_item()
530 path->slots[0]); in overwrite_item()
541 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) { in overwrite_item()
549 struct extent_buffer *dst_eb = path->nodes[0]; in overwrite_item()
567 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) { in overwrite_item()
569 saved_i_size = btrfs_inode_size(path->nodes[0], in overwrite_item()
574 copy_extent_buffer(path->nodes[0], eb, dst_ptr, in overwrite_item()
580 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size); in overwrite_item()
584 if (key->type == BTRFS_INODE_ITEM_KEY) { in overwrite_item()
587 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) { in overwrite_item()
588 btrfs_set_inode_generation(path->nodes[0], dst_item, in overwrite_item()
589 trans->transid); in overwrite_item()
593 btrfs_mark_buffer_dirty(trans, path->nodes[0]); in overwrite_item()
605 return -ENOMEM; in read_alloc_one_name()
608 name->name = buf; in read_alloc_one_name()
609 name->len = len; in read_alloc_one_name()
647 struct btrfs_fs_info *fs_info = root->fs_info; in replay_one_extent()
650 u64 start = key->offset; in replay_one_extent()
675 fs_info->sectorsize); in replay_one_extent()
681 inode = read_one_inode(root, key->objectid); in replay_one_extent()
683 ret = -EIO; in replay_one_extent()
703 leaf = path->nodes[0]; in replay_one_extent()
704 existing = btrfs_item_ptr(leaf, path->slots[0], in replay_one_extent()
745 dest_offset = btrfs_item_ptr_offset(path->nodes[0], in replay_one_extent()
746 path->slots[0]); in replay_one_extent()
747 copy_extent_buffer(path->nodes[0], eb, dest_offset, in replay_one_extent()
753 offset = key->offset - btrfs_file_extent_offset(eb, item); in replay_one_extent()
790 btrfs_init_data_ref(&ref, key->objectid, offset, in replay_one_extent()
802 key->objectid, offset, &ins); in replay_one_extent()
818 ret = btrfs_lookup_csums_list(root->log_root, in replay_one_extent()
819 csum_start, csum_end - 1, in replay_one_extent()
857 * Which covers the 20K sub-range starting at offset 20K in replay_one_extent()
881 sums->logical); in replay_one_extent()
884 sums->logical, in replay_one_extent()
885 sums->len); in replay_one_extent()
890 list_del(&sums->list); in replay_one_extent()
906 extent_end - start); in replay_one_extent()
950 struct btrfs_root *root = dir->root; in drop_one_dir_item()
957 leaf = path->nodes[0]; in drop_one_dir_item()
962 return -ENOMEM; in drop_one_dir_item()
968 ret = -EIO; in drop_one_dir_item()
1005 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); in inode_in_dir()
1018 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); in inode_in_dir()
1047 return -ENOMEM; in backref_in_log()
1057 if (key->type == BTRFS_INODE_EXTREF_KEY) in backref_in_log()
1058 ret = !!btrfs_find_name_in_ext_backref(path->nodes[0], in backref_in_log()
1059 path->slots[0], in backref_in_log()
1062 ret = !!btrfs_find_name_in_backref(path->nodes[0], in backref_in_log()
1063 path->slots[0], name); in backref_in_log()
1095 leaf = path->nodes[0]; in __add_inode_ref()
1107 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); in __add_inode_ref()
1108 ptr_end = ptr + btrfs_item_size(leaf, path->slots[0]); in __add_inode_ref()
1125 inc_nlink(&inode->vfs_inode); in __add_inode_ref()
1154 leaf = path->nodes[0]; in __add_inode_ref()
1156 item_size = btrfs_item_size(leaf, path->slots[0]); in __add_inode_ref()
1157 base = btrfs_item_ptr_offset(leaf, path->slots[0]); in __add_inode_ref()
1167 ret = read_alloc_one_name(leaf, &extref->name, in __add_inode_ref()
1184 ret = -ENOENT; in __add_inode_ref()
1188 inc_nlink(&inode->vfs_inode); in __add_inode_ref()
1243 ret = read_alloc_one_name(eb, &extref->name, in extref_get_fields()
1305 eb = path->nodes[0]; in unlink_old_inode_refs()
1306 ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]); in unlink_old_inode_refs()
1307 ref_end = ref_ptr + btrfs_item_size(eb, path->slots[0]); in unlink_old_inode_refs()
1312 if (key->type == BTRFS_INODE_EXTREF_KEY) { in unlink_old_inode_refs()
1316 parent_id = key->offset; in unlink_old_inode_refs()
1322 if (key->type == BTRFS_INODE_EXTREF_KEY) in unlink_old_inode_refs()
1334 ret = -ENOENT; in unlink_old_inode_refs()
1349 if (key->type == BTRFS_INODE_EXTREF_KEY) in unlink_old_inode_refs()
1388 if (key->type == BTRFS_INODE_EXTREF_KEY) { in add_inode_ref()
1397 parent_objectid = key->offset; in add_inode_ref()
1399 inode_objectid = key->objectid; in add_inode_ref()
1409 ret = -ENOENT; in add_inode_ref()
1415 ret = -EIO; in add_inode_ref()
1430 ret = -ENOENT; in add_inode_ref()
1519 ret = btrfs_find_one_extref(inode->root, inode_objectid, offset, in count_inode_extrefs()
1524 leaf = path->nodes[0]; in count_inode_extrefs()
1525 item_size = btrfs_item_size(leaf, path->slots[0]); in count_inode_extrefs()
1526 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); in count_inode_extrefs()
1543 if (ret < 0 && ret != -ENOENT) in count_inode_extrefs()
1560 key.offset = (u64)-1; in count_inode_refs()
1563 ret = btrfs_search_slot(NULL, inode->root, &key, path, 0, 0); in count_inode_refs()
1567 if (path->slots[0] == 0) in count_inode_refs()
1569 path->slots[0]--; in count_inode_refs()
1572 btrfs_item_key_to_cpu(path->nodes[0], &key, in count_inode_refs()
1573 path->slots[0]); in count_inode_refs()
1577 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); in count_inode_refs()
1578 ptr_end = ptr + btrfs_item_size(path->nodes[0], in count_inode_refs()
1579 path->slots[0]); in count_inode_refs()
1584 name_len = btrfs_inode_ref_name_len(path->nodes[0], in count_inode_refs()
1592 if (path->slots[0] > 0) { in count_inode_refs()
1593 path->slots[0]--; in count_inode_refs()
1596 key.offset--; in count_inode_refs()
1617 struct btrfs_root *root = BTRFS_I(inode)->root; in fixup_inode_link_count()
1625 return -ENOMEM; in fixup_inode_link_count()
1641 if (nlink != inode->i_nlink) { in fixup_inode_link_count()
1647 if (S_ISDIR(inode->i_mode)) in fixup_inode_link_count()
1648 BTRFS_I(inode)->index_cnt = (u64)-1; in fixup_inode_link_count()
1650 if (inode->i_nlink == 0) { in fixup_inode_link_count()
1651 if (S_ISDIR(inode->i_mode)) { in fixup_inode_link_count()
1658 if (ret == -EEXIST) in fixup_inode_link_count()
1677 key.offset = (u64)-1; in fixup_inode_link_counts()
1679 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); in fixup_inode_link_counts()
1685 if (path->slots[0] == 0) in fixup_inode_link_counts()
1687 path->slots[0]--; in fixup_inode_link_counts()
1690 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in fixup_inode_link_counts()
1702 ret = -EIO; in fixup_inode_link_counts()
1716 key.offset = (u64)-1; in fixup_inode_link_counts()
1739 return -EIO; in link_to_fixup_dir()
1749 if (!inode->i_nlink) in link_to_fixup_dir()
1754 } else if (ret == -EEXIST) { in link_to_fixup_dir()
1777 inode = read_one_inode(root, location->objectid); in insert_one_name()
1779 return -ENOENT; in insert_one_name()
1784 return -EIO; in insert_one_name()
1807 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key); in delete_conflicting_dir_entry()
1809 if (found_key.objectid == log_key->objectid && in delete_conflicting_dir_entry()
1810 found_key.type == log_key->type && in delete_conflicting_dir_entry()
1811 found_key.offset == log_key->offset && in delete_conflicting_dir_entry()
1812 btrfs_dir_flags(path->nodes[0], dst_di) == log_flags) in delete_conflicting_dir_entry()
1839 * non-existing inode) and 1 if the name was replayed.
1862 dir = read_one_inode(root, key->objectid); in replay_one_name()
1864 return -EIO; in replay_one_name()
1879 dir_dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, in replay_one_name()
1896 key->objectid, key->offset, in replay_one_name()
1924 search_key.offset = key->objectid; in replay_one_name()
1925 ret = backref_in_log(root->log_root, &search_key, 0, &name); in replay_one_name()
1937 search_key.offset = key->objectid; in replay_one_name()
1938 ret = backref_in_log(root->log_root, &search_key, key->objectid, &name); in replay_one_name()
1948 ret = insert_one_name(trans, root, key->objectid, key->offset, in replay_one_name()
1950 if (ret && ret != -ENOENT && ret != -EEXIST) in replay_one_name()
1959 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name.len * 2); in replay_one_name()
1980 ASSERT(key->type == BTRFS_DIR_INDEX_KEY); in replay_one_dir_item()
1988 * If this entry refers to a non-directory (directories can not have a in replay_one_dir_item()
2002 * xfs_io -c "fsync" testdir/bar in replay_one_dir_item()
2019 return -ENOMEM; in replay_one_dir_item()
2051 if (*start_ret == (u64)-1) in find_dir_range()
2062 if (path->slots[0] == 0) in find_dir_range()
2064 path->slots[0]--; in find_dir_range()
2067 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in find_dir_range()
2073 item = btrfs_item_ptr(path->nodes[0], path->slots[0], in find_dir_range()
2075 found_end = btrfs_dir_log_end(path->nodes[0], item); in find_dir_range()
2086 nritems = btrfs_header_nritems(path->nodes[0]); in find_dir_range()
2087 path->slots[0]++; in find_dir_range()
2088 if (path->slots[0] >= nritems) { in find_dir_range()
2094 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in find_dir_range()
2100 item = btrfs_item_ptr(path->nodes[0], path->slots[0], in find_dir_range()
2102 found_end = btrfs_dir_log_end(path->nodes[0], item); in find_dir_range()
2123 struct btrfs_root *root = BTRFS_I(dir)->root; in check_item_in_log()
2138 ASSERT(dir_key->type == BTRFS_DIR_INDEX_KEY); in check_item_in_log()
2140 eb = path->nodes[0]; in check_item_in_log()
2141 slot = path->slots[0]; in check_item_in_log()
2151 dir_key->objectid, in check_item_in_log()
2152 dir_key->offset, &name, 0); in check_item_in_log()
2168 ret = -EIO; in check_item_in_log()
2206 return -ENOMEM; in replay_xattr_deletes()
2216 nritems = btrfs_header_nritems(path->nodes[0]); in replay_xattr_deletes()
2217 for (i = path->slots[0]; i < nritems; i++) { in replay_xattr_deletes()
2224 btrfs_item_key_to_cpu(path->nodes[0], &key, i); in replay_xattr_deletes()
2230 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item); in replay_xattr_deletes()
2231 total_size = btrfs_item_size(path->nodes[0], i); in replay_xattr_deletes()
2234 u16 name_len = btrfs_dir_name_len(path->nodes[0], di); in replay_xattr_deletes()
2235 u16 data_len = btrfs_dir_data_len(path->nodes[0], di); in replay_xattr_deletes()
2241 ret = -ENOMEM; in replay_xattr_deletes()
2244 read_extent_buffer(path->nodes[0], name, in replay_xattr_deletes()
2254 name, name_len, -1); in replay_xattr_deletes()
2318 return -ENOMEM; in replay_dir_deletes()
2334 range_end = (u64)-1; in replay_dir_deletes()
2352 nritems = btrfs_header_nritems(path->nodes[0]); in replay_dir_deletes()
2353 if (path->slots[0] >= nritems) { in replay_dir_deletes()
2360 btrfs_item_key_to_cpu(path->nodes[0], &found_key, in replay_dir_deletes()
2361 path->slots[0]); in replay_dir_deletes()
2376 if (found_key.offset == (u64)-1) in replay_dir_deletes()
2381 if (range_end == (u64)-1) in replay_dir_deletes()
2405 struct walk_control *wc, u64 gen, int level) in replay_one_buffer() argument
2410 .level = level in replay_one_buffer()
2413 struct btrfs_root *root = wc->replay_dest; in replay_one_buffer()
2422 level = btrfs_header_level(eb); in replay_one_buffer()
2424 if (level != 0) in replay_one_buffer()
2429 return -ENOMEM; in replay_one_buffer()
2437 wc->stage == LOG_WALK_REPLAY_INODES) { in replay_one_buffer()
2452 wc->ignore_cur_inode = true; in replay_one_buffer()
2455 wc->ignore_cur_inode = false; in replay_one_buffer()
2457 ret = replay_xattr_deletes(wc->trans, root, log, in replay_one_buffer()
2463 ret = replay_dir_deletes(wc->trans, in replay_one_buffer()
2468 ret = overwrite_item(wc->trans, root, path, in replay_one_buffer()
2488 ret = -EIO; in replay_one_buffer()
2492 root->fs_info->sectorsize); in replay_one_buffer()
2494 drop_args.end = (u64)-1; in replay_one_buffer()
2496 ret = btrfs_drop_extents(wc->trans, root, in replay_one_buffer()
2503 ret = btrfs_update_inode(wc->trans, in replay_one_buffer()
2511 ret = link_to_fixup_dir(wc->trans, root, in replay_one_buffer()
2517 if (wc->ignore_cur_inode) in replay_one_buffer()
2521 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) { in replay_one_buffer()
2522 ret = replay_one_dir_item(wc->trans, root, path, in replay_one_buffer()
2528 if (wc->stage < LOG_WALK_REPLAY_ALL) in replay_one_buffer()
2533 ret = overwrite_item(wc->trans, root, path, in replay_one_buffer()
2539 ret = add_inode_ref(wc->trans, root, log, path, in replay_one_buffer()
2541 if (ret && ret != -ENOENT) in replay_one_buffer()
2545 ret = replay_one_extent(wc->trans, root, path, in replay_one_buffer()
2574 spin_lock(&cache->space_info->lock); in unaccount_log_buffer()
2575 spin_lock(&cache->lock); in unaccount_log_buffer()
2576 cache->reserved -= fs_info->nodesize; in unaccount_log_buffer()
2577 cache->space_info->bytes_reserved -= fs_info->nodesize; in unaccount_log_buffer()
2578 spin_unlock(&cache->lock); in unaccount_log_buffer()
2579 spin_unlock(&cache->space_info->lock); in unaccount_log_buffer()
2599 unaccount_log_buffer(eb->fs_info, eb->start); in clean_log_buffer()
2607 struct btrfs_path *path, int *level, in walk_down_log_tree() argument
2610 struct btrfs_fs_info *fs_info = root->fs_info; in walk_down_log_tree()
2617 while (*level > 0) { in walk_down_log_tree()
2620 cur = path->nodes[*level]; in walk_down_log_tree()
2622 WARN_ON(btrfs_header_level(cur) != *level); in walk_down_log_tree()
2624 if (path->slots[*level] >= in walk_down_log_tree()
2628 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); in walk_down_log_tree()
2629 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); in walk_down_log_tree()
2631 check.level = *level - 1; in walk_down_log_tree()
2633 btrfs_node_key_to_cpu(cur, &check.first_key, path->slots[*level]); in walk_down_log_tree()
2637 *level - 1); in walk_down_log_tree()
2641 if (*level == 1) { in walk_down_log_tree()
2642 ret = wc->process_func(root, next, wc, ptr_gen, in walk_down_log_tree()
2643 *level - 1); in walk_down_log_tree()
2649 path->slots[*level]++; in walk_down_log_tree()
2650 if (wc->free) { in walk_down_log_tree()
2672 if (path->nodes[*level-1]) in walk_down_log_tree()
2673 free_extent_buffer(path->nodes[*level-1]); in walk_down_log_tree()
2674 path->nodes[*level-1] = next; in walk_down_log_tree()
2675 *level = btrfs_header_level(next); in walk_down_log_tree()
2676 path->slots[*level] = 0; in walk_down_log_tree()
2679 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); in walk_down_log_tree()
2687 struct btrfs_path *path, int *level, in walk_up_log_tree() argument
2694 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { in walk_up_log_tree()
2695 slot = path->slots[i]; in walk_up_log_tree()
2696 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { in walk_up_log_tree()
2697 path->slots[i]++; in walk_up_log_tree()
2698 *level = i; in walk_up_log_tree()
2699 WARN_ON(*level == 0); in walk_up_log_tree()
2702 ret = wc->process_func(root, path->nodes[*level], wc, in walk_up_log_tree()
2703 btrfs_header_generation(path->nodes[*level]), in walk_up_log_tree()
2704 *level); in walk_up_log_tree()
2708 if (wc->free) { in walk_up_log_tree()
2709 ret = clean_log_buffer(trans, path->nodes[*level]); in walk_up_log_tree()
2713 free_extent_buffer(path->nodes[*level]); in walk_up_log_tree()
2714 path->nodes[*level] = NULL; in walk_up_log_tree()
2715 *level = i + 1; in walk_up_log_tree()
2731 int level; in walk_log_tree() local
2737 return -ENOMEM; in walk_log_tree()
2739 level = btrfs_header_level(log->node); in walk_log_tree()
2740 orig_level = level; in walk_log_tree()
2741 path->nodes[level] = log->node; in walk_log_tree()
2742 atomic_inc(&log->node->refs); in walk_log_tree()
2743 path->slots[level] = 0; in walk_log_tree()
2746 wret = walk_down_log_tree(trans, log, path, &level, wc); in walk_log_tree()
2754 wret = walk_up_log_tree(trans, log, path, &level, wc); in walk_log_tree()
2764 if (path->nodes[orig_level]) { in walk_log_tree()
2765 ret = wc->process_func(log, path->nodes[orig_level], wc, in walk_log_tree()
2766 btrfs_header_generation(path->nodes[orig_level]), in walk_log_tree()
2770 if (wc->free) in walk_log_tree()
2771 ret = clean_log_buffer(trans, path->nodes[orig_level]); in walk_log_tree()
2787 struct btrfs_fs_info *fs_info = log->fs_info; in update_log_root()
2790 if (log->log_transid == 1) { in update_log_root()
2792 ret = btrfs_insert_root(trans, fs_info->log_root_tree, in update_log_root()
2793 &log->root_key, root_item); in update_log_root()
2795 ret = btrfs_update_root(trans, fs_info->log_root_tree, in update_log_root()
2796 &log->root_key, root_item); in update_log_root()
2812 prepare_to_wait(&root->log_commit_wait[index], in wait_log_commit()
2815 if (!(root->log_transid_committed < transid && in wait_log_commit()
2816 atomic_read(&root->log_commit[index]))) in wait_log_commit()
2819 mutex_unlock(&root->log_mutex); in wait_log_commit()
2821 mutex_lock(&root->log_mutex); in wait_log_commit()
2823 finish_wait(&root->log_commit_wait[index], &wait); in wait_log_commit()
2831 prepare_to_wait(&root->log_writer_wait, &wait, in wait_for_writer()
2833 if (!atomic_read(&root->log_writers)) in wait_for_writer()
2836 mutex_unlock(&root->log_mutex); in wait_for_writer()
2838 mutex_lock(&root->log_mutex); in wait_for_writer()
2840 finish_wait(&root->log_writer_wait, &wait); in wait_for_writer()
2845 ctx->log_ret = 0; in btrfs_init_log_ctx()
2846 ctx->log_transid = 0; in btrfs_init_log_ctx()
2847 ctx->log_new_dentries = false; in btrfs_init_log_ctx()
2848 ctx->logging_new_name = false; in btrfs_init_log_ctx()
2849 ctx->logging_new_delayed_dentries = false; in btrfs_init_log_ctx()
2850 ctx->logged_before = false; in btrfs_init_log_ctx()
2851 ctx->inode = inode; in btrfs_init_log_ctx()
2852 INIT_LIST_HEAD(&ctx->list); in btrfs_init_log_ctx()
2853 INIT_LIST_HEAD(&ctx->ordered_extents); in btrfs_init_log_ctx()
2854 INIT_LIST_HEAD(&ctx->conflict_inodes); in btrfs_init_log_ctx()
2855 ctx->num_conflict_inodes = 0; in btrfs_init_log_ctx()
2856 ctx->logging_conflict_inodes = false; in btrfs_init_log_ctx()
2857 ctx->scratch_eb = NULL; in btrfs_init_log_ctx()
2862 struct btrfs_inode *inode = ctx->inode; in btrfs_init_log_ctx_scratch_eb()
2864 if (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) && in btrfs_init_log_ctx_scratch_eb()
2865 !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags)) in btrfs_init_log_ctx_scratch_eb()
2872 ctx->scratch_eb = alloc_dummy_extent_buffer(inode->root->fs_info, 0); in btrfs_init_log_ctx_scratch_eb()
2880 btrfs_assert_inode_locked(ctx->inode); in btrfs_release_log_ctx_extents()
2882 list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) { in btrfs_release_log_ctx_extents()
2883 list_del_init(&ordered->log_list); in btrfs_release_log_ctx_extents()
2892 mutex_lock(&root->log_mutex); in btrfs_remove_log_ctx()
2893 list_del_init(&ctx->list); in btrfs_remove_log_ctx()
2894 mutex_unlock(&root->log_mutex); in btrfs_remove_log_ctx()
2907 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) { in btrfs_remove_all_log_ctxs()
2908 list_del_init(&ctx->list); in btrfs_remove_all_log_ctxs()
2909 ctx->log_ret = error; in btrfs_remove_all_log_ctxs()
2921 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2931 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_sync_log()
2932 struct btrfs_root *log = root->log_root; in btrfs_sync_log()
2933 struct btrfs_root *log_root_tree = fs_info->log_root_tree; in btrfs_sync_log()
2941 mutex_lock(&root->log_mutex); in btrfs_sync_log()
2942 log_transid = ctx->log_transid; in btrfs_sync_log()
2943 if (root->log_transid_committed >= log_transid) { in btrfs_sync_log()
2944 mutex_unlock(&root->log_mutex); in btrfs_sync_log()
2945 return ctx->log_ret; in btrfs_sync_log()
2949 if (atomic_read(&root->log_commit[index1])) { in btrfs_sync_log()
2951 mutex_unlock(&root->log_mutex); in btrfs_sync_log()
2952 return ctx->log_ret; in btrfs_sync_log()
2954 ASSERT(log_transid == root->log_transid); in btrfs_sync_log()
2955 atomic_set(&root->log_commit[index1], 1); in btrfs_sync_log()
2958 if (atomic_read(&root->log_commit[(index1 + 1) % 2])) in btrfs_sync_log()
2959 wait_log_commit(root, log_transid - 1); in btrfs_sync_log()
2962 int batch = atomic_read(&root->log_batch); in btrfs_sync_log()
2965 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) { in btrfs_sync_log()
2966 mutex_unlock(&root->log_mutex); in btrfs_sync_log()
2968 mutex_lock(&root->log_mutex); in btrfs_sync_log()
2971 if (batch == atomic_read(&root->log_batch)) in btrfs_sync_log()
2978 mutex_unlock(&root->log_mutex); in btrfs_sync_log()
2991 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark); in btrfs_sync_log()
2993 * -EAGAIN happens when someone, e.g., a concurrent transaction in btrfs_sync_log()
2994 * commit, writes a dirty extent in this tree-log commit. This in btrfs_sync_log()
3001 if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) in btrfs_sync_log()
3006 mutex_unlock(&root->log_mutex); in btrfs_sync_log()
3011 * We _must_ update under the root->log_mutex in order to make sure we in btrfs_sync_log()
3016 * log_root_tree->log_mutex yet. This is important because when we in btrfs_sync_log()
3023 btrfs_set_root_node(&log->root_item, log->node); in btrfs_sync_log()
3024 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item)); in btrfs_sync_log()
3026 btrfs_set_root_log_transid(root, root->log_transid + 1); in btrfs_sync_log()
3027 log->log_transid = root->log_transid; in btrfs_sync_log()
3028 root->log_start_pid = 0; in btrfs_sync_log()
3034 mutex_unlock(&root->log_mutex); in btrfs_sync_log()
3037 mutex_lock(&fs_info->tree_root->log_mutex); in btrfs_sync_log()
3038 if (!log_root_tree->node) { in btrfs_sync_log()
3041 mutex_unlock(&fs_info->tree_root->log_mutex); in btrfs_sync_log()
3046 mutex_unlock(&fs_info->tree_root->log_mutex); in btrfs_sync_log()
3051 mutex_lock(&log_root_tree->log_mutex); in btrfs_sync_log()
3053 index2 = log_root_tree->log_transid % 2; in btrfs_sync_log()
3054 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); in btrfs_sync_log()
3055 root_log_ctx.log_transid = log_root_tree->log_transid; in btrfs_sync_log()
3067 if (ret != -ENOSPC) in btrfs_sync_log()
3072 mutex_unlock(&log_root_tree->log_mutex); in btrfs_sync_log()
3076 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { in btrfs_sync_log()
3079 mutex_unlock(&log_root_tree->log_mutex); in btrfs_sync_log()
3084 if (atomic_read(&log_root_tree->log_commit[index2])) { in btrfs_sync_log()
3089 mutex_unlock(&log_root_tree->log_mutex); in btrfs_sync_log()
3094 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid); in btrfs_sync_log()
3095 atomic_set(&log_root_tree->log_commit[index2], 1); in btrfs_sync_log()
3097 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) { in btrfs_sync_log()
3099 root_log_ctx.log_transid - 1); in btrfs_sync_log()
3109 mutex_unlock(&log_root_tree->log_mutex); in btrfs_sync_log()
3115 &log_root_tree->dirty_log_pages, in btrfs_sync_log()
3119 * As described above, -EAGAIN indicates a hole in the extents. We in btrfs_sync_log()
3123 if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) { in btrfs_sync_log()
3126 mutex_unlock(&log_root_tree->log_mutex); in btrfs_sync_log()
3130 mutex_unlock(&log_root_tree->log_mutex); in btrfs_sync_log()
3139 mutex_unlock(&log_root_tree->log_mutex); in btrfs_sync_log()
3143 log_root_start = log_root_tree->node->start; in btrfs_sync_log()
3144 log_root_level = btrfs_header_level(log_root_tree->node); in btrfs_sync_log()
3145 log_root_tree->log_transid++; in btrfs_sync_log()
3146 mutex_unlock(&log_root_tree->log_mutex); in btrfs_sync_log()
3163 mutex_lock(&fs_info->tree_log_mutex); in btrfs_sync_log()
3173 ret = -EIO; in btrfs_sync_log()
3176 mutex_unlock(&fs_info->tree_log_mutex); in btrfs_sync_log()
3180 btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start); in btrfs_sync_log()
3181 btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level); in btrfs_sync_log()
3183 mutex_unlock(&fs_info->tree_log_mutex); in btrfs_sync_log()
3192 * root->log_commit[index1] to 0 and any task attempting to sync the in btrfs_sync_log()
3202 mutex_lock(&log_root_tree->log_mutex); in btrfs_sync_log()
3205 log_root_tree->log_transid_committed++; in btrfs_sync_log()
3206 atomic_set(&log_root_tree->log_commit[index2], 0); in btrfs_sync_log()
3207 mutex_unlock(&log_root_tree->log_mutex); in btrfs_sync_log()
3214 cond_wake_up(&log_root_tree->log_commit_wait[index2]); in btrfs_sync_log()
3216 mutex_lock(&root->log_mutex); in btrfs_sync_log()
3218 root->log_transid_committed++; in btrfs_sync_log()
3219 atomic_set(&root->log_commit[index1], 0); in btrfs_sync_log()
3220 mutex_unlock(&root->log_mutex); in btrfs_sync_log()
3227 cond_wake_up(&root->log_commit_wait[index1]); in btrfs_sync_log()
3240 if (log->node) { in free_log_tree()
3245 * typical scenario is getting an -EIO when reading an in free_log_tree()
3250 &log->fs_info->fs_state); in free_log_tree()
3260 btrfs_write_marked_extents(log->fs_info, in free_log_tree()
3261 &log->dirty_log_pages, in free_log_tree()
3269 btrfs_handle_fs_error(log->fs_info, ret, NULL); in free_log_tree()
3273 extent_io_tree_release(&log->dirty_log_pages); in free_log_tree()
3274 extent_io_tree_release(&log->log_csum_range); in free_log_tree()
3285 if (root->log_root) { in btrfs_free_log()
3286 free_log_tree(trans, root->log_root); in btrfs_free_log()
3287 root->log_root = NULL; in btrfs_free_log()
3288 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); in btrfs_free_log()
3296 if (fs_info->log_root_tree) { in btrfs_free_log_root_tree()
3297 free_log_tree(trans, fs_info->log_root_tree); in btrfs_free_log_root_tree()
3298 fs_info->log_root_tree = NULL; in btrfs_free_log_root_tree()
3299 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &fs_info->tree_root->state); in btrfs_free_log_root_tree()
3321 if (inode->logged_trans == trans->transid) in inode_logged()
3328 if (inode->logged_trans > 0) in inode_logged()
3338 if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state)) { in inode_logged()
3339 inode->logged_trans = trans->transid - 1; in inode_logged()
3374 return -ENOMEM; in inode_logged()
3377 ret = btrfs_search_slot(NULL, inode->root->log_root, &key, path, 0, 0); in inode_logged()
3395 inode->logged_trans = trans->transid - 1; in inode_logged()
3404 inode->logged_trans = trans->transid; in inode_logged()
3412 * because last_dir_index_offset is an in-memory only field, not persisted in inode_logged()
3413 * in the inode item or any other on-disk structure, so its value is lost in inode_logged()
3416 if (S_ISDIR(inode->vfs_inode.i_mode)) in inode_logged()
3417 inode->last_dir_index_offset = (u64)-1; in inode_logged()
3443 index, name, -1); in del_logged_dentry()
3498 mutex_lock(&dir->log_mutex); in btrfs_del_dir_entries_in_log()
3502 ret = -ENOMEM; in btrfs_del_dir_entries_in_log()
3506 ret = del_logged_dentry(trans, root->log_root, path, btrfs_ino(dir), in btrfs_del_dir_entries_in_log()
3510 mutex_unlock(&dir->log_mutex); in btrfs_del_dir_entries_in_log()
3537 log = root->log_root; in btrfs_del_inode_ref_in_log()
3538 mutex_lock(&inode->log_mutex); in btrfs_del_inode_ref_in_log()
3542 mutex_unlock(&inode->log_mutex); in btrfs_del_inode_ref_in_log()
3543 if (ret < 0 && ret != -ENOENT) in btrfs_del_inode_ref_in_log()
3568 * -EEXIST is fine and can happen sporadically when we are logging a in insert_dir_log_key()
3574 if (ret && ret != -EEXIST) in insert_dir_log_key()
3577 item = btrfs_item_ptr(path->nodes[0], path->slots[0], in insert_dir_log_key()
3579 if (ret == -EEXIST) { in insert_dir_log_key()
3580 const u64 curr_end = btrfs_dir_log_end(path->nodes[0], item); in insert_dir_log_key()
3590 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); in insert_dir_log_key()
3591 btrfs_mark_buffer_dirty(trans, path->nodes[0]); in insert_dir_log_key()
3603 struct btrfs_root *log = inode->root->log_root; in flush_dir_items_batch()
3631 return -ENOMEM; in flush_dir_items_batch()
3652 dst = dst_path->nodes[0]; in flush_dir_items_batch()
3663 dst_offset = btrfs_item_ptr_offset(dst, dst_path->slots[0] + count - 1); in flush_dir_items_batch()
3664 src_offset = btrfs_item_ptr_offset(src, start_slot + count - 1); in flush_dir_items_batch()
3668 last_index = batch.keys[count - 1].offset; in flush_dir_items_batch()
3669 ASSERT(last_index > inode->last_dir_index_offset); in flush_dir_items_batch()
3675 if (WARN_ON(last_index <= inode->last_dir_index_offset)) in flush_dir_items_batch()
3678 inode->last_dir_index_offset = last_index; in flush_dir_items_batch()
3690 const int slot = path->slots[0]; in clone_leaf()
3692 if (ctx->scratch_eb) { in clone_leaf()
3693 copy_extent_buffer_full(ctx->scratch_eb, path->nodes[0]); in clone_leaf()
3695 ctx->scratch_eb = btrfs_clone_extent_buffer(path->nodes[0]); in clone_leaf()
3696 if (!ctx->scratch_eb) in clone_leaf()
3697 return -ENOMEM; in clone_leaf()
3701 path->nodes[0] = ctx->scratch_eb; in clone_leaf()
3702 path->slots[0] = slot; in clone_leaf()
3707 atomic_inc(&ctx->scratch_eb->refs); in clone_leaf()
3719 struct btrfs_root *log = inode->root->log_root; in process_dir_items_leaf()
3721 const int nritems = btrfs_header_nritems(path->nodes[0]); in process_dir_items_leaf()
3737 src = path->nodes[0]; in process_dir_items_leaf()
3739 for (int i = path->slots[0]; i < nritems; i++) { in process_dir_items_leaf()
3759 if (btrfs_dir_transid(src, di) < trans->transid) { in process_dir_items_leaf()
3763 key.offset - 1); in process_dir_items_leaf()
3773 if (key.offset <= inode->last_dir_index_offset) in process_dir_items_leaf()
3785 * xfs_io -c "fsync" mydir in process_dir_items_leaf()
3796 * resulting in -ENOTEMPTY errors. in process_dir_items_leaf()
3798 if (!ctx->log_new_dentries) { in process_dir_items_leaf()
3803 ctx->log_new_dentries = true; in process_dir_items_leaf()
3836 struct btrfs_root *root = inode->root; in log_dir_items()
3837 struct btrfs_root *log = root->log_root; in log_dir_items()
3839 u64 last_old_dentry_offset = min_offset - 1; in log_dir_items()
3840 u64 last_offset = (u64)-1; in log_dir_items()
3847 ret = btrfs_search_forward(root, &min_key, path, trans->transid); in log_dir_items()
3857 min_key.offset = (u64)-1; in log_dir_items()
3874 btrfs_item_key_to_cpu(path->nodes[0], &tmp, in log_dir_items()
3875 path->slots[0]); in log_dir_items()
3890 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); in log_dir_items()
3917 * and we can end up logging a dir index range that ends at (u64)-1 in log_dir_items()
3946 path->slots[0] = btrfs_header_nritems(path->nodes[0]); in log_dir_items()
3955 last_offset = (u64)-1; in log_dir_items()
3960 btrfs_item_key_to_cpu(path->nodes[0], &min_key, path->slots[0]); in log_dir_items()
3962 last_offset = (u64)-1; in log_dir_items()
3965 if (btrfs_header_generation(path->nodes[0]) != trans->transid) { in log_dir_items()
3975 last_offset = min_key.offset - 1; in log_dir_items()
 4010 * last_dir_index_offset is (u64)-1, so we don't know the value of the last index
4024 lockdep_assert_held(&inode->log_mutex); in update_last_dir_index_offset()
4026 if (inode->last_dir_index_offset != (u64)-1) in update_last_dir_index_offset()
4029 if (!ctx->logged_before) { in update_last_dir_index_offset()
4030 inode->last_dir_index_offset = BTRFS_DIR_START_INDEX - 1; in update_last_dir_index_offset()
4036 key.offset = (u64)-1; in update_last_dir_index_offset()
4038 ret = btrfs_search_slot(NULL, inode->root->log_root, &key, path, 0, 0); in update_last_dir_index_offset()
4041 * value of (u64)-1. Bail out, we're done. in update_last_dir_index_offset()
4047 inode->last_dir_index_offset = BTRFS_DIR_START_INDEX - 1; in update_last_dir_index_offset()
4053 if (path->slots[0] == 0) in update_last_dir_index_offset()
4063 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1); in update_last_dir_index_offset()
4065 inode->last_dir_index_offset = key.offset; in update_last_dir_index_offset()
4107 if (max_key == (u64)-1) in log_directory_changes()
4134 key.offset = (u64)-1; in drop_inode_items()
4137 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); in drop_inode_items()
4141 if (path->slots[0] == 0) in drop_inode_items()
4143 path->slots[0]--; in drop_inode_items()
4146 btrfs_item_key_to_cpu(path->nodes[0], &found_key, in drop_inode_items()
4147 path->slots[0]); in drop_inode_items()
4154 ret = btrfs_bin_search(path->nodes[0], 0, &found_key, &start_slot); in drop_inode_items()
4159 path->slots[0] - start_slot + 1); in drop_inode_items()
4161 * If start slot isn't 0 then we don't need to re-search, we've in drop_inode_items()
4210 BTRFS_I(inode)->generation); in fill_inode_item()
4211 btrfs_set_token_inode_size(&token, item, inode->i_size); in fill_inode_item()
4216 btrfs_set_token_inode_mode(&token, item, inode->i_mode); in fill_inode_item()
4217 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); in fill_inode_item()
4219 btrfs_set_token_timespec_sec(&token, &item->atime, in fill_inode_item()
4221 btrfs_set_token_timespec_nsec(&token, &item->atime, in fill_inode_item()
4224 btrfs_set_token_timespec_sec(&token, &item->mtime, in fill_inode_item()
4226 btrfs_set_token_timespec_nsec(&token, &item->mtime, in fill_inode_item()
4229 btrfs_set_token_timespec_sec(&token, &item->ctime, in fill_inode_item()
4231 btrfs_set_token_timespec_nsec(&token, &item->ctime, in fill_inode_item()
4244 btrfs_set_token_inode_transid(&token, item, trans->transid); in fill_inode_item()
4245 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); in fill_inode_item()
4246 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, in fill_inode_item()
4247 BTRFS_I(inode)->ro_flags); in fill_inode_item()
4271 if (!inode_item_dropped && inode->logged_trans == trans->transid) { in log_inode_item()
4275 ret = -ENOENT; in log_inode_item()
4280 * We can never get -EEXIST because we are only called for a fast in log_inode_item()
4284 * flags and set ->logged_trans to 0. in log_inode_item()
4288 ASSERT(ret != -EEXIST); in log_inode_item()
4292 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], in log_inode_item()
4294 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode, in log_inode_item()
4305 const u64 lock_end = sums->logical + sums->len - 1; in log_csums()
4314 if (inode->last_reflink_trans < trans->transid) in log_csums()
4323 ret = lock_extent(&log_root->log_csum_range, sums->logical, lock_end, in log_csums()
4336 ret = btrfs_del_csums(trans, log_root, sums->logical, sums->len); in log_csums()
4340 unlock_extent(&log_root->log_csum_range, sums->logical, lock_end, in log_csums()
4353 struct btrfs_root *log = inode->root->log_root; in copy_items()
4362 const bool skip_csum = (inode->flags & BTRFS_INODE_NODATASUM); in copy_items()
4363 const u64 i_size = i_size_read(&inode->vfs_inode); in copy_items()
4384 * buffers of a subvolume tree - all this while holding a write lock in copy_items()
4397 src = src_path->nodes[0]; in copy_items()
4402 return -ENOMEM; in copy_items()
4433 trans->transid); in copy_items()
4441 * generations, so we can skip them - as long as the inode has in copy_items()
4450 inode->last_reflink_trans < trans->transid) in copy_items()
4483 csum_root = btrfs_csum_root(trans->fs_info, disk_bytenr); in copy_items()
4486 disk_bytenr + extent_num_bytes - 1, in copy_items()
4495 list_del(&sums->list); in copy_items()
4522 const int dst_slot = dst_path->slots[0] + dst_index; in copy_items()
4543 if (btrfs_file_extent_generation(src, extent) < trans->transid && in copy_items()
4545 inode->last_reflink_trans < trans->transid) in copy_items()
4549 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], dst_slot); in copy_items()
4555 inode_item = btrfs_item_ptr(dst_path->nodes[0], dst_slot, in copy_items()
4557 fill_inode_item(trans, dst_path->nodes[0], inode_item, in copy_items()
4558 &inode->vfs_inode, in copy_items()
4562 copy_extent_buffer(dst_path->nodes[0], src, dst_offset, in copy_items()
4569 btrfs_mark_buffer_dirty(trans, dst_path->nodes[0]); in copy_items()
4585 if (em1->start < em2->start) in extent_cmp()
4586 return -1; in extent_cmp()
4587 else if (em1->start > em2->start) in extent_cmp()
4603 u64 mod_start = em->start; in log_extent_csums()
4604 u64 mod_len = em->len; in log_extent_csums()
4608 if (inode->flags & BTRFS_INODE_NODATASUM || in log_extent_csums()
4609 (em->flags & EXTENT_FLAG_PREALLOC) || in log_extent_csums()
4610 em->disk_bytenr == EXTENT_MAP_HOLE) in log_extent_csums()
4613 list_for_each_entry(ordered, &ctx->ordered_extents, log_list) { in log_extent_csums()
4614 const u64 ordered_end = ordered->file_offset + ordered->num_bytes; in log_extent_csums()
4623 if (mod_end <= ordered->file_offset) in log_extent_csums()
4631 if (ordered->file_offset > mod_start) { in log_extent_csums()
4633 mod_len = ordered->file_offset - mod_start; in log_extent_csums()
4637 * |--------- logged extent ---------| in log_extent_csums()
4638 * |----- ordered extent ----| in log_extent_csums()
4646 mod_len = mod_end - ordered_end; in log_extent_csums()
4657 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags)) in log_extent_csums()
4660 list_for_each_entry(sums, &ordered->list, list) { in log_extent_csums()
4674 csum_len = em->disk_num_bytes; in log_extent_csums()
4676 csum_offset = mod_start - em->start; in log_extent_csums()
4682 csum_root = btrfs_csum_root(trans->fs_info, block_start); in log_extent_csums()
4684 block_start + csum_offset + csum_len - 1, in log_extent_csums()
4696 list_del(&sums->list); in log_extent_csums()
4710 struct btrfs_root *log = inode->root->log_root; in log_one_extent()
4715 u64 extent_offset = em->offset; in log_one_extent()
4720 btrfs_set_stack_file_extent_generation(&fi, trans->transid); in log_one_extent()
4721 if (em->flags & EXTENT_FLAG_PREALLOC) in log_one_extent()
4726 block_len = em->disk_num_bytes; in log_one_extent()
4731 } else if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) { in log_one_extent()
4732 btrfs_set_stack_file_extent_disk_bytenr(&fi, block_start - extent_offset); in log_one_extent()
4737 btrfs_set_stack_file_extent_num_bytes(&fi, em->len); in log_one_extent()
4738 btrfs_set_stack_file_extent_ram_bytes(&fi, em->ram_bytes); in log_one_extent()
4751 * are small, with a root at level 2 or 3 at most, due to their short in log_one_extent()
4754 if (ctx->logged_before) { in log_one_extent()
4756 drop_args.start = em->start; in log_one_extent()
4757 drop_args.end = em->start + em->len; in log_one_extent()
4768 key.offset = em->start; in log_one_extent()
4775 leaf = path->nodes[0]; in log_one_extent()
4777 btrfs_item_ptr_offset(leaf, path->slots[0]), in log_one_extent()
4799 struct btrfs_root *root = inode->root; in btrfs_log_prealloc_extents()
4801 const u64 i_size = i_size_read(&inode->vfs_inode); in btrfs_log_prealloc_extents()
4812 if (!(inode->flags & BTRFS_INODE_PREALLOC)) in btrfs_log_prealloc_extents()
4837 leaf = path->nodes[0]; in btrfs_log_prealloc_extents()
4838 slot = path->slots[0]; in btrfs_log_prealloc_extents()
4857 leaf = path->nodes[0]; in btrfs_log_prealloc_extents()
4858 slot = path->slots[0]; in btrfs_log_prealloc_extents()
4884 path->slots[0]++; in btrfs_log_prealloc_extents()
4896 ret = truncate_inode_items(trans, root->log_root, inode, in btrfs_log_prealloc_extents()
4907 path->slots[0]++; in btrfs_log_prealloc_extents()
4911 ret = -ENOMEM; in btrfs_log_prealloc_extents()
4934 struct extent_map_tree *tree = &inode->extent_tree; in btrfs_log_changed_extents()
4938 write_lock(&tree->lock); in btrfs_log_changed_extents()
4940 list_for_each_entry_safe(em, n, &tree->modified_extents, list) { in btrfs_log_changed_extents()
4941 list_del_init(&em->list); in btrfs_log_changed_extents()
4949 list_del_init(&tree->modified_extents); in btrfs_log_changed_extents()
4950 ret = -EFBIG; in btrfs_log_changed_extents()
4954 if (em->generation < trans->transid) in btrfs_log_changed_extents()
4958 if ((em->flags & EXTENT_FLAG_PREALLOC) && in btrfs_log_changed_extents()
4959 em->start >= i_size_read(&inode->vfs_inode)) in btrfs_log_changed_extents()
4963 refcount_inc(&em->refs); in btrfs_log_changed_extents()
4964 em->flags |= EXTENT_FLAG_LOGGING; in btrfs_log_changed_extents()
4965 list_add_tail(&em->list, &extents); in btrfs_log_changed_extents()
4974 list_del_init(&em->list); in btrfs_log_changed_extents()
4986 write_unlock(&tree->lock); in btrfs_log_changed_extents()
4989 write_lock(&tree->lock); in btrfs_log_changed_extents()
4994 write_unlock(&tree->lock); in btrfs_log_changed_extents()
5008 list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) { in btrfs_log_changed_extents()
5009 list_del_init(&ordered->log_list); in btrfs_log_changed_extents()
5010 set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags); in btrfs_log_changed_extents()
5012 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) { in btrfs_log_changed_extents()
5013 spin_lock_irq(&inode->ordered_tree_lock); in btrfs_log_changed_extents()
5014 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) { in btrfs_log_changed_extents()
5015 set_bit(BTRFS_ORDERED_PENDING, &ordered->flags); in btrfs_log_changed_extents()
5016 atomic_inc(&trans->transaction->pending_ordered); in btrfs_log_changed_extents()
5018 spin_unlock_irq(&inode->ordered_tree_lock); in btrfs_log_changed_extents()
5044 item = btrfs_item_ptr(path->nodes[0], path->slots[0], in logged_inode_size()
5046 *size_ret = btrfs_inode_size(path->nodes[0], item); in logged_inode_size()
5048 * If the in-memory inode's i_size is smaller then the inode in logged_inode_size()
5058 if (*size_ret > inode->vfs_inode.i_size) in logged_inode_size()
5059 *size_ret = inode->vfs_inode.i_size; in logged_inode_size()
5081 struct btrfs_root *root = inode->root; in btrfs_log_all_xattrs()
5089 if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags)) in btrfs_log_all_xattrs()
5101 int slot = path->slots[0]; in btrfs_log_all_xattrs()
5102 struct extent_buffer *leaf = path->nodes[0]; in btrfs_log_all_xattrs()
5128 path->slots[0]++; in btrfs_log_all_xattrs()
5140 set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags); in btrfs_log_all_xattrs()
5158 struct btrfs_root *root = inode->root; in btrfs_log_holes()
5159 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_log_holes()
5162 const u64 i_size = i_size_read(&inode->vfs_inode); in btrfs_log_holes()
5178 struct extent_buffer *leaf = path->nodes[0]; in btrfs_log_holes()
5180 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { in btrfs_log_holes()
5188 leaf = path->nodes[0]; in btrfs_log_holes()
5191 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in btrfs_log_holes()
5197 const u64 hole_len = key.offset - prev_extent_end; in btrfs_log_holes()
5205 ret = btrfs_insert_hole_extent(trans, root->log_root, in btrfs_log_holes()
5222 return -ENOENT; in btrfs_log_holes()
5223 leaf = path->nodes[0]; in btrfs_log_holes()
5227 path->slots[0]++; in btrfs_log_holes()
5235 hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize); in btrfs_log_holes()
5236 ret = btrfs_insert_hole_extent(trans, root->log_root, ino, in btrfs_log_holes()
5256 * xfs_io -c fsync /mnt/x
5268 * xfs_io -c fsync /mnt/foo
5303 return -ENOMEM; in btrfs_check_ref_name_override()
5304 search_path->search_commit_root = 1; in btrfs_check_ref_name_override()
5305 search_path->skip_locking = 1; in btrfs_check_ref_name_override()
5315 if (key->type == BTRFS_INODE_REF_KEY) { in btrfs_check_ref_name_override()
5316 struct btrfs_inode_ref *iref; in btrfs_check_ref_name_override() local
5318 iref = (struct btrfs_inode_ref *)(ptr + cur_offset); in btrfs_check_ref_name_override()
5319 parent = key->offset; in btrfs_check_ref_name_override()
5320 this_name_len = btrfs_inode_ref_name_len(eb, iref); in btrfs_check_ref_name_override()
5321 name_ptr = (unsigned long)(iref + 1); in btrfs_check_ref_name_override()
5322 this_len = sizeof(*iref) + this_name_len; in btrfs_check_ref_name_override()
5330 name_ptr = (unsigned long)&extref->name; in btrfs_check_ref_name_override()
5339 ret = -ENOMEM; in btrfs_check_ref_name_override()
5350 di = btrfs_lookup_dir_item(NULL, inode->root, search_path, in btrfs_check_ref_name_override()
5355 btrfs_dir_item_key_to_cpu(search_path->nodes[0], in btrfs_check_ref_name_override()
5358 if (di_key.objectid != key->objectid) { in btrfs_check_ref_name_override()
5366 ret = -EAGAIN; in btrfs_check_ref_name_override()
5389 * while here we do not care if the log transaction was already committed - our
5390 * caller will commit the log later - and we want to avoid logging an inode
5400 if (S_ISDIR(inode->vfs_inode.i_mode) && inode->last_trans < trans->transid) in need_log_inode()
5413 !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags)) in need_log_inode()
5427 * This is a recursive operation - if an existing dentry corresponds to a
5434 * ---- ----
5435 * lock(&type->i_mutex_dir_key#3/2);
5437 * lock(&type->i_mutex_dir_key#3/2);
5438 * lock(&sb->s_type->i_mutex_key#14);
5456 * names - this is ok, not a problem, because at log replay time we set the
5464 struct btrfs_root *root = start_inode->root; in log_new_dir_dentries()
5477 if (ctx->logging_new_name) in log_new_dir_dentries()
5482 return -ENOMEM; in log_new_dir_dentries()
5485 ihold(&curr_inode->vfs_inode); in log_new_dir_dentries()
5500 btrfs_for_each_slot(root->log_root, &key, &found_key, path, iter_ret) { in log_new_dir_dentries()
5501 struct extent_buffer *leaf = path->nodes[0]; in log_new_dir_dentries()
5516 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); in log_new_dir_dentries()
5518 if (btrfs_dir_transid(leaf, di) < trans->transid) in log_new_dir_dentries()
5536 ctx->log_new_dentries = false; in log_new_dir_dentries()
5544 if (ctx->log_new_dentries) { in log_new_dir_dentries()
5547 ret = -ENOMEM; in log_new_dir_dentries()
5550 dir_elem->ino = di_key.objectid; in log_new_dir_dentries()
5551 list_add_tail(&dir_elem->list, &dir_list); in log_new_dir_dentries()
5567 if (continue_curr_inode && key.offset < (u64)-1) { in log_new_dir_dentries()
5578 ino = dir_elem->ino; in log_new_dir_dentries()
5579 list_del(&dir_elem->list); in log_new_dir_dentries()
5618 list_for_each_entry_safe(curr, next, &ctx->conflict_inodes, list) { in free_conflicting_inodes()
5619 list_del(&curr->list); in free_conflicting_inodes()
5634 path->search_commit_root = 1; in conflicting_inode_is_dir()
5635 path->skip_locking = 1; in conflicting_inode_is_dir()
5644 ret = -ENOENT; in conflicting_inode_is_dir()
5648 item = btrfs_item_ptr(path->nodes[0], path->slots[0], in conflicting_inode_is_dir()
5650 if (S_ISDIR(btrfs_inode_mode(path->nodes[0], item))) in conflicting_inode_is_dir()
5655 path->search_commit_root = 0; in conflicting_inode_is_dir()
5656 path->skip_locking = 0; in conflicting_inode_is_dir()
5677 if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES) in add_conflicting_inode()
5698 * case it has a last_unlink_trans == trans->transid, due to moving in add_conflicting_inode()
5704 if (ret != -ENOENT) in add_conflicting_inode()
5715 return -ENOMEM; in add_conflicting_inode()
5716 ino_elem->ino = ino; in add_conflicting_inode()
5717 ino_elem->parent = parent; in add_conflicting_inode()
5718 list_add_tail(&ino_elem->list, &ctx->conflict_inodes); in add_conflicting_inode()
5719 ctx->num_conflict_inodes++; in add_conflicting_inode()
5725 * If the inode was already logged skip it - otherwise we can hit an in add_conflicting_inode()
5745 * - we detect inode 258 as a conflicting inode, with inode 261 in add_conflicting_inode()
5748 * - we detect inode 259 as a conflicting inode, with inode 258 in add_conflicting_inode()
5751 * - we detect inode 258 as a conflicting inode, with inode 259 in add_conflicting_inode()
5752 * on reference "zz_link", and log it - again! After this we in add_conflicting_inode()
5768 return -ENOMEM; in add_conflicting_inode()
5769 ino_elem->ino = ino; in add_conflicting_inode()
5770 ino_elem->parent = parent; in add_conflicting_inode()
5771 list_add_tail(&ino_elem->list, &ctx->conflict_inodes); in add_conflicting_inode()
5772 ctx->num_conflict_inodes++; in add_conflicting_inode()
5786 * calls. This check guarantees we can have only 1 level of recursion. in log_conflicting_inodes()
5788 if (ctx->logging_conflict_inodes) in log_conflicting_inodes()
5791 ctx->logging_conflict_inodes = true; in log_conflicting_inodes()
5798 while (!list_empty(&ctx->conflict_inodes)) { in log_conflicting_inodes()
5804 curr = list_first_entry(&ctx->conflict_inodes, in log_conflicting_inodes()
5806 ino = curr->ino; in log_conflicting_inodes()
5807 parent = curr->parent; in log_conflicting_inodes()
5808 list_del(&curr->list); in log_conflicting_inodes()
5819 if (ret != -ENOENT) in log_conflicting_inodes()
5872 ctx->logging_conflict_inodes = false; in log_conflicting_inodes()
5890 const u64 i_size = i_size_read(&inode->vfs_inode); in copy_inode_items_to_log()
5891 struct btrfs_root *root = inode->root; in copy_inode_items_to_log()
5897 ret = btrfs_search_forward(root, min_key, path, trans->transid); in copy_inode_items_to_log()
5906 if (min_key->objectid != max_key->objectid) in copy_inode_items_to_log()
5908 if (min_key->type > max_key->type) in copy_inode_items_to_log()
5911 if (min_key->type == BTRFS_INODE_ITEM_KEY) { in copy_inode_items_to_log()
5913 } else if (min_key->type == BTRFS_EXTENT_DATA_KEY && in copy_inode_items_to_log()
5914 min_key->offset >= i_size) { in copy_inode_items_to_log()
5922 } else if ((min_key->type == BTRFS_INODE_REF_KEY || in copy_inode_items_to_log()
5923 min_key->type == BTRFS_INODE_EXTREF_KEY) && in copy_inode_items_to_log()
5924 (inode->generation == trans->transid || in copy_inode_items_to_log()
5925 ctx->logging_conflict_inodes)) { in copy_inode_items_to_log()
5929 ret = btrfs_check_ref_name_override(path->nodes[0], in copy_inode_items_to_log()
5930 path->slots[0], min_key, inode, in copy_inode_items_to_log()
5935 other_ino != btrfs_ino(ctx->inode)) { in copy_inode_items_to_log()
5940 ins_start_slot = path->slots[0]; in copy_inode_items_to_log()
5957 } else if (min_key->type == BTRFS_XATTR_ITEM_KEY) { in copy_inode_items_to_log()
5970 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { in copy_inode_items_to_log()
5974 ins_start_slot = path->slots[0]; in copy_inode_items_to_log()
5984 ins_start_slot = path->slots[0]; in copy_inode_items_to_log()
5986 path->slots[0]++; in copy_inode_items_to_log()
5987 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { in copy_inode_items_to_log()
5988 btrfs_item_key_to_cpu(path->nodes[0], min_key, in copy_inode_items_to_log()
5989 path->slots[0]); in copy_inode_items_to_log()
6002 if (min_key->offset < (u64)-1) { in copy_inode_items_to_log()
6003 min_key->offset++; in copy_inode_items_to_log()
6004 } else if (min_key->type < max_key->type) { in copy_inode_items_to_log()
6005 min_key->type++; in copy_inode_items_to_log()
6006 min_key->offset = 0; in copy_inode_items_to_log()
6025 if (inode_only == LOG_INODE_ALL && S_ISREG(inode->vfs_inode.i_mode)) { in copy_inode_items_to_log()
6050 for (int i = 0; i < batch->nr; i++) { in insert_delayed_items_batch()
6053 data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char); in insert_delayed_items_batch()
6054 write_extent_buffer(path->nodes[0], &curr->data, in insert_delayed_items_batch()
6055 (unsigned long)data_ptr, curr->data_len); in insert_delayed_items_batch()
6057 path->slots[0]++; in insert_delayed_items_batch()
6073 const int leaf_data_size = BTRFS_LEAF_DATA_SIZE(trans->fs_info); in log_delayed_insertion_items()
6075 struct btrfs_root *log = inode->root->log_root; in log_delayed_insertion_items()
6090 lockdep_assert_held(&inode->log_mutex); in log_delayed_insertion_items()
6101 if (curr->index > inode->last_dir_index_offset) { in log_delayed_insertion_items()
6114 return -ENOMEM; in log_delayed_insertion_items()
6122 const u32 curr_size = curr->data_len + sizeof(struct btrfs_item); in log_delayed_insertion_items()
6137 ins_sizes[batch_idx] = curr->data_len; in log_delayed_insertion_items()
6140 ins_keys[batch_idx].offset = curr->index; in log_delayed_insertion_items()
6142 batch.total_data_size += curr->data_len; in log_delayed_insertion_items()
6153 inode->last_dir_index_offset = curr->index; in log_delayed_insertion_items()
6173 u64 first_dir_index = curr->index; in log_delayed_deletions_full()
6185 if (next->index != curr->index + 1) in log_delayed_deletions_full()
6191 last_dir_index = curr->index; in log_delayed_deletions_full()
6194 ret = insert_dir_log_key(trans, inode->root->log_root, path, in log_delayed_deletions_full()
6212 struct extent_buffer *leaf = path->nodes[0]; in batch_delete_dir_index_items()
6213 const int last_slot = btrfs_header_nritems(leaf) - 1; in batch_delete_dir_index_items()
6214 int slot = path->slots[0] + 1; in batch_delete_dir_index_items()
6226 key.offset != next->index) in batch_delete_dir_index_items()
6234 return btrfs_del_items(trans, inode->root->log_root, path, in batch_delete_dir_index_items()
6235 path->slots[0], slot - path->slots[0]); in batch_delete_dir_index_items()
6244 struct btrfs_root *log = inode->root->log_root; in log_delayed_deletions_incremental()
6257 u64 first_dir_index = curr->index; in log_delayed_deletions_incremental()
6262 key.offset = curr->index; in log_delayed_deletions_incremental()
6263 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); in log_delayed_deletions_incremental()
6285 last_dir_index = last->index; in log_delayed_deletions_incremental()
6320 lockdep_assert_held(&inode->log_mutex); in log_delayed_deletion_items()
6325 if (ctx->logged_before) in log_delayed_deletion_items()
6342 const bool orig_log_new_dentries = ctx->log_new_dentries; in log_new_delayed_dentries()
6351 lockdep_assert_not_held(&inode->log_mutex); in log_new_delayed_dentries()
6353 ASSERT(!ctx->logging_new_delayed_dentries); in log_new_delayed_dentries()
6354 ctx->logging_new_delayed_dentries = true; in log_new_delayed_dentries()
6362 dir_item = (struct btrfs_dir_item *)item->data; in log_new_delayed_dentries()
6363 btrfs_disk_key_to_cpu(&key, &dir_item->location); in log_new_delayed_dentries()
6368 di_inode = btrfs_iget_logging(key.objectid, inode->root); in log_new_delayed_dentries()
6382 ctx->log_new_dentries = false; in log_new_delayed_dentries()
6385 if (!ret && ctx->log_new_dentries) in log_new_delayed_dentries()
6394 ctx->log_new_dentries = orig_log_new_dentries; in log_new_delayed_dentries()
6395 ctx->logging_new_delayed_dentries = false; in log_new_delayed_dentries()
6423 struct btrfs_root *log = inode->root->log_root; in btrfs_log_inode()
6427 struct extent_map_tree *em_tree = &inode->extent_tree; in btrfs_log_inode()
6438 return -ENOMEM; in btrfs_log_inode()
6442 return -ENOMEM; in btrfs_log_inode()
6453 if (S_ISDIR(inode->vfs_inode.i_mode) || in btrfs_log_inode()
6455 &inode->runtime_flags) && in btrfs_log_inode()
6459 max_key.type = (u8)-1; in btrfs_log_inode()
6460 max_key.offset = (u64)-1; in btrfs_log_inode()
6462 if (S_ISDIR(inode->vfs_inode.i_mode) && inode_only == LOG_INODE_ALL) in btrfs_log_inode()
6469 * is to prevent more than one level of recursion into btrfs_log_inode() in btrfs_log_inode()
6472 * $ mkdir -p a/b/c/d/e/f/g/h/... in btrfs_log_inode()
6473 * $ xfs_io -c "fsync" a in btrfs_log_inode()
6495 if (full_dir_logging && ctx->logging_new_delayed_dentries) { in btrfs_log_inode()
6501 mutex_lock(&inode->log_mutex); in btrfs_log_inode()
6506 * log replay, which is invalid on linux (symlink(2) returns -ENOENT if in btrfs_log_inode()
6512 if (S_ISLNK(inode->vfs_inode.i_mode)) in btrfs_log_inode()
6523 ctx->logged_before = (ret == 1); in btrfs_log_inode()
6533 if (full_dir_logging && inode->last_unlink_trans >= trans->transid) { in btrfs_log_inode()
6542 if (S_ISDIR(inode->vfs_inode.i_mode)) { in btrfs_log_inode()
6543 clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags); in btrfs_log_inode()
6544 if (ctx->logged_before) in btrfs_log_inode()
6548 if (inode_only == LOG_INODE_EXISTS && ctx->logged_before) { in btrfs_log_inode()
6554 * truncate - for e.g. create file, write 4K into offset in btrfs_log_inode()
6556 * fsync some other file (to sync log), power fail - if in btrfs_log_inode()
6567 &inode->runtime_flags)) { in btrfs_log_inode()
6570 if (ctx->logged_before) in btrfs_log_inode()
6575 &inode->runtime_flags); in btrfs_log_inode()
6577 &inode->runtime_flags); in btrfs_log_inode()
6578 if (ctx->logged_before) in btrfs_log_inode()
6583 &inode->runtime_flags) || in btrfs_log_inode()
6588 if (ctx->logged_before) in btrfs_log_inode()
6608 if (full_dir_logging && !ctx->logging_new_delayed_dentries) in btrfs_log_inode()
6647 if (!xattrs_logged && inode->logged_trans < trans->transid) { in btrfs_log_inode()
6661 write_lock(&em_tree->lock); in btrfs_log_inode()
6662 list_for_each_entry_safe(em, n, &em_tree->modified_extents, list) in btrfs_log_inode()
6663 list_del_init(&em->list); in btrfs_log_inode()
6664 write_unlock(&em_tree->lock); in btrfs_log_inode()
6681 spin_lock(&inode->lock); in btrfs_log_inode()
6682 inode->logged_trans = trans->transid; in btrfs_log_inode()
6708 * its last_log_commit - otherwise if an explicit fsync is made in btrfs_log_inode()
6715 inode->last_log_commit = inode->last_sub_trans; in btrfs_log_inode()
6716 spin_unlock(&inode->lock); in btrfs_log_inode()
6723 inode->last_reflink_trans = 0; in btrfs_log_inode()
6726 mutex_unlock(&inode->log_mutex); in btrfs_log_inode()
6734 ret = log_conflicting_inodes(trans, inode->root, ctx); in btrfs_log_inode()
6736 if (full_dir_logging && !ctx->logging_new_delayed_dentries) { in btrfs_log_inode()
6755 struct btrfs_root *root = inode->root; in btrfs_log_all_parents()
6760 return -ENOMEM; in btrfs_log_all_parents()
6761 path->skip_locking = 1; in btrfs_log_all_parents()
6762 path->search_commit_root = 1; in btrfs_log_all_parents()
6772 struct extent_buffer *leaf = path->nodes[0]; in btrfs_log_all_parents()
6773 int slot = path->slots[0]; in btrfs_log_all_parents()
6831 * mv -T /mnt/A /mnt/B in btrfs_log_all_parents()
6850 ctx->log_new_dentries = false; in btrfs_log_all_parents()
6853 if (!ret && ctx->log_new_dentries) in btrfs_log_all_parents()
6860 path->slots[0]++; in btrfs_log_all_parents()
6875 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); in log_new_ancestors()
6896 if (BTRFS_I(inode)->generation >= trans->transid && in log_new_ancestors()
6912 leaf = path->nodes[0]; in log_new_ancestors()
6913 slot = path->slots[0]; in log_new_ancestors()
6919 return -ENOENT; in log_new_ancestors()
6920 leaf = path->nodes[0]; in log_new_ancestors()
6921 slot = path->slots[0]; in log_new_ancestors()
6927 return -ENOENT; in log_new_ancestors()
6937 struct btrfs_root *root = inode->root; in log_new_ancestors_fast()
6939 struct super_block *sb = inode->vfs_inode.i_sb; in log_new_ancestors_fast()
6944 sb != parent->d_sb) in log_new_ancestors_fast()
6948 if (root != inode->root) in log_new_ancestors_fast()
6951 if (inode->generation >= trans->transid && in log_new_ancestors_fast()
6975 struct btrfs_root *root = inode->root; in log_all_new_ancestors()
6985 if (inode->vfs_inode.i_nlink < 2) in log_all_new_ancestors()
6990 return -ENOMEM; in log_all_new_ancestors()
7000 path->slots[0]++; in log_all_new_ancestors()
7003 struct extent_buffer *leaf = path->nodes[0]; in log_all_new_ancestors()
7004 int slot = path->slots[0]; in log_all_new_ancestors()
7029 ret = -EMLINK; in log_all_new_ancestors()
7065 struct btrfs_root *root = inode->root; in btrfs_log_inode_parent()
7066 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_log_inode_parent()
7075 if (btrfs_root_refs(&root->root_item) == 0) { in btrfs_log_inode_parent()
7084 if (btrfs_root_generation(&root->root_item) == trans->transid) { in btrfs_log_inode_parent()
7094 if ((btrfs_inode_in_log(inode, trans->transid) && in btrfs_log_inode_parent()
7095 list_empty(&ctx->ordered_extents)) || in btrfs_log_inode_parent()
7096 inode->vfs_inode.i_nlink == 0) { in btrfs_log_inode_parent()
7115 if (S_ISREG(inode->vfs_inode.i_mode) && in btrfs_log_inode_parent()
7116 inode->generation < trans->transid && in btrfs_log_inode_parent()
7117 inode->last_unlink_trans < trans->transid) { in btrfs_log_inode_parent()
7122 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx->log_new_dentries) in btrfs_log_inode_parent()
7131 * error -ENOTEMPTY). in btrfs_log_inode_parent()
7140 * xfs_io -c fsync testdir/foo in btrfs_log_inode_parent()
7157 * xfs_io -c fsync foo in btrfs_log_inode_parent()
7166 if (inode->last_unlink_trans >= trans->transid) { in btrfs_log_inode_parent()
7225 struct btrfs_fs_info *fs_info = log_root_tree->fs_info; in btrfs_recover_log_trees()
7233 return -ENOMEM; in btrfs_recover_log_trees()
7235 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); in btrfs_recover_log_trees()
7237 trans = btrfs_start_transaction(fs_info->tree_root, 0); in btrfs_recover_log_trees()
7254 key.offset = (u64)-1; in btrfs_recover_log_trees()
7265 if (path->slots[0] == 0) in btrfs_recover_log_trees()
7267 path->slots[0]--; in btrfs_recover_log_trees()
7269 btrfs_item_key_to_cpu(path->nodes[0], &found_key, in btrfs_recover_log_trees()
7270 path->slots[0]); in btrfs_recover_log_trees()
7298 if (ret == -ENOENT) in btrfs_recover_log_trees()
7299 ret = btrfs_pin_extent_for_log_replay(trans, log->node); in btrfs_recover_log_trees()
7308 wc.replay_dest->log_root = log; in btrfs_recover_log_trees()
7333 * root->objectid_mutex is not acquired as log replay in btrfs_recover_log_trees()
7341 wc.replay_dest->log_root = NULL; in btrfs_recover_log_trees()
7350 key.offset = found_key.offset - 1; in btrfs_recover_log_trees()
7374 log_root_tree->log_root = NULL; in btrfs_recover_log_trees()
7375 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); in btrfs_recover_log_trees()
7382 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); in btrfs_recover_log_trees()
7412 mutex_lock(&inode->log_mutex); in btrfs_record_unlink_dir()
7413 inode->last_unlink_trans = trans->transid; in btrfs_record_unlink_dir()
7414 mutex_unlock(&inode->log_mutex); in btrfs_record_unlink_dir()
7444 mutex_lock(&dir->log_mutex); in btrfs_record_unlink_dir()
7445 dir->last_unlink_trans = trans->transid; in btrfs_record_unlink_dir()
7446 mutex_unlock(&dir->log_mutex); in btrfs_record_unlink_dir()
7464 mutex_lock(&dir->log_mutex); in btrfs_record_snapshot_destroy()
7465 dir->last_unlink_trans = trans->transid; in btrfs_record_snapshot_destroy()
7466 mutex_unlock(&dir->log_mutex); in btrfs_record_snapshot_destroy()
7482 mutex_lock(&dir->log_mutex); in btrfs_record_new_subvolume()
7483 dir->last_unlink_trans = trans->transid; in btrfs_record_new_subvolume()
7484 mutex_unlock(&dir->log_mutex); in btrfs_record_new_subvolume()
7509 struct btrfs_root *root = inode->root; in btrfs_log_new_name()
7518 if (!S_ISDIR(inode->vfs_inode.i_mode)) in btrfs_log_new_name()
7519 inode->last_unlink_trans = trans->transid; in btrfs_log_new_name()
7533 * NULL), check if old_dir was logged - if it was not we can return and in btrfs_log_new_name()
7551 if (old_dir && old_dir->logged_trans == trans->transid) { in btrfs_log_new_name()
7552 struct btrfs_root *log = old_dir->root->log_root; in btrfs_log_new_name()
7558 ret = fscrypt_setup_filename(&old_dir->vfs_inode, in btrfs_log_new_name()
7559 &old_dentry->d_name, 0, &fname); in btrfs_log_new_name()
7583 ret = -ENOMEM; in btrfs_log_new_name()
7598 mutex_lock(&old_dir->log_mutex); in btrfs_log_new_name()
7611 mutex_unlock(&old_dir->log_mutex); in btrfs_log_new_name()