// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "misc.h"
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
#include "inode-item.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "dir-item.h"
#include "file-item.h"
#include "file.h"
#include "orphan.h"
#include "tree-checker.h"

#define MAX_CONFLICT_INODES 10

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
enum {
	LOG_INODE_ALL,
	LOG_INODE_EXISTS,
};

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  With the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
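 *
 * (These stages map to the LOG_WALK_* values below: LOG_WALK_PIN_ONLY and
 * LOG_WALK_REPLAY_INODES for the first two, then LOG_WALK_REPLAY_DIR_INDEX
 * and LOG_WALK_REPLAY_ALL for the final passes over directories, links and
 * extents.)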
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
enum {
	LOG_WALK_PIN_ONLY,
	LOG_WALK_REPLAY_INODES,
	LOG_WALK_REPLAY_DIR_INDEX,
	LOG_WALK_REPLAY_ALL,
};

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode,
			   int inode_only,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
static void wait_log_commit(struct btrfs_root *root, int transid);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;
	const bool zoned = btrfs_is_zoned(fs_info);
	int ret = 0;
	bool created = false;

	/*
	 * First check if the log root tree was already created. If not, create
	 * it before locking the root's log_mutex, just to keep lockdep happy.
	 */
	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state)) {
		mutex_lock(&tree_root->log_mutex);
		if (!fs_info->log_root_tree) {
			ret = btrfs_init_log_root_tree(trans, fs_info);
			if (!ret) {
				set_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state);
				created = true;
			}
		}
		mutex_unlock(&tree_root->log_mutex);
		if (ret)
			return ret;
	}

	mutex_lock(&root->log_mutex);

again:
	if (root->log_root) {
		int index = (root->log_transid + 1) % 2;

		if (btrfs_need_log_full_commit(trans)) {
			ret = BTRFS_LOG_FORCE_COMMIT;
			goto out;
		}

		if (zoned && atomic_read(&root->log_commit[index])) {
			wait_log_commit(root, root->log_transid - 1);
			goto again;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		/*
		 * This means fs_info->log_root_tree was already created
		 * for some other FS trees. Do the full commit not to mix
		 * nodes from multiple log transactions to do sequential
		 * writing.
		 */
		if (zoned && !created) {
			ret = BTRFS_LOG_FORCE_COMMIT;
			goto out;
		}

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_writers);
	if (!ctx->logging_new_name) {
		int index = root->log_transid % 2;

		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	const bool zoned = btrfs_is_zoned(root->fs_info);
	int ret = -ENOENT;

	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
		return ret;

	mutex_lock(&root->log_mutex);
again:
	if (root->log_root) {
		int index = (root->log_transid + 1) % 2;

		ret = 0;
		if (zoned && atomic_read(&root->log_commit[index])) {
			wait_log_commit(root, root->log_transid - 1);
			goto again;
		}
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	atomic_inc(&root->log_writers);
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}

/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		struct btrfs_tree_parent_check check = {
			.level = level,
			.transid = gen
		};

		ret = btrfs_read_extent_buffer(eb, &check);
		if (ret)
			return ret;
	}

	if (wc->pin) {
		ret = btrfs_pin_extent_for_log_replay(wc->trans, eb);
		if (ret)
			return ret;

		if (btrfs_buffer_uptodate(eb, gen, 0) &&
		    btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(eb);
	}
	return ret;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static int overwrite_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct btrfs_path *path,
			  struct extent_buffer *eb, int slot,
			  struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	/*
	 * This is only used during log replay, so the root is always from a
	 * fs/subvolume tree. In case we ever need to support a log root, then
	 * we'll have to clone the leaf in the path, release the path and use
	 * the leaf before writing into the log tree. See the comments at
	 * copy_items() for more details.
	 */
	ASSERT(btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID);

	item_size = btrfs_item_size(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* Look for the key in the destination tree. */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size(path->nodes[0],
					       path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;

		found_size = btrfs_item_size(path->nodes[0],
					     path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(trans, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(trans, path, item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0)
				btrfs_set_inode_size(dst_eb, dst_item, ino_size);
			goto no_copy;
		}

		if (S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;

		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;

		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

static int read_alloc_one_name(struct extent_buffer *eb, void *start, int len,
			       struct fscrypt_str *name)
{
	char *buf;

	buf = kmalloc(len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	read_extent_buffer(eb, buf, (unsigned long)start, len);
	name->name = buf;
	name->len = len;
	return 0;
}

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct inode *inode;

	inode = btrfs_iget(root->fs_info->sb, objectid, root);
	if (IS_ERR(inode))
		inode = NULL;
	return inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
				       btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	drop_args.start = start;
	drop_args.end = extent_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree (affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item));
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree? If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						       ins.offset);
			if (ret < 0) {
				goto out;
			} else if (ret == 0) {
				struct btrfs_ref ref = {
					.action = BTRFS_ADD_DELAYED_REF,
					.bytenr = ins.objectid,
					.num_bytes = ins.offset,
					.owning_root = btrfs_root_id(root),
					.ref_root = btrfs_root_id(root),
				};

				btrfs_init_data_ref(&ref, key->objectid, offset,
						    0, false);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						btrfs_root_id(root),
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_list(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, false);
			if (ret < 0)
				goto out;
			ret = 0;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our
			 * extent starting at an offset of 40K or higher, will
			 * end up looking at the second csum item only, which
			 * does not contain the checksum for any block starting
			 * at offset 40K or higher of our extent.
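			 *
			 * That is why, below, we first delete any existing
			 * csum items for the range with btrfs_del_csums()
			 * before re-inserting the csums found in the log tree
			 * with btrfs_csum_file_blocks().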
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				struct btrfs_root *csum_root;

				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
						  list);
				csum_root = btrfs_csum_root(fs_info,
							    sums->logical);
				if (!ret)
					ret = btrfs_del_csums(trans, csum_root,
							      sums->logical,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
								     csum_root,
								     sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
						extent_end - start);
	if (ret)
		goto out;

update_inode:
	btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, BTRFS_I(inode));
out:
	iput(inode);
	return ret;
}

static int unlink_inode_for_log_replay(struct btrfs_trans_handle *trans,
				       struct btrfs_inode *dir,
				       struct btrfs_inode *inode,
				       const struct fscrypt_str *name)
{
	int ret;

	ret = btrfs_unlink_inode(trans, dir, inode, name);
	if (ret)
		return ret;
	/*
	 * Whenever we need to check if a name exists or not, we check the
	 * fs/subvolume tree. So after an unlink we must run delayed items, so
	 * that future checks for a name during log replay see that the name
	 * does not exist anymore.
	 */
	return btrfs_run_delayed_items(trans);
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct btrfs_root *root = dir->root;
	struct inode *inode;
	struct fscrypt_str name;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	ret = read_alloc_one_name(leaf, di + 1, btrfs_dir_name_len(leaf, di), &name);
	if (ret)
		return -ENOMEM;

	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = unlink_inode_for_log_replay(trans, dir, BTRFS_I(inode), &name);
out:
	kfree(name.name);
	iput(inode);
	return ret;
}

/*
 * See if a given name and sequence number found in an inode back reference are
 * already in a directory and correctly point to this inode.
 *
 * Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it
 * exists.
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 struct fscrypt_str *name)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int ret = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, 0);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	} else if (di) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else {
		goto out;
	}

	btrfs_release_path(path);
	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, 0);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	} else if (di) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid == objectid)
			ret = 1;
	}
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const struct fscrypt_str *name)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret == 1) {
		ret = 0;
		goto out;
	}

	if (key->type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
						       path->slots[0],
						       ref_objectid, name);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0],
						   path->slots[0], name);
out:
	btrfs_free_path(path);
	return ret;
}

static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, struct fscrypt_str *name)
{
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			struct fscrypt_str victim_name;

			victim_ref = (struct btrfs_inode_ref *)ptr;
			ret = read_alloc_one_name(leaf, (victim_ref + 1),
				 btrfs_inode_ref_name_len(leaf, victim_ref),
				 &victim_name);
			if (ret)
				return ret;

			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, &victim_name);
			if (ret < 0) {
				kfree(victim_name.name);
				return ret;
			} else if (!ret) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = unlink_inode_for_log_replay(trans, dir, inode,
						&victim_name);
				kfree(victim_name.name);
				if (ret)
					return ret;
				goto again;
			}
			kfree(victim_name.name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name.len;
		}
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (IS_ERR(extref)) {
		return PTR_ERR(extref);
	} else if (extref) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			struct fscrypt_str victim_name;

			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			ret = read_alloc_one_name(leaf, &extref->name,
				 btrfs_inode_extref_name_len(leaf, extref),
				 &victim_name);
			if (ret)
				return ret;

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name.name,
							      victim_name.len);
			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, &victim_name);
			if (ret < 0) {
				kfree(victim_name.name);
				return ret;
			} else if (!ret) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = unlink_inode_for_log_replay(trans,
							BTRFS_I(victim_parent),
							inode, &victim_name);
				}
				iput(victim_parent);
				kfree(victim_name.name);
				if (ret)
					return ret;
				goto again;
			}
			kfree(victim_name.name);
next:
			cur_offset += victim_name.len + sizeof(*extref);
		}
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, 0);
	if (IS_ERR(di)) {
		return PTR_ERR(di);
	} else if (di) {
		ret = drop_one_dir_item(trans, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), name, 0);
	if (IS_ERR(di)) {
		return PTR_ERR(di);
	} else if (di) {
		ret = drop_one_dir_item(trans, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}

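/*
 * The two helpers below pull a single name out of the two on-disk back
 * reference formats. Roughly, a BTRFS_INODE_REF_KEY item packs one or more
 * entries of:
 *
 *     struct btrfs_inode_ref { index, name_len } followed by the name bytes
 *
 * with the parent directory's objectid in the key offset, while a
 * BTRFS_INODE_EXTREF_KEY item packs entries of:
 *
 *     struct btrfs_inode_extref { parent_objectid, index, name_len, name[] }
 *
 * with the key offset being a hash of the parent objectid and the name.
 */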
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     struct fscrypt_str *name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;
	int ret;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	ret = read_alloc_one_name(eb, &extref->name,
				  btrfs_inode_extref_name_len(eb, extref), name);
	if (ret)
		return ret;

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  struct fscrypt_str *name, u64 *index)
{
	struct btrfs_inode_ref *ref;
	int ret;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	ret = read_alloc_one_name(eb, ref + 1, btrfs_inode_ref_name_len(eb, ref),
				  name);
	if (ret)
		return ret;

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}

/*
 * Take an inode reference item from the log tree and iterate all names from the
 * inode reference item in the subvolume tree with the same key (if it exists).
 * For any name that is not in the inode reference item from the log tree, do a
 * proper unlink of that name (that is, remove its entry from the inode
 * reference item and both dir index keys).
 */
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		struct fscrypt_str name;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &name, NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
							       parent_id, &name);
		else
			ret = !!btrfs_find_name_in_backref(log_eb, log_slot, &name);

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name.name);
				goto out;
			}
			ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir),
						inode, &name);
			kfree(name.name);
			iput(dir);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name.name);
		ref_ptr += name.len;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct fscrypt_str name;
	int ret;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &name, &ref_index);
		}
		if (ret)
			goto out;

		ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
				   btrfs_ino(BTRFS_I(inode)), ref_index, &name);
		if (ret < 0) {
			goto out;
		} else if (ret == 0) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */
			ret = __add_inode_ref(trans, root, path, log,
					      BTRFS_I(dir), BTRFS_I(inode),
					      inode_objectid, parent_objectid,
					      ref_index, &name);
			if (ret) {
				if (ret == 1)
					ret = 0;
				goto out;
			}

			/* insert our name */
			ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
					     &name, 0, ref_index);
			if (ret)
				goto out;

			ret = btrfs_update_inode(trans, BTRFS_I(inode));
			if (ret)
				goto out;
		}
		/* Else, ret == 1, we already have a perfect match, we're done. */

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + name.len;
		kfree(name.name);
		name.name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/*
	 * Before we overwrite the inode reference item in the subvolume tree
	 * with the item from the log tree, we must unlink all names from the
	 * parent directory that are in the subvolume's tree inode reference
	 * item, otherwise we end up with an inconsistent subvolume tree where
	 * dir index entries exist for a name but there is no inode reference
	 * item with the same name.
	 */
	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
				    key);
	if (ret)
		goto out;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name.name);
	iput(dir);
	iput(inode);
	return ret;
}

static int count_inode_extrefs(struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(inode->root, inode_objectid, offset,
					    path, &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}

static int count_inode_refs(struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, inode->root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size(path->nodes[0],
						path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		ret = btrfs_update_inode(trans, BTRFS_I(inode));
		if (ret)
			goto out;
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = btrfs_insert_orphan_item(trans, root, ino);
		if (ret == -EEXIST)
			ret = 0;
	}

out:
	btrfs_free_path(path);
	return ret;
}

static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			break;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode) {
			ret = -EIO;
			break;
		}

		ret = fixup_inode_link_count(trans, inode);
		iput(inode);
		if (ret)
			break;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	btrfs_release_path(path);
	return ret;
}


/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, BTRFS_I(inode));
	} else if (ret == -EEXIST) {
		ret = 0;
	}
	iput(inode);

	return ret;
}

/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    const struct fscrypt_str *name,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
			     1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}

static int delete_conflicting_dir_entry(struct btrfs_trans_handle *trans,
					struct btrfs_inode *dir,
					struct btrfs_path *path,
					struct btrfs_dir_item *dst_di,
					const struct btrfs_key *log_key,
					u8 log_flags,
					bool exists)
{
	struct btrfs_key found_key;

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* The existing dentry points to the same inode, don't delete it. */
	if (found_key.objectid == log_key->objectid &&
	    found_key.type == log_key->type &&
	    found_key.offset == log_key->offset &&
	    btrfs_dir_flags(path->nodes[0], dst_di) == log_flags)
		return 1;

	/*
	 * Don't drop the conflicting directory entry if the inode for the new
	 * entry doesn't exist.
	 */
	if (!exists)
		return 0;

	return drop_one_dir_item(trans, path, dir, dst_di);
}

/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
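 *
 * For example, if the log has dir index N of the directory pointing the name
 * "foo" at inode 258 while the subvolume currently has the same name or index
 * pointing at inode 257, the conflicting entry is dropped first (sending inode
 * 257 through the link count fixup dir) and only then is the new name added.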
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	struct fscrypt_str name;
	struct btrfs_dir_item *dir_dst_di;
	struct btrfs_dir_item *index_dst_di;
	bool dir_dst_matches = false;
	bool index_dst_matches = false;
	struct btrfs_key log_key;
	struct btrfs_key search_key;
	struct inode *dir;
	u8 log_flags;
	bool exists;
	int ret;
	bool update_size = true;
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name);
	if (ret)
		goto out;

	log_flags = btrfs_dir_flags(eb, di);
	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	btrfs_release_path(path);
	if (ret < 0)
		goto out;
	exists = (ret == 0);
	ret = 0;

	dir_dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					   &name, 1);
	if (IS_ERR(dir_dst_di)) {
		ret = PTR_ERR(dir_dst_di);
		goto out;
	} else if (dir_dst_di) {
		ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path,
						   dir_dst_di, &log_key,
						   log_flags, exists);
		if (ret < 0)
			goto out;
		dir_dst_matches = (ret == 1);
	}

	btrfs_release_path(path);

	index_dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						   key->objectid, key->offset,
						   &name, 1);
	if (IS_ERR(index_dst_di)) {
		ret = PTR_ERR(index_dst_di);
		goto out;
	} else if (index_dst_di) {
		ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path,
						   index_dst_di, &log_key,
						   log_flags, exists);
		if (ret < 0)
			goto out;
		index_dst_matches = (ret == 1);
	}

	btrfs_release_path(path);

	if (dir_dst_matches && index_dst_matches) {
		ret = 0;
		update_size = false;
		goto out;
	}

	/*
	 * Check if the inode reference exists in the log for the given name,
	 * inode and parent inode
	 */
	search_key.objectid = log_key.objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = key->objectid;
	ret = backref_in_log(root->log_root, &search_key, 0, &name);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}

	search_key.objectid = log_key.objectid;
	search_key.type = BTRFS_INODE_EXTREF_KEY;
	search_key.offset = key->objectid;
	ret = backref_in_log(root->log_root, &search_key, key->objectid, &name);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      &name, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;

out:
	if (!ret && update_size) {
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name.len * 2);
		ret = btrfs_update_inode(trans, BTRFS_I(dir));
	}
	kfree(name.name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;
}

/* Replay one dir item from a BTRFS_DIR_INDEX_KEY key. */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret;
	struct btrfs_dir_item *di;

	/* We only log dir index keys, which only contain a single dir item. */
	ASSERT(key->type == BTRFS_DIR_INDEX_KEY);

	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	ret = replay_one_name(trans, root, path, eb, di, key);
	if (ret < 0)
		return ret;

	/*
	 * If this entry refers to a non-directory (directories can not have a
	 * link count > 1) and it was added in the transaction that was not
	 * committed, make sure we fixup the link count of the inode the entry
	 * points to. Otherwise something like the following would result in a
	 * directory pointing to an inode with a wrong link count that does not
	 * account for this dir entry:
	 *
	 * mkdir testdir
	 * touch testdir/foo
	 * touch testdir/bar
	 * sync
	 *
	 * ln testdir/bar testdir/bar_link
	 * ln testdir/foo testdir/foo_link
	 * xfs_io -c "fsync" testdir/bar
	 *
	 * <power failure>
	 *
	 * mount fs, log replay happens
	 *
	 * File foo would remain with a link count of 1 when it has two entries
	 * pointing to it in the directory testdir. This would make it impossible
	 * to ever delete the parent directory as it would result in stale
	 * dentries that can never be deleted.
	 */
	if (ret == 1 && btrfs_dir_ftype(eb, di) != BTRFS_FT_DIR) {
		struct btrfs_path *fixup_path;
		struct btrfs_key di_key;

		fixup_path = btrfs_alloc_path();
		if (!fixup_path)
			return -ENOMEM;

		btrfs_dir_item_key_to_cpu(eb, di, &di_key);
		ret = link_to_fixup_dir(trans, root, fixup_path, di_key.objectid);
		btrfs_free_path(fixup_path);
	}

	return ret;
}

/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
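 *
 * For example, a BTRFS_DIR_LOG_INDEX_KEY item with a key offset of 5 and a
 * dir_log_end of 10 says the log is authoritative for index numbers 5
 * through 10 of that directory: any dir index key in that range that exists
 * in the subvolume but not in the log tree gets unlinked during replay.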
2019 */ 2020 static noinline int find_dir_range(struct btrfs_root *root, 2021 struct btrfs_path *path, 2022 u64 dirid, 2023 u64 *start_ret, u64 *end_ret) 2024 { 2025 struct btrfs_key key; 2026 u64 found_end; 2027 struct btrfs_dir_log_item *item; 2028 int ret; 2029 int nritems; 2030 2031 if (*start_ret == (u64)-1) 2032 return 1; 2033 2034 key.objectid = dirid; 2035 key.type = BTRFS_DIR_LOG_INDEX_KEY; 2036 key.offset = *start_ret; 2037 2038 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2039 if (ret < 0) 2040 goto out; 2041 if (ret > 0) { 2042 if (path->slots[0] == 0) 2043 goto out; 2044 path->slots[0]--; 2045 } 2046 if (ret != 0) 2047 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 2048 2049 if (key.type != BTRFS_DIR_LOG_INDEX_KEY || key.objectid != dirid) { 2050 ret = 1; 2051 goto next; 2052 } 2053 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2054 struct btrfs_dir_log_item); 2055 found_end = btrfs_dir_log_end(path->nodes[0], item); 2056 2057 if (*start_ret >= key.offset && *start_ret <= found_end) { 2058 ret = 0; 2059 *start_ret = key.offset; 2060 *end_ret = found_end; 2061 goto out; 2062 } 2063 ret = 1; 2064 next: 2065 /* check the next slot in the tree to see if it is a valid item */ 2066 nritems = btrfs_header_nritems(path->nodes[0]); 2067 path->slots[0]++; 2068 if (path->slots[0] >= nritems) { 2069 ret = btrfs_next_leaf(root, path); 2070 if (ret) 2071 goto out; 2072 } 2073 2074 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 2075 2076 if (key.type != BTRFS_DIR_LOG_INDEX_KEY || key.objectid != dirid) { 2077 ret = 1; 2078 goto out; 2079 } 2080 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2081 struct btrfs_dir_log_item); 2082 found_end = btrfs_dir_log_end(path->nodes[0], item); 2083 *start_ret = key.offset; 2084 *end_ret = found_end; 2085 ret = 0; 2086 out: 2087 btrfs_release_path(path); 2088 return ret; 2089 } 2090 2091 /* 2092 * this looks for a given directory item in the log. If the directory 2093 * item is not in the log, the item is removed and the inode it points 2094 * to is unlinked 2095 */ 2096 static noinline int check_item_in_log(struct btrfs_trans_handle *trans, 2097 struct btrfs_root *log, 2098 struct btrfs_path *path, 2099 struct btrfs_path *log_path, 2100 struct inode *dir, 2101 struct btrfs_key *dir_key) 2102 { 2103 struct btrfs_root *root = BTRFS_I(dir)->root; 2104 int ret; 2105 struct extent_buffer *eb; 2106 int slot; 2107 struct btrfs_dir_item *di; 2108 struct fscrypt_str name; 2109 struct inode *inode = NULL; 2110 struct btrfs_key location; 2111 2112 /* 2113 * Currently we only log dir index keys. Even if we replay a log created 2114 * by an older kernel that logged both dir index and dir item keys, all 2115 * we need to do is process the dir index keys, we (and our caller) can 2116 * safely ignore dir item keys (key type BTRFS_DIR_ITEM_KEY). 2117 */ 2118 ASSERT(dir_key->type == BTRFS_DIR_INDEX_KEY); 2119 2120 eb = path->nodes[0]; 2121 slot = path->slots[0]; 2122 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); 2123 ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name); 2124 if (ret) 2125 goto out; 2126 2127 if (log) { 2128 struct btrfs_dir_item *log_di; 2129 2130 log_di = btrfs_lookup_dir_index_item(trans, log, log_path, 2131 dir_key->objectid, 2132 dir_key->offset, &name, 0); 2133 if (IS_ERR(log_di)) { 2134 ret = PTR_ERR(log_di); 2135 goto out; 2136 } else if (log_di) { 2137 /* The dentry exists in the log, we have nothing to do. 
*/ 2138 ret = 0; 2139 goto out; 2140 } 2141 } 2142 2143 btrfs_dir_item_key_to_cpu(eb, di, &location); 2144 btrfs_release_path(path); 2145 btrfs_release_path(log_path); 2146 inode = read_one_inode(root, location.objectid); 2147 if (!inode) { 2148 ret = -EIO; 2149 goto out; 2150 } 2151 2152 ret = link_to_fixup_dir(trans, root, path, location.objectid); 2153 if (ret) 2154 goto out; 2155 2156 inc_nlink(inode); 2157 ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), BTRFS_I(inode), 2158 &name); 2159 /* 2160 * Unlike dir item keys, dir index keys can only have one name (entry) in 2161 * them, as there are no key collisions since each key has a unique offset 2162 * (an index number), so we're done. 2163 */ 2164 out: 2165 btrfs_release_path(path); 2166 btrfs_release_path(log_path); 2167 kfree(name.name); 2168 iput(inode); 2169 return ret; 2170 } 2171 2172 static int replay_xattr_deletes(struct btrfs_trans_handle *trans, 2173 struct btrfs_root *root, 2174 struct btrfs_root *log, 2175 struct btrfs_path *path, 2176 const u64 ino) 2177 { 2178 struct btrfs_key search_key; 2179 struct btrfs_path *log_path; 2180 int i; 2181 int nritems; 2182 int ret; 2183 2184 log_path = btrfs_alloc_path(); 2185 if (!log_path) 2186 return -ENOMEM; 2187 2188 search_key.objectid = ino; 2189 search_key.type = BTRFS_XATTR_ITEM_KEY; 2190 search_key.offset = 0; 2191 again: 2192 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 2193 if (ret < 0) 2194 goto out; 2195 process_leaf: 2196 nritems = btrfs_header_nritems(path->nodes[0]); 2197 for (i = path->slots[0]; i < nritems; i++) { 2198 struct btrfs_key key; 2199 struct btrfs_dir_item *di; 2200 struct btrfs_dir_item *log_di; 2201 u32 total_size; 2202 u32 cur; 2203 2204 btrfs_item_key_to_cpu(path->nodes[0], &key, i); 2205 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) { 2206 ret = 0; 2207 goto out; 2208 } 2209 2210 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item); 2211 total_size = btrfs_item_size(path->nodes[0], i); 2212 cur = 0; 2213 while (cur < total_size) { 2214 u16 name_len = btrfs_dir_name_len(path->nodes[0], di); 2215 u16 data_len = btrfs_dir_data_len(path->nodes[0], di); 2216 u32 this_len = sizeof(*di) + name_len + data_len; 2217 char *name; 2218 2219 name = kmalloc(name_len, GFP_NOFS); 2220 if (!name) { 2221 ret = -ENOMEM; 2222 goto out; 2223 } 2224 read_extent_buffer(path->nodes[0], name, 2225 (unsigned long)(di + 1), name_len); 2226 2227 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino, 2228 name, name_len, 0); 2229 btrfs_release_path(log_path); 2230 if (!log_di) { 2231 /* Doesn't exist in log tree, so delete it. 
*/ 2232 btrfs_release_path(path); 2233 di = btrfs_lookup_xattr(trans, root, path, ino, 2234 name, name_len, -1); 2235 kfree(name); 2236 if (IS_ERR(di)) { 2237 ret = PTR_ERR(di); 2238 goto out; 2239 } 2240 ASSERT(di); 2241 ret = btrfs_delete_one_dir_name(trans, root, 2242 path, di); 2243 if (ret) 2244 goto out; 2245 btrfs_release_path(path); 2246 search_key = key; 2247 goto again; 2248 } 2249 kfree(name); 2250 if (IS_ERR(log_di)) { 2251 ret = PTR_ERR(log_di); 2252 goto out; 2253 } 2254 cur += this_len; 2255 di = (struct btrfs_dir_item *)((char *)di + this_len); 2256 } 2257 } 2258 ret = btrfs_next_leaf(root, path); 2259 if (ret > 0) 2260 ret = 0; 2261 else if (ret == 0) 2262 goto process_leaf; 2263 out: 2264 btrfs_free_path(log_path); 2265 btrfs_release_path(path); 2266 return ret; 2267 } 2268 2269 2270 /* 2271 * deletion replay happens before we copy any new directory items 2272 * out of the log or out of backreferences from inodes. It 2273 * scans the log to find ranges of keys that log is authoritative for, 2274 * and then scans the directory to find items in those ranges that are 2275 * not present in the log. 2276 * 2277 * Anything we don't find in the log is unlinked and removed from the 2278 * directory. 2279 */ 2280 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 2281 struct btrfs_root *root, 2282 struct btrfs_root *log, 2283 struct btrfs_path *path, 2284 u64 dirid, int del_all) 2285 { 2286 u64 range_start; 2287 u64 range_end; 2288 int ret = 0; 2289 struct btrfs_key dir_key; 2290 struct btrfs_key found_key; 2291 struct btrfs_path *log_path; 2292 struct inode *dir; 2293 2294 dir_key.objectid = dirid; 2295 dir_key.type = BTRFS_DIR_INDEX_KEY; 2296 log_path = btrfs_alloc_path(); 2297 if (!log_path) 2298 return -ENOMEM; 2299 2300 dir = read_one_inode(root, dirid); 2301 /* it isn't an error if the inode isn't there, that can happen 2302 * because we replay the deletes before we copy in the inode item 2303 * from the log 2304 */ 2305 if (!dir) { 2306 btrfs_free_path(log_path); 2307 return 0; 2308 } 2309 2310 range_start = 0; 2311 range_end = 0; 2312 while (1) { 2313 if (del_all) 2314 range_end = (u64)-1; 2315 else { 2316 ret = find_dir_range(log, path, dirid, 2317 &range_start, &range_end); 2318 if (ret < 0) 2319 goto out; 2320 else if (ret > 0) 2321 break; 2322 } 2323 2324 dir_key.offset = range_start; 2325 while (1) { 2326 int nritems; 2327 ret = btrfs_search_slot(NULL, root, &dir_key, path, 2328 0, 0); 2329 if (ret < 0) 2330 goto out; 2331 2332 nritems = btrfs_header_nritems(path->nodes[0]); 2333 if (path->slots[0] >= nritems) { 2334 ret = btrfs_next_leaf(root, path); 2335 if (ret == 1) 2336 break; 2337 else if (ret < 0) 2338 goto out; 2339 } 2340 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2341 path->slots[0]); 2342 if (found_key.objectid != dirid || 2343 found_key.type != dir_key.type) { 2344 ret = 0; 2345 goto out; 2346 } 2347 2348 if (found_key.offset > range_end) 2349 break; 2350 2351 ret = check_item_in_log(trans, log, path, 2352 log_path, dir, 2353 &found_key); 2354 if (ret) 2355 goto out; 2356 if (found_key.offset == (u64)-1) 2357 break; 2358 dir_key.offset = found_key.offset + 1; 2359 } 2360 btrfs_release_path(path); 2361 if (range_end == (u64)-1) 2362 break; 2363 range_start = range_end + 1; 2364 } 2365 ret = 0; 2366 out: 2367 btrfs_release_path(path); 2368 btrfs_free_path(log_path); 2369 iput(dir); 2370 return ret; 2371 } 2372 2373 /* 2374 * the process_func used to replay items from the log tree. This 2375 * gets called in two different stages. 
The first stage just looks 2376 * for inodes and makes sure they are all copied into the subvolume. 2377 * 2378 * The second stage copies all the other item types from the log into 2379 * the subvolume. The two stage approach is slower, but gets rid of 2380 * lots of complexity around inodes referencing other inodes that exist 2381 * only in the log (references come from either directory items or inode 2382 * back refs). 2383 */ 2384 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, 2385 struct walk_control *wc, u64 gen, int level) 2386 { 2387 int nritems; 2388 struct btrfs_tree_parent_check check = { 2389 .transid = gen, 2390 .level = level 2391 }; 2392 struct btrfs_path *path; 2393 struct btrfs_root *root = wc->replay_dest; 2394 struct btrfs_key key; 2395 int i; 2396 int ret; 2397 2398 ret = btrfs_read_extent_buffer(eb, &check); 2399 if (ret) 2400 return ret; 2401 2402 level = btrfs_header_level(eb); 2403 2404 if (level != 0) 2405 return 0; 2406 2407 path = btrfs_alloc_path(); 2408 if (!path) 2409 return -ENOMEM; 2410 2411 nritems = btrfs_header_nritems(eb); 2412 for (i = 0; i < nritems; i++) { 2413 btrfs_item_key_to_cpu(eb, &key, i); 2414 2415 /* inode keys are done during the first stage */ 2416 if (key.type == BTRFS_INODE_ITEM_KEY && 2417 wc->stage == LOG_WALK_REPLAY_INODES) { 2418 struct btrfs_inode_item *inode_item; 2419 u32 mode; 2420 2421 inode_item = btrfs_item_ptr(eb, i, 2422 struct btrfs_inode_item); 2423 /* 2424 * If we have a tmpfile (O_TMPFILE) that got fsync'ed 2425 * and never got linked before the fsync, skip it, as 2426 * replaying it is pointless since it would be deleted 2427 * later. We skip logging tmpfiles, but it's always 2428 * possible we are replaying a log created with a kernel 2429 * that used to log tmpfiles. 2430 */ 2431 if (btrfs_inode_nlink(eb, inode_item) == 0) { 2432 wc->ignore_cur_inode = true; 2433 continue; 2434 } else { 2435 wc->ignore_cur_inode = false; 2436 } 2437 ret = replay_xattr_deletes(wc->trans, root, log, 2438 path, key.objectid); 2439 if (ret) 2440 break; 2441 mode = btrfs_inode_mode(eb, inode_item); 2442 if (S_ISDIR(mode)) { 2443 ret = replay_dir_deletes(wc->trans, 2444 root, log, path, key.objectid, 0); 2445 if (ret) 2446 break; 2447 } 2448 ret = overwrite_item(wc->trans, root, path, 2449 eb, i, &key); 2450 if (ret) 2451 break; 2452 2453 /* 2454 * Before replaying extents, truncate the inode to its 2455 * size. We need to do it now and not after log replay 2456 * because before an fsync we can have prealloc extents 2457 * added beyond the inode's i_size. If we did it after, 2458 * through orphan cleanup for example, we would drop 2459 * those prealloc extents just after replaying them. 2460 */ 2461 if (S_ISREG(mode)) { 2462 struct btrfs_drop_extents_args drop_args = { 0 }; 2463 struct inode *inode; 2464 u64 from; 2465 2466 inode = read_one_inode(root, key.objectid); 2467 if (!inode) { 2468 ret = -EIO; 2469 break; 2470 } 2471 from = ALIGN(i_size_read(inode), 2472 root->fs_info->sectorsize); 2473 drop_args.start = from; 2474 drop_args.end = (u64)-1; 2475 drop_args.drop_cache = true; 2476 ret = btrfs_drop_extents(wc->trans, root, 2477 BTRFS_I(inode), 2478 &drop_args); 2479 if (!ret) { 2480 inode_sub_bytes(inode, 2481 drop_args.bytes_found); 2482 /* Update the inode's nbytes. 
*/ 2483 ret = btrfs_update_inode(wc->trans, 2484 BTRFS_I(inode)); 2485 } 2486 iput(inode); 2487 if (ret) 2488 break; 2489 } 2490 2491 ret = link_to_fixup_dir(wc->trans, root, 2492 path, key.objectid); 2493 if (ret) 2494 break; 2495 } 2496 2497 if (wc->ignore_cur_inode) 2498 continue; 2499 2500 if (key.type == BTRFS_DIR_INDEX_KEY && 2501 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) { 2502 ret = replay_one_dir_item(wc->trans, root, path, 2503 eb, i, &key); 2504 if (ret) 2505 break; 2506 } 2507 2508 if (wc->stage < LOG_WALK_REPLAY_ALL) 2509 continue; 2510 2511 /* these keys are simply copied */ 2512 if (key.type == BTRFS_XATTR_ITEM_KEY) { 2513 ret = overwrite_item(wc->trans, root, path, 2514 eb, i, &key); 2515 if (ret) 2516 break; 2517 } else if (key.type == BTRFS_INODE_REF_KEY || 2518 key.type == BTRFS_INODE_EXTREF_KEY) { 2519 ret = add_inode_ref(wc->trans, root, log, path, 2520 eb, i, &key); 2521 if (ret && ret != -ENOENT) 2522 break; 2523 ret = 0; 2524 } else if (key.type == BTRFS_EXTENT_DATA_KEY) { 2525 ret = replay_one_extent(wc->trans, root, path, 2526 eb, i, &key); 2527 if (ret) 2528 break; 2529 } 2530 /* 2531 * We don't log BTRFS_DIR_ITEM_KEY keys anymore, only the 2532 * BTRFS_DIR_INDEX_KEY items which we use to derive the 2533 * BTRFS_DIR_ITEM_KEY items. If we are replaying a log from an 2534 * older kernel with such keys, ignore them. 2535 */ 2536 } 2537 btrfs_free_path(path); 2538 return ret; 2539 } 2540 2541 /* 2542 * Correctly adjust the reserved bytes occupied by a log tree extent buffer 2543 */ 2544 static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start) 2545 { 2546 struct btrfs_block_group *cache; 2547 2548 cache = btrfs_lookup_block_group(fs_info, start); 2549 if (!cache) { 2550 btrfs_err(fs_info, "unable to find block group for %llu", start); 2551 return; 2552 } 2553 2554 spin_lock(&cache->space_info->lock); 2555 spin_lock(&cache->lock); 2556 cache->reserved -= fs_info->nodesize; 2557 cache->space_info->bytes_reserved -= fs_info->nodesize; 2558 spin_unlock(&cache->lock); 2559 spin_unlock(&cache->space_info->lock); 2560 2561 btrfs_put_block_group(cache); 2562 } 2563 2564 static int clean_log_buffer(struct btrfs_trans_handle *trans, 2565 struct extent_buffer *eb) 2566 { 2567 int ret; 2568 2569 btrfs_tree_lock(eb); 2570 btrfs_clear_buffer_dirty(trans, eb); 2571 wait_on_extent_buffer_writeback(eb); 2572 btrfs_tree_unlock(eb); 2573 2574 if (trans) { 2575 ret = btrfs_pin_reserved_extent(trans, eb); 2576 if (ret) 2577 return ret; 2578 } else { 2579 unaccount_log_buffer(eb->fs_info, eb->start); 2580 } 2581 2582 return 0; 2583 } 2584 2585 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, 2586 struct btrfs_root *root, 2587 struct btrfs_path *path, int *level, 2588 struct walk_control *wc) 2589 { 2590 struct btrfs_fs_info *fs_info = root->fs_info; 2591 u64 bytenr; 2592 u64 ptr_gen; 2593 struct extent_buffer *next; 2594 struct extent_buffer *cur; 2595 int ret = 0; 2596 2597 while (*level > 0) { 2598 struct btrfs_tree_parent_check check = { 0 }; 2599 2600 cur = path->nodes[*level]; 2601 2602 WARN_ON(btrfs_header_level(cur) != *level); 2603 2604 if (path->slots[*level] >= 2605 btrfs_header_nritems(cur)) 2606 break; 2607 2608 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); 2609 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 2610 check.transid = ptr_gen; 2611 check.level = *level - 1; 2612 check.has_first_key = true; 2613 btrfs_node_key_to_cpu(cur, &check.first_key, path->slots[*level]); 2614 2615 next = 
btrfs_find_create_tree_block(fs_info, bytenr, 2616 btrfs_header_owner(cur), 2617 *level - 1); 2618 if (IS_ERR(next)) 2619 return PTR_ERR(next); 2620 2621 if (*level == 1) { 2622 ret = wc->process_func(root, next, wc, ptr_gen, 2623 *level - 1); 2624 if (ret) { 2625 free_extent_buffer(next); 2626 return ret; 2627 } 2628 2629 path->slots[*level]++; 2630 if (wc->free) { 2631 ret = btrfs_read_extent_buffer(next, &check); 2632 if (ret) { 2633 free_extent_buffer(next); 2634 return ret; 2635 } 2636 2637 ret = clean_log_buffer(trans, next); 2638 if (ret) { 2639 free_extent_buffer(next); 2640 return ret; 2641 } 2642 } 2643 free_extent_buffer(next); 2644 continue; 2645 } 2646 ret = btrfs_read_extent_buffer(next, &check); 2647 if (ret) { 2648 free_extent_buffer(next); 2649 return ret; 2650 } 2651 2652 if (path->nodes[*level-1]) 2653 free_extent_buffer(path->nodes[*level-1]); 2654 path->nodes[*level-1] = next; 2655 *level = btrfs_header_level(next); 2656 path->slots[*level] = 0; 2657 cond_resched(); 2658 } 2659 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); 2660 2661 cond_resched(); 2662 return 0; 2663 } 2664 2665 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, 2666 struct btrfs_root *root, 2667 struct btrfs_path *path, int *level, 2668 struct walk_control *wc) 2669 { 2670 int i; 2671 int slot; 2672 int ret; 2673 2674 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { 2675 slot = path->slots[i]; 2676 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { 2677 path->slots[i]++; 2678 *level = i; 2679 WARN_ON(*level == 0); 2680 return 0; 2681 } else { 2682 ret = wc->process_func(root, path->nodes[*level], wc, 2683 btrfs_header_generation(path->nodes[*level]), 2684 *level); 2685 if (ret) 2686 return ret; 2687 2688 if (wc->free) { 2689 ret = clean_log_buffer(trans, path->nodes[*level]); 2690 if (ret) 2691 return ret; 2692 } 2693 free_extent_buffer(path->nodes[*level]); 2694 path->nodes[*level] = NULL; 2695 *level = i + 1; 2696 } 2697 } 2698 return 1; 2699 } 2700 2701 /* 2702 * drop the reference count on the tree rooted at 'snap'. This traverses 2703 * the tree freeing any blocks that have a ref count of zero after being 2704 * decremented. 2705 */ 2706 static int walk_log_tree(struct btrfs_trans_handle *trans, 2707 struct btrfs_root *log, struct walk_control *wc) 2708 { 2709 int ret = 0; 2710 int wret; 2711 int level; 2712 struct btrfs_path *path; 2713 int orig_level; 2714 2715 path = btrfs_alloc_path(); 2716 if (!path) 2717 return -ENOMEM; 2718 2719 level = btrfs_header_level(log->node); 2720 orig_level = level; 2721 path->nodes[level] = log->node; 2722 atomic_inc(&log->node->refs); 2723 path->slots[level] = 0; 2724 2725 while (1) { 2726 wret = walk_down_log_tree(trans, log, path, &level, wc); 2727 if (wret > 0) 2728 break; 2729 if (wret < 0) { 2730 ret = wret; 2731 goto out; 2732 } 2733 2734 wret = walk_up_log_tree(trans, log, path, &level, wc); 2735 if (wret > 0) 2736 break; 2737 if (wret < 0) { 2738 ret = wret; 2739 goto out; 2740 } 2741 } 2742 2743 /* was the root node processed? 
if not, catch it here */ 2744 if (path->nodes[orig_level]) { 2745 ret = wc->process_func(log, path->nodes[orig_level], wc, 2746 btrfs_header_generation(path->nodes[orig_level]), 2747 orig_level); 2748 if (ret) 2749 goto out; 2750 if (wc->free) 2751 ret = clean_log_buffer(trans, path->nodes[orig_level]); 2752 } 2753 2754 out: 2755 btrfs_free_path(path); 2756 return ret; 2757 } 2758 2759 /* 2760 * helper function to update the item for a given subvolumes log root 2761 * in the tree of log roots 2762 */ 2763 static int update_log_root(struct btrfs_trans_handle *trans, 2764 struct btrfs_root *log, 2765 struct btrfs_root_item *root_item) 2766 { 2767 struct btrfs_fs_info *fs_info = log->fs_info; 2768 int ret; 2769 2770 if (log->log_transid == 1) { 2771 /* insert root item on the first sync */ 2772 ret = btrfs_insert_root(trans, fs_info->log_root_tree, 2773 &log->root_key, root_item); 2774 } else { 2775 ret = btrfs_update_root(trans, fs_info->log_root_tree, 2776 &log->root_key, root_item); 2777 } 2778 return ret; 2779 } 2780 2781 static void wait_log_commit(struct btrfs_root *root, int transid) 2782 { 2783 DEFINE_WAIT(wait); 2784 int index = transid % 2; 2785 2786 /* 2787 * we only allow two pending log transactions at a time, 2788 * so we know that if ours is more than 2 older than the 2789 * current transaction, we're done 2790 */ 2791 for (;;) { 2792 prepare_to_wait(&root->log_commit_wait[index], 2793 &wait, TASK_UNINTERRUPTIBLE); 2794 2795 if (!(root->log_transid_committed < transid && 2796 atomic_read(&root->log_commit[index]))) 2797 break; 2798 2799 mutex_unlock(&root->log_mutex); 2800 schedule(); 2801 mutex_lock(&root->log_mutex); 2802 } 2803 finish_wait(&root->log_commit_wait[index], &wait); 2804 } 2805 2806 static void wait_for_writer(struct btrfs_root *root) 2807 { 2808 DEFINE_WAIT(wait); 2809 2810 for (;;) { 2811 prepare_to_wait(&root->log_writer_wait, &wait, 2812 TASK_UNINTERRUPTIBLE); 2813 if (!atomic_read(&root->log_writers)) 2814 break; 2815 2816 mutex_unlock(&root->log_mutex); 2817 schedule(); 2818 mutex_lock(&root->log_mutex); 2819 } 2820 finish_wait(&root->log_writer_wait, &wait); 2821 } 2822 2823 void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx, struct inode *inode) 2824 { 2825 ctx->log_ret = 0; 2826 ctx->log_transid = 0; 2827 ctx->log_new_dentries = false; 2828 ctx->logging_new_name = false; 2829 ctx->logging_new_delayed_dentries = false; 2830 ctx->logged_before = false; 2831 ctx->inode = inode; 2832 INIT_LIST_HEAD(&ctx->list); 2833 INIT_LIST_HEAD(&ctx->ordered_extents); 2834 INIT_LIST_HEAD(&ctx->conflict_inodes); 2835 ctx->num_conflict_inodes = 0; 2836 ctx->logging_conflict_inodes = false; 2837 ctx->scratch_eb = NULL; 2838 } 2839 2840 void btrfs_init_log_ctx_scratch_eb(struct btrfs_log_ctx *ctx) 2841 { 2842 struct btrfs_inode *inode = BTRFS_I(ctx->inode); 2843 2844 if (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) && 2845 !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags)) 2846 return; 2847 2848 /* 2849 * Don't care about allocation failure. This is just for optimization, 2850 * if we fail to allocate here, we will try again later if needed. 
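If the allocation fails here, clone_leaf() falls back to btrfs_clone_extent_buffer() on demand when ctx->scratch_eb is still NULL.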
2851 */ 2852 ctx->scratch_eb = alloc_dummy_extent_buffer(inode->root->fs_info, 0); 2853 } 2854 2855 void btrfs_release_log_ctx_extents(struct btrfs_log_ctx *ctx) 2856 { 2857 struct btrfs_ordered_extent *ordered; 2858 struct btrfs_ordered_extent *tmp; 2859 2860 ASSERT(inode_is_locked(ctx->inode)); 2861 2862 list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) { 2863 list_del_init(&ordered->log_list); 2864 btrfs_put_ordered_extent(ordered); 2865 } 2866 } 2867 2868 2869 static inline void btrfs_remove_log_ctx(struct btrfs_root *root, 2870 struct btrfs_log_ctx *ctx) 2871 { 2872 mutex_lock(&root->log_mutex); 2873 list_del_init(&ctx->list); 2874 mutex_unlock(&root->log_mutex); 2875 } 2876 2877 /* 2878 * Invoked in log mutex context, or be sure there is no other task which 2879 * can access the list. 2880 */ 2881 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root, 2882 int index, int error) 2883 { 2884 struct btrfs_log_ctx *ctx; 2885 struct btrfs_log_ctx *safe; 2886 2887 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) { 2888 list_del_init(&ctx->list); 2889 ctx->log_ret = error; 2890 } 2891 } 2892 2893 /* 2894 * Sends a given tree log down to the disk and updates the super blocks to 2895 * record it. When this call is done, you know that any inodes previously 2896 * logged are safely on disk only if it returns 0. 2897 * 2898 * Any other return value means you need to call btrfs_commit_transaction. 2899 * Some of the edge cases for fsyncing directories that have had unlinks 2900 * or renames done in the past mean that sometimes the only safe 2901 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN, 2902 * that has happened. 2903 */ 2904 int btrfs_sync_log(struct btrfs_trans_handle *trans, 2905 struct btrfs_root *root, struct btrfs_log_ctx *ctx) 2906 { 2907 int index1; 2908 int index2; 2909 int mark; 2910 int ret; 2911 struct btrfs_fs_info *fs_info = root->fs_info; 2912 struct btrfs_root *log = root->log_root; 2913 struct btrfs_root *log_root_tree = fs_info->log_root_tree; 2914 struct btrfs_root_item new_root_item; 2915 int log_transid = 0; 2916 struct btrfs_log_ctx root_log_ctx; 2917 struct blk_plug plug; 2918 u64 log_root_start; 2919 u64 log_root_level; 2920 2921 mutex_lock(&root->log_mutex); 2922 log_transid = ctx->log_transid; 2923 if (root->log_transid_committed >= log_transid) { 2924 mutex_unlock(&root->log_mutex); 2925 return ctx->log_ret; 2926 } 2927 2928 index1 = log_transid % 2; 2929 if (atomic_read(&root->log_commit[index1])) { 2930 wait_log_commit(root, log_transid); 2931 mutex_unlock(&root->log_mutex); 2932 return ctx->log_ret; 2933 } 2934 ASSERT(log_transid == root->log_transid); 2935 atomic_set(&root->log_commit[index1], 1); 2936 2937 /* wait for previous tree log sync to complete */ 2938 if (atomic_read(&root->log_commit[(index1 + 1) % 2])) 2939 wait_log_commit(root, log_transid - 1); 2940 2941 while (1) { 2942 int batch = atomic_read(&root->log_batch); 2943 /* when we're on an ssd, just kick the log commit out */ 2944 if (!btrfs_test_opt(fs_info, SSD) && 2945 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) { 2946 mutex_unlock(&root->log_mutex); 2947 schedule_timeout_uninterruptible(1); 2948 mutex_lock(&root->log_mutex); 2949 } 2950 wait_for_writer(root); 2951 if (batch == atomic_read(&root->log_batch)) 2952 break; 2953 } 2954 2955 /* bail out if we need to do a full commit */ 2956 if (btrfs_need_log_full_commit(trans)) { 2957 ret = BTRFS_LOG_FORCE_COMMIT; 2958 mutex_unlock(&root->log_mutex); 2959 goto out; 
2960 } 2961 2962 if (log_transid % 2 == 0) 2963 mark = EXTENT_DIRTY; 2964 else 2965 mark = EXTENT_NEW; 2966 2967 /* we start IO on all the marked extents here, but we don't actually 2968 * wait for them until later. 2969 */ 2970 blk_start_plug(&plug); 2971 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark); 2972 /* 2973 * -EAGAIN happens when someone, e.g., a concurrent transaction 2974 * commit, writes a dirty extent in this tree-log commit. This 2975 * concurrent write will create a hole writing out the extents, 2976 * and we cannot proceed on a zoned filesystem, requiring 2977 * sequential writing. While we can bail out to a full commit 2978 * here, but we can continue hoping the concurrent writing fills 2979 * the hole. 2980 */ 2981 if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) 2982 ret = 0; 2983 if (ret) { 2984 blk_finish_plug(&plug); 2985 btrfs_set_log_full_commit(trans); 2986 mutex_unlock(&root->log_mutex); 2987 goto out; 2988 } 2989 2990 /* 2991 * We _must_ update under the root->log_mutex in order to make sure we 2992 * have a consistent view of the log root we are trying to commit at 2993 * this moment. 2994 * 2995 * We _must_ copy this into a local copy, because we are not holding the 2996 * log_root_tree->log_mutex yet. This is important because when we 2997 * commit the log_root_tree we must have a consistent view of the 2998 * log_root_tree when we update the super block to point at the 2999 * log_root_tree bytenr. If we update the log_root_tree here we'll race 3000 * with the commit and possibly point at the new block which we may not 3001 * have written out. 3002 */ 3003 btrfs_set_root_node(&log->root_item, log->node); 3004 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item)); 3005 3006 btrfs_set_root_log_transid(root, root->log_transid + 1); 3007 log->log_transid = root->log_transid; 3008 root->log_start_pid = 0; 3009 /* 3010 * IO has been started, blocks of the log tree have WRITTEN flag set 3011 * in their headers. new modifications of the log will be written to 3012 * new positions. so it's safe to allow log writers to go in. 3013 */ 3014 mutex_unlock(&root->log_mutex); 3015 3016 if (btrfs_is_zoned(fs_info)) { 3017 mutex_lock(&fs_info->tree_root->log_mutex); 3018 if (!log_root_tree->node) { 3019 ret = btrfs_alloc_log_tree_node(trans, log_root_tree); 3020 if (ret) { 3021 mutex_unlock(&fs_info->tree_root->log_mutex); 3022 blk_finish_plug(&plug); 3023 goto out; 3024 } 3025 } 3026 mutex_unlock(&fs_info->tree_root->log_mutex); 3027 } 3028 3029 btrfs_init_log_ctx(&root_log_ctx, NULL); 3030 3031 mutex_lock(&log_root_tree->log_mutex); 3032 3033 index2 = log_root_tree->log_transid % 2; 3034 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); 3035 root_log_ctx.log_transid = log_root_tree->log_transid; 3036 3037 /* 3038 * Now we are safe to update the log_root_tree because we're under the 3039 * log_mutex, and we're a current writer so we're holding the commit 3040 * open until we drop the log_mutex. 
3041 */
3042 ret = update_log_root(trans, log, &new_root_item);
3043 if (ret) {
3044 list_del_init(&root_log_ctx.list);
3045 blk_finish_plug(&plug);
3046 btrfs_set_log_full_commit(trans);
3047 if (ret != -ENOSPC)
3048 btrfs_err(fs_info,
3049 "failed to update log for root %llu ret %d",
3050 btrfs_root_id(root), ret);
3051 btrfs_wait_tree_log_extents(log, mark);
3052 mutex_unlock(&log_root_tree->log_mutex);
3053 goto out;
3054 }
3055
3056 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3057 blk_finish_plug(&plug);
3058 list_del_init(&root_log_ctx.list);
3059 mutex_unlock(&log_root_tree->log_mutex);
3060 ret = root_log_ctx.log_ret;
3061 goto out;
3062 }
3063
3064 if (atomic_read(&log_root_tree->log_commit[index2])) {
3065 blk_finish_plug(&plug);
3066 ret = btrfs_wait_tree_log_extents(log, mark);
3067 wait_log_commit(log_root_tree,
3068 root_log_ctx.log_transid);
3069 mutex_unlock(&log_root_tree->log_mutex);
3070 if (!ret)
3071 ret = root_log_ctx.log_ret;
3072 goto out;
3073 }
3074 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3075 atomic_set(&log_root_tree->log_commit[index2], 1);
3076
3077 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3078 wait_log_commit(log_root_tree,
3079 root_log_ctx.log_transid - 1);
3080 }
3081
3082 /*
3083 * now that we've moved on to the tree of log tree roots,
3084 * check the full commit flag again
3085 */
3086 if (btrfs_need_log_full_commit(trans)) {
3087 blk_finish_plug(&plug);
3088 btrfs_wait_tree_log_extents(log, mark);
3089 mutex_unlock(&log_root_tree->log_mutex);
3090 ret = BTRFS_LOG_FORCE_COMMIT;
3091 goto out_wake_log_root;
3092 }
3093
3094 ret = btrfs_write_marked_extents(fs_info,
3095 &log_root_tree->dirty_log_pages,
3096 EXTENT_DIRTY | EXTENT_NEW);
3097 blk_finish_plug(&plug);
3098 /*
3099 * As described above, -EAGAIN indicates a hole in the extents. We
3100 * cannot wait for these write outs since waiting would cause a
3101 * deadlock. Bail out to the full commit instead.
3102 */
3103 if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) {
3104 btrfs_set_log_full_commit(trans);
3105 btrfs_wait_tree_log_extents(log, mark);
3106 mutex_unlock(&log_root_tree->log_mutex);
3107 goto out_wake_log_root;
3108 } else if (ret) {
3109 btrfs_set_log_full_commit(trans);
3110 mutex_unlock(&log_root_tree->log_mutex);
3111 goto out_wake_log_root;
3112 }
3113 ret = btrfs_wait_tree_log_extents(log, mark);
3114 if (!ret)
3115 ret = btrfs_wait_tree_log_extents(log_root_tree,
3116 EXTENT_NEW | EXTENT_DIRTY);
3117 if (ret) {
3118 btrfs_set_log_full_commit(trans);
3119 mutex_unlock(&log_root_tree->log_mutex);
3120 goto out_wake_log_root;
3121 }
3122
3123 log_root_start = log_root_tree->node->start;
3124 log_root_level = btrfs_header_level(log_root_tree->node);
3125 log_root_tree->log_transid++;
3126 mutex_unlock(&log_root_tree->log_mutex);
3127
3128 /*
3129 * Here we are guaranteed that nobody is going to write the superblock
3130 * for the current transaction before us, and that we will not write
3131 * our superblock before the previous transaction finishes its commit
3132 * and writes its superblock, because:
3133 *
3134 * 1) We are holding a handle on the current transaction, so nobody
3135 * can commit it until we release the handle;
3136 *
3137 * 2) Before writing our superblock we acquire the tree_log_mutex, so
3138 * if the previous transaction is still committing, and hasn't yet
3139 * written its superblock, we wait for it to do it, because a
3140 * transaction commit acquires the tree_log_mutex when the commit
3141 * begins and releases it only after writing its superblock.
3142 */
3143 mutex_lock(&fs_info->tree_log_mutex);
3144
3145 /*
3146 * The previous transaction writeout phase could have failed, and thus
3147 * marked the fs in an error state. We must not commit here, as we
3148 * could have updated our generation in the super_for_commit and
3149 * writing the super here would result in transid mismatches. If there
3150 * is an error here just bail.
3151 */
3152 if (BTRFS_FS_ERROR(fs_info)) {
3153 ret = -EIO;
3154 btrfs_set_log_full_commit(trans);
3155 btrfs_abort_transaction(trans, ret);
3156 mutex_unlock(&fs_info->tree_log_mutex);
3157 goto out_wake_log_root;
3158 }
3159
3160 btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
3161 btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
3162 ret = write_all_supers(fs_info, 1);
3163 mutex_unlock(&fs_info->tree_log_mutex);
3164 if (ret) {
3165 btrfs_set_log_full_commit(trans);
3166 btrfs_abort_transaction(trans, ret);
3167 goto out_wake_log_root;
3168 }
3169
3170 /*
3171 * We know there can only be one task here, since we have not yet set
3172 * root->log_commit[index1] to 0 and any task attempting to sync the
3173 * log must wait for the previous log transaction to commit if it's
3174 * still in progress or wait for the current log transaction commit if
3175 * someone else already started it. We use <= and not < because the
3176 * first log transaction has an ID of 0.
3177 */ 3178 ASSERT(btrfs_get_root_last_log_commit(root) <= log_transid); 3179 btrfs_set_root_last_log_commit(root, log_transid); 3180 3181 out_wake_log_root: 3182 mutex_lock(&log_root_tree->log_mutex); 3183 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret); 3184 3185 log_root_tree->log_transid_committed++; 3186 atomic_set(&log_root_tree->log_commit[index2], 0); 3187 mutex_unlock(&log_root_tree->log_mutex); 3188 3189 /* 3190 * The barrier before waitqueue_active (in cond_wake_up) is needed so 3191 * all the updates above are seen by the woken threads. It might not be 3192 * necessary, but proving that seems to be hard. 3193 */ 3194 cond_wake_up(&log_root_tree->log_commit_wait[index2]); 3195 out: 3196 mutex_lock(&root->log_mutex); 3197 btrfs_remove_all_log_ctxs(root, index1, ret); 3198 root->log_transid_committed++; 3199 atomic_set(&root->log_commit[index1], 0); 3200 mutex_unlock(&root->log_mutex); 3201 3202 /* 3203 * The barrier before waitqueue_active (in cond_wake_up) is needed so 3204 * all the updates above are seen by the woken threads. It might not be 3205 * necessary, but proving that seems to be hard. 3206 */ 3207 cond_wake_up(&root->log_commit_wait[index1]); 3208 return ret; 3209 } 3210 3211 static void free_log_tree(struct btrfs_trans_handle *trans, 3212 struct btrfs_root *log) 3213 { 3214 int ret; 3215 struct walk_control wc = { 3216 .free = 1, 3217 .process_func = process_one_buffer 3218 }; 3219 3220 if (log->node) { 3221 ret = walk_log_tree(trans, log, &wc); 3222 if (ret) { 3223 /* 3224 * We weren't able to traverse the entire log tree, the 3225 * typical scenario is getting an -EIO when reading an 3226 * extent buffer of the tree, due to a previous writeback 3227 * failure of it. 3228 */ 3229 set_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, 3230 &log->fs_info->fs_state); 3231 3232 /* 3233 * Some extent buffers of the log tree may still be dirty 3234 * and not yet written back to storage, because we may 3235 * have updates to a log tree without syncing a log tree, 3236 * such as during rename and link operations. So flush 3237 * them out and wait for their writeback to complete, so 3238 * that we properly cleanup their state and pages. 3239 */ 3240 btrfs_write_marked_extents(log->fs_info, 3241 &log->dirty_log_pages, 3242 EXTENT_DIRTY | EXTENT_NEW); 3243 btrfs_wait_tree_log_extents(log, 3244 EXTENT_DIRTY | EXTENT_NEW); 3245 3246 if (trans) 3247 btrfs_abort_transaction(trans, ret); 3248 else 3249 btrfs_handle_fs_error(log->fs_info, ret, NULL); 3250 } 3251 } 3252 3253 extent_io_tree_release(&log->dirty_log_pages); 3254 extent_io_tree_release(&log->log_csum_range); 3255 3256 btrfs_put_root(log); 3257 } 3258 3259 /* 3260 * free all the extents used by the tree log. This should be called 3261 * at commit time of the full transaction 3262 */ 3263 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) 3264 { 3265 if (root->log_root) { 3266 free_log_tree(trans, root->log_root); 3267 root->log_root = NULL; 3268 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); 3269 } 3270 return 0; 3271 } 3272 3273 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, 3274 struct btrfs_fs_info *fs_info) 3275 { 3276 if (fs_info->log_root_tree) { 3277 free_log_tree(trans, fs_info->log_root_tree); 3278 fs_info->log_root_tree = NULL; 3279 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &fs_info->tree_root->state); 3280 } 3281 return 0; 3282 } 3283 3284 /* 3285 * Check if an inode was logged in the current transaction. 
This correctly deals
3286 * with the case where the inode was logged but has a logged_trans of 0, which
3287 * happens if the inode is evicted and loaded again, as logged_trans is an in
3288 * memory only field (not persisted).
3289 *
3290 * Returns 1 if the inode was logged before in the transaction, 0 if it was not,
3291 * and < 0 on error.
3292 */
3293 static int inode_logged(const struct btrfs_trans_handle *trans,
3294 struct btrfs_inode *inode,
3295 struct btrfs_path *path_in)
3296 {
3297 struct btrfs_path *path = path_in;
3298 struct btrfs_key key;
3299 int ret;
3300
3301 if (inode->logged_trans == trans->transid)
3302 return 1;
3303
3304 /*
3305 * If logged_trans is not 0, then we know the inode was not logged
3306 * in this transaction, so we can return false right away.
3307 */
3308 if (inode->logged_trans > 0)
3309 return 0;
3310
3311 /*
3312 * If no log tree was created for this root in this transaction, then
3313 * the inode can not have been logged in this transaction. In that case
3314 * set logged_trans to anything greater than 0 and less than the current
3315 * transaction's ID, to avoid the search below in a future call in case
3316 * a log tree gets created after this.
3317 */
3318 if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state)) {
3319 inode->logged_trans = trans->transid - 1;
3320 return 0;
3321 }
3322
3323 /*
3324 * We have a log tree and the inode's logged_trans is 0. We can't tell
3325 * for sure if the inode was logged before in this transaction by looking
3326 * only at logged_trans. We could be pessimistic and assume it was, but
3327 * that can lead to unnecessarily logging an inode during rename and link
3328 * operations, and then further updating the log in followup rename and
3329 * link operations, especially if it's a directory, which adds latency
3330 * visible to applications doing a series of rename or link operations.
3331 *
3332 * A logged_trans of 0 here can mean several things:
3333 *
3334 * 1) The inode was never logged since the filesystem was mounted, and may
3335 * or may not have been evicted and loaded again;
3336 *
3337 * 2) The inode was logged in a previous transaction, then evicted and
3338 * then loaded again;
3339 *
3340 * 3) The inode was logged in the current transaction, then evicted and
3341 * then loaded again.
3342 *
3343 * For cases 1) and 2) we don't want to return true, but we need to detect
3344 * case 3) and return true. So we do a search in the log root for the inode
3345 * item.
3346 */
3347 key.objectid = btrfs_ino(inode);
3348 key.type = BTRFS_INODE_ITEM_KEY;
3349 key.offset = 0;
3350
3351 if (!path) {
3352 path = btrfs_alloc_path();
3353 if (!path)
3354 return -ENOMEM;
3355 }
3356
3357 ret = btrfs_search_slot(NULL, inode->root->log_root, &key, path, 0, 0);
3358
3359 if (path_in)
3360 btrfs_release_path(path);
3361 else
3362 btrfs_free_path(path);
3363
3364 /*
3365 * Logging an inode always results in logging its inode item. So if we
3366 * did not find the item we know the inode was not logged for sure.
3367 */
3368 if (ret < 0) {
3369 return ret;
3370 } else if (ret > 0) {
3371 /*
3372 * Set logged_trans to a value greater than 0 and less than the
3373 * current transaction to avoid doing the search in future calls.
3374 */
3375 inode->logged_trans = trans->transid - 1;
3376 return 0;
3377 }
3378
3379 /*
3380 * The inode was previously logged and then evicted, set logged_trans to
3381 * the current transaction's ID, to avoid future tree searches as long as
3382 * the inode is not evicted again.
3383 */
3384 inode->logged_trans = trans->transid;
3385
3386 /*
3387 * If it's a directory, then we must set last_dir_index_offset to the
3388 * maximum possible value, so that the next attempt to log the inode does
3389 * not skip checking if dir index keys found in modified subvolume tree
3390 * leaves have been logged before, otherwise it would result in attempts
3391 * to insert duplicate dir index keys in the log tree. This must be done
3392 * because last_dir_index_offset is an in-memory only field, not persisted
3393 * in the inode item or any other on-disk structure, so its value is lost
3394 * once the inode is evicted.
3395 */
3396 if (S_ISDIR(inode->vfs_inode.i_mode))
3397 inode->last_dir_index_offset = (u64)-1;
3398
3399 return 1;
3400 }
3401
3402 /*
3403 * Delete a directory entry from the log if it exists.
3404 *
3405 * Returns < 0 on error
3406 * 1 if the entry does not exist
3407 * 0 if the entry existed and was successfully deleted
3408 */
3409 static int del_logged_dentry(struct btrfs_trans_handle *trans,
3410 struct btrfs_root *log,
3411 struct btrfs_path *path,
3412 u64 dir_ino,
3413 const struct fscrypt_str *name,
3414 u64 index)
3415 {
3416 struct btrfs_dir_item *di;
3417
3418 /*
3419 * We only log dir index items of a directory, so we don't need to look
3420 * for dir item keys.
3421 */
3422 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3423 index, name, -1);
3424 if (IS_ERR(di))
3425 return PTR_ERR(di);
3426 else if (!di)
3427 return 1;
3428
3429 /*
3430 * We do not need to update the size field of the directory's
3431 * inode item because on log replay we update the field to reflect
3432 * all existing entries in the directory (see overwrite_item()).
3433 */
3434 return btrfs_delete_one_dir_name(trans, log, path, di);
3435 }
3436
3437 /*
3438 * If both a file and directory are logged, and unlinks or renames are
3439 * mixed in, we have a few interesting corners:
3440 *
3441 * create file X in dir Y
3442 * link file X to X.link in dir Y
3443 * fsync file X
3444 * unlink file X but leave X.link
3445 * fsync dir Y
3446 *
3447 * After a crash we would expect only X.link to exist. But file X
3448 * didn't get fsync'd again so the log has back refs for X and X.link.
3449 *
3450 * We solve this by removing directory entries and inode backrefs from the
3451 * log when a file that was logged in the current transaction is
3452 * unlinked. Any later fsync will include the updated log entries, and
3453 * we'll be able to reconstruct the proper directory items from backrefs.
3454 *
3455 * This optimization allows us to avoid relogging the entire inode
3456 * or the entire directory.
3457 */ 3458 void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, 3459 struct btrfs_root *root, 3460 const struct fscrypt_str *name, 3461 struct btrfs_inode *dir, u64 index) 3462 { 3463 struct btrfs_path *path; 3464 int ret; 3465 3466 ret = inode_logged(trans, dir, NULL); 3467 if (ret == 0) 3468 return; 3469 else if (ret < 0) { 3470 btrfs_set_log_full_commit(trans); 3471 return; 3472 } 3473 3474 ret = join_running_log_trans(root); 3475 if (ret) 3476 return; 3477 3478 mutex_lock(&dir->log_mutex); 3479 3480 path = btrfs_alloc_path(); 3481 if (!path) { 3482 ret = -ENOMEM; 3483 goto out_unlock; 3484 } 3485 3486 ret = del_logged_dentry(trans, root->log_root, path, btrfs_ino(dir), 3487 name, index); 3488 btrfs_free_path(path); 3489 out_unlock: 3490 mutex_unlock(&dir->log_mutex); 3491 if (ret < 0) 3492 btrfs_set_log_full_commit(trans); 3493 btrfs_end_log_trans(root); 3494 } 3495 3496 /* see comments for btrfs_del_dir_entries_in_log */ 3497 void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, 3498 struct btrfs_root *root, 3499 const struct fscrypt_str *name, 3500 struct btrfs_inode *inode, u64 dirid) 3501 { 3502 struct btrfs_root *log; 3503 u64 index; 3504 int ret; 3505 3506 ret = inode_logged(trans, inode, NULL); 3507 if (ret == 0) 3508 return; 3509 else if (ret < 0) { 3510 btrfs_set_log_full_commit(trans); 3511 return; 3512 } 3513 3514 ret = join_running_log_trans(root); 3515 if (ret) 3516 return; 3517 log = root->log_root; 3518 mutex_lock(&inode->log_mutex); 3519 3520 ret = btrfs_del_inode_ref(trans, log, name, btrfs_ino(inode), 3521 dirid, &index); 3522 mutex_unlock(&inode->log_mutex); 3523 if (ret < 0 && ret != -ENOENT) 3524 btrfs_set_log_full_commit(trans); 3525 btrfs_end_log_trans(root); 3526 } 3527 3528 /* 3529 * creates a range item in the log for 'dirid'. first_offset and 3530 * last_offset tell us which parts of the key space the log should 3531 * be considered authoritative for. 3532 */ 3533 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, 3534 struct btrfs_root *log, 3535 struct btrfs_path *path, 3536 u64 dirid, 3537 u64 first_offset, u64 last_offset) 3538 { 3539 int ret; 3540 struct btrfs_key key; 3541 struct btrfs_dir_log_item *item; 3542 3543 key.objectid = dirid; 3544 key.offset = first_offset; 3545 key.type = BTRFS_DIR_LOG_INDEX_KEY; 3546 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); 3547 /* 3548 * -EEXIST is fine and can happen sporadically when we are logging a 3549 * directory and have concurrent insertions in the subvolume's tree for 3550 * items from other inodes and that result in pushing off some dir items 3551 * from one leaf to another in order to accommodate for the new items. 3552 * This results in logging the same dir index range key. 3553 */ 3554 if (ret && ret != -EEXIST) 3555 return ret; 3556 3557 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3558 struct btrfs_dir_log_item); 3559 if (ret == -EEXIST) { 3560 const u64 curr_end = btrfs_dir_log_end(path->nodes[0], item); 3561 3562 /* 3563 * btrfs_del_dir_entries_in_log() might have been called during 3564 * an unlink between the initial insertion of this key and the 3565 * current update, or we might be logging a single entry deletion 3566 * during a rename, so set the new last_offset to the max value. 
3567 */ 3568 last_offset = max(last_offset, curr_end); 3569 } 3570 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); 3571 btrfs_mark_buffer_dirty(trans, path->nodes[0]); 3572 btrfs_release_path(path); 3573 return 0; 3574 } 3575 3576 static int flush_dir_items_batch(struct btrfs_trans_handle *trans, 3577 struct btrfs_inode *inode, 3578 struct extent_buffer *src, 3579 struct btrfs_path *dst_path, 3580 int start_slot, 3581 int count) 3582 { 3583 struct btrfs_root *log = inode->root->log_root; 3584 char *ins_data = NULL; 3585 struct btrfs_item_batch batch; 3586 struct extent_buffer *dst; 3587 unsigned long src_offset; 3588 unsigned long dst_offset; 3589 u64 last_index; 3590 struct btrfs_key key; 3591 u32 item_size; 3592 int ret; 3593 int i; 3594 3595 ASSERT(count > 0); 3596 batch.nr = count; 3597 3598 if (count == 1) { 3599 btrfs_item_key_to_cpu(src, &key, start_slot); 3600 item_size = btrfs_item_size(src, start_slot); 3601 batch.keys = &key; 3602 batch.data_sizes = &item_size; 3603 batch.total_data_size = item_size; 3604 } else { 3605 struct btrfs_key *ins_keys; 3606 u32 *ins_sizes; 3607 3608 ins_data = kmalloc(count * sizeof(u32) + 3609 count * sizeof(struct btrfs_key), GFP_NOFS); 3610 if (!ins_data) 3611 return -ENOMEM; 3612 3613 ins_sizes = (u32 *)ins_data; 3614 ins_keys = (struct btrfs_key *)(ins_data + count * sizeof(u32)); 3615 batch.keys = ins_keys; 3616 batch.data_sizes = ins_sizes; 3617 batch.total_data_size = 0; 3618 3619 for (i = 0; i < count; i++) { 3620 const int slot = start_slot + i; 3621 3622 btrfs_item_key_to_cpu(src, &ins_keys[i], slot); 3623 ins_sizes[i] = btrfs_item_size(src, slot); 3624 batch.total_data_size += ins_sizes[i]; 3625 } 3626 } 3627 3628 ret = btrfs_insert_empty_items(trans, log, dst_path, &batch); 3629 if (ret) 3630 goto out; 3631 3632 dst = dst_path->nodes[0]; 3633 /* 3634 * Copy all the items in bulk, in a single copy operation. Item data is 3635 * organized such that it's placed at the end of a leaf and from right 3636 * to left. For example, the data for the second item ends at an offset 3637 * that matches the offset where the data for the first item starts, the 3638 * data for the third item ends at an offset that matches the offset 3639 * where the data of the second items starts, and so on. 3640 * Therefore our source and destination start offsets for copy match the 3641 * offsets of the last items (highest slots). 3642 */ 3643 dst_offset = btrfs_item_ptr_offset(dst, dst_path->slots[0] + count - 1); 3644 src_offset = btrfs_item_ptr_offset(src, start_slot + count - 1); 3645 copy_extent_buffer(dst, src, dst_offset, src_offset, batch.total_data_size); 3646 btrfs_release_path(dst_path); 3647 3648 last_index = batch.keys[count - 1].offset; 3649 ASSERT(last_index > inode->last_dir_index_offset); 3650 3651 /* 3652 * If for some unexpected reason the last item's index is not greater 3653 * than the last index we logged, warn and force a transaction commit. 
3654 */ 3655 if (WARN_ON(last_index <= inode->last_dir_index_offset)) 3656 ret = BTRFS_LOG_FORCE_COMMIT; 3657 else 3658 inode->last_dir_index_offset = last_index; 3659 3660 if (btrfs_get_first_dir_index_to_log(inode) == 0) 3661 btrfs_set_first_dir_index_to_log(inode, batch.keys[0].offset); 3662 out: 3663 kfree(ins_data); 3664 3665 return ret; 3666 } 3667 3668 static int clone_leaf(struct btrfs_path *path, struct btrfs_log_ctx *ctx) 3669 { 3670 const int slot = path->slots[0]; 3671 3672 if (ctx->scratch_eb) { 3673 copy_extent_buffer_full(ctx->scratch_eb, path->nodes[0]); 3674 } else { 3675 ctx->scratch_eb = btrfs_clone_extent_buffer(path->nodes[0]); 3676 if (!ctx->scratch_eb) 3677 return -ENOMEM; 3678 } 3679 3680 btrfs_release_path(path); 3681 path->nodes[0] = ctx->scratch_eb; 3682 path->slots[0] = slot; 3683 /* 3684 * Add extra ref to scratch eb so that it is not freed when callers 3685 * release the path, so we can reuse it later if needed. 3686 */ 3687 atomic_inc(&ctx->scratch_eb->refs); 3688 3689 return 0; 3690 } 3691 3692 static int process_dir_items_leaf(struct btrfs_trans_handle *trans, 3693 struct btrfs_inode *inode, 3694 struct btrfs_path *path, 3695 struct btrfs_path *dst_path, 3696 struct btrfs_log_ctx *ctx, 3697 u64 *last_old_dentry_offset) 3698 { 3699 struct btrfs_root *log = inode->root->log_root; 3700 struct extent_buffer *src; 3701 const int nritems = btrfs_header_nritems(path->nodes[0]); 3702 const u64 ino = btrfs_ino(inode); 3703 bool last_found = false; 3704 int batch_start = 0; 3705 int batch_size = 0; 3706 int ret; 3707 3708 /* 3709 * We need to clone the leaf, release the read lock on it, and use the 3710 * clone before modifying the log tree. See the comment at copy_items() 3711 * about why we need to do this. 3712 */ 3713 ret = clone_leaf(path, ctx); 3714 if (ret < 0) 3715 return ret; 3716 3717 src = path->nodes[0]; 3718 3719 for (int i = path->slots[0]; i < nritems; i++) { 3720 struct btrfs_dir_item *di; 3721 struct btrfs_key key; 3722 int ret; 3723 3724 btrfs_item_key_to_cpu(src, &key, i); 3725 3726 if (key.objectid != ino || key.type != BTRFS_DIR_INDEX_KEY) { 3727 last_found = true; 3728 break; 3729 } 3730 3731 di = btrfs_item_ptr(src, i, struct btrfs_dir_item); 3732 3733 /* 3734 * Skip ranges of items that consist only of dir item keys created 3735 * in past transactions. However if we find a gap, we must log a 3736 * dir index range item for that gap, so that index keys in that 3737 * gap are deleted during log replay. 3738 */ 3739 if (btrfs_dir_transid(src, di) < trans->transid) { 3740 if (key.offset > *last_old_dentry_offset + 1) { 3741 ret = insert_dir_log_key(trans, log, dst_path, 3742 ino, *last_old_dentry_offset + 1, 3743 key.offset - 1); 3744 if (ret < 0) 3745 return ret; 3746 } 3747 3748 *last_old_dentry_offset = key.offset; 3749 continue; 3750 } 3751 3752 /* If we logged this dir index item before, we can skip it. */ 3753 if (key.offset <= inode->last_dir_index_offset) 3754 continue; 3755 3756 /* 3757 * We must make sure that when we log a directory entry, the 3758 * corresponding inode, after log replay, has a matching link 3759 * count. For example: 3760 * 3761 * touch foo 3762 * mkdir mydir 3763 * sync 3764 * ln foo mydir/bar 3765 * xfs_io -c "fsync" mydir 3766 * <crash> 3767 * <mount fs and log replay> 3768 * 3769 * Would result in a fsync log that when replayed, our file inode 3770 * would have a link count of 1, but we get two directory entries 3771 * pointing to the same inode. 
After removing one of the names, 3772 * it would not be possible to remove the other name, which 3773 * resulted always in stale file handle errors, and would not be 3774 * possible to rmdir the parent directory, since its i_size could 3775 * never be decremented to the value BTRFS_EMPTY_DIR_SIZE, 3776 * resulting in -ENOTEMPTY errors. 3777 */ 3778 if (!ctx->log_new_dentries) { 3779 struct btrfs_key di_key; 3780 3781 btrfs_dir_item_key_to_cpu(src, di, &di_key); 3782 if (di_key.type != BTRFS_ROOT_ITEM_KEY) 3783 ctx->log_new_dentries = true; 3784 } 3785 3786 if (batch_size == 0) 3787 batch_start = i; 3788 batch_size++; 3789 } 3790 3791 if (batch_size > 0) { 3792 int ret; 3793 3794 ret = flush_dir_items_batch(trans, inode, src, dst_path, 3795 batch_start, batch_size); 3796 if (ret < 0) 3797 return ret; 3798 } 3799 3800 return last_found ? 1 : 0; 3801 } 3802 3803 /* 3804 * log all the items included in the current transaction for a given 3805 * directory. This also creates the range items in the log tree required 3806 * to replay anything deleted before the fsync 3807 */ 3808 static noinline int log_dir_items(struct btrfs_trans_handle *trans, 3809 struct btrfs_inode *inode, 3810 struct btrfs_path *path, 3811 struct btrfs_path *dst_path, 3812 struct btrfs_log_ctx *ctx, 3813 u64 min_offset, u64 *last_offset_ret) 3814 { 3815 struct btrfs_key min_key; 3816 struct btrfs_root *root = inode->root; 3817 struct btrfs_root *log = root->log_root; 3818 int ret; 3819 u64 last_old_dentry_offset = min_offset - 1; 3820 u64 last_offset = (u64)-1; 3821 u64 ino = btrfs_ino(inode); 3822 3823 min_key.objectid = ino; 3824 min_key.type = BTRFS_DIR_INDEX_KEY; 3825 min_key.offset = min_offset; 3826 3827 ret = btrfs_search_forward(root, &min_key, path, trans->transid); 3828 3829 /* 3830 * we didn't find anything from this transaction, see if there 3831 * is anything at all 3832 */ 3833 if (ret != 0 || min_key.objectid != ino || 3834 min_key.type != BTRFS_DIR_INDEX_KEY) { 3835 min_key.objectid = ino; 3836 min_key.type = BTRFS_DIR_INDEX_KEY; 3837 min_key.offset = (u64)-1; 3838 btrfs_release_path(path); 3839 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3840 if (ret < 0) { 3841 btrfs_release_path(path); 3842 return ret; 3843 } 3844 ret = btrfs_previous_item(root, path, ino, BTRFS_DIR_INDEX_KEY); 3845 3846 /* if ret == 0 there are items for this type, 3847 * create a range to tell us the last key of this type. 3848 * otherwise, there are no items in this directory after 3849 * *min_offset, and we create a range to indicate that. 3850 */ 3851 if (ret == 0) { 3852 struct btrfs_key tmp; 3853 3854 btrfs_item_key_to_cpu(path->nodes[0], &tmp, 3855 path->slots[0]); 3856 if (tmp.type == BTRFS_DIR_INDEX_KEY) 3857 last_old_dentry_offset = tmp.offset; 3858 } else if (ret > 0) { 3859 ret = 0; 3860 } 3861 3862 goto done; 3863 } 3864 3865 /* go backward to find any previous key */ 3866 ret = btrfs_previous_item(root, path, ino, BTRFS_DIR_INDEX_KEY); 3867 if (ret == 0) { 3868 struct btrfs_key tmp; 3869 3870 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3871 /* 3872 * The dir index key before the first one we found that needs to 3873 * be logged might be in a previous leaf, and there might be a 3874 * gap between these keys, meaning that we had deletions that 3875 * happened. So the key range item we log (key type 3876 * BTRFS_DIR_LOG_INDEX_KEY) must cover a range that starts at the 3877 * previous key's offset plus 1, so that those deletes are replayed. 
3878 */ 3879 if (tmp.type == BTRFS_DIR_INDEX_KEY) 3880 last_old_dentry_offset = tmp.offset; 3881 } else if (ret < 0) { 3882 goto done; 3883 } 3884 3885 btrfs_release_path(path); 3886 3887 /* 3888 * Find the first key from this transaction again or the one we were at 3889 * in the loop below in case we had to reschedule. We may be logging the 3890 * directory without holding its VFS lock, which happen when logging new 3891 * dentries (through log_new_dir_dentries()) or in some cases when we 3892 * need to log the parent directory of an inode. This means a dir index 3893 * key might be deleted from the inode's root, and therefore we may not 3894 * find it anymore. If we can't find it, just move to the next key. We 3895 * can not bail out and ignore, because if we do that we will simply 3896 * not log dir index keys that come after the one that was just deleted 3897 * and we can end up logging a dir index range that ends at (u64)-1 3898 * (@last_offset is initialized to that), resulting in removing dir 3899 * entries we should not remove at log replay time. 3900 */ 3901 search: 3902 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3903 if (ret > 0) { 3904 ret = btrfs_next_item(root, path); 3905 if (ret > 0) { 3906 /* There are no more keys in the inode's root. */ 3907 ret = 0; 3908 goto done; 3909 } 3910 } 3911 if (ret < 0) 3912 goto done; 3913 3914 /* 3915 * we have a block from this transaction, log every item in it 3916 * from our directory 3917 */ 3918 while (1) { 3919 ret = process_dir_items_leaf(trans, inode, path, dst_path, ctx, 3920 &last_old_dentry_offset); 3921 if (ret != 0) { 3922 if (ret > 0) 3923 ret = 0; 3924 goto done; 3925 } 3926 path->slots[0] = btrfs_header_nritems(path->nodes[0]); 3927 3928 /* 3929 * look ahead to the next item and see if it is also 3930 * from this directory and from this transaction 3931 */ 3932 ret = btrfs_next_leaf(root, path); 3933 if (ret) { 3934 if (ret == 1) { 3935 last_offset = (u64)-1; 3936 ret = 0; 3937 } 3938 goto done; 3939 } 3940 btrfs_item_key_to_cpu(path->nodes[0], &min_key, path->slots[0]); 3941 if (min_key.objectid != ino || min_key.type != BTRFS_DIR_INDEX_KEY) { 3942 last_offset = (u64)-1; 3943 goto done; 3944 } 3945 if (btrfs_header_generation(path->nodes[0]) != trans->transid) { 3946 /* 3947 * The next leaf was not changed in the current transaction 3948 * and has at least one dir index key. 3949 * We check for the next key because there might have been 3950 * one or more deletions between the last key we logged and 3951 * that next key. So the key range item we log (key type 3952 * BTRFS_DIR_LOG_INDEX_KEY) must end at the next key's 3953 * offset minus 1, so that those deletes are replayed. 3954 */ 3955 last_offset = min_key.offset - 1; 3956 goto done; 3957 } 3958 if (need_resched()) { 3959 btrfs_release_path(path); 3960 cond_resched(); 3961 goto search; 3962 } 3963 } 3964 done: 3965 btrfs_release_path(path); 3966 btrfs_release_path(dst_path); 3967 3968 if (ret == 0) { 3969 *last_offset_ret = last_offset; 3970 /* 3971 * In case the leaf was changed in the current transaction but 3972 * all its dir items are from a past transaction, the last item 3973 * in the leaf is a dir item and there's no gap between that last 3974 * dir item and the first one on the next leaf (which did not 3975 * change in the current transaction), then we don't need to log 3976 * a range, last_old_dentry_offset is == to last_offset. 
3977 */
3978 ASSERT(last_old_dentry_offset <= last_offset);
3979 if (last_old_dentry_offset < last_offset)
3980 ret = insert_dir_log_key(trans, log, path, ino,
3981 last_old_dentry_offset + 1,
3982 last_offset);
3983 }
3984
3985 return ret;
3986 }
3987
3988 /*
3989 * If the inode was logged before and it was evicted, then its
3990 * last_dir_index_offset is (u64)-1, so we don't know the value of the last index
3991 * key offset. If that's the case, search for it and update the inode. This
3992 * is to avoid lookups in the log tree every time we try to insert a dir index
3993 * key from a leaf changed in the current transaction, and to allow us to always
3994 * do batch insertions of dir index keys.
3995 */
3996 static int update_last_dir_index_offset(struct btrfs_inode *inode,
3997 struct btrfs_path *path,
3998 const struct btrfs_log_ctx *ctx)
3999 {
4000 const u64 ino = btrfs_ino(inode);
4001 struct btrfs_key key;
4002 int ret;
4003
4004 lockdep_assert_held(&inode->log_mutex);
4005
4006 if (inode->last_dir_index_offset != (u64)-1)
4007 return 0;
4008
4009 if (!ctx->logged_before) {
4010 inode->last_dir_index_offset = BTRFS_DIR_START_INDEX - 1;
4011 return 0;
4012 }
4013
4014 key.objectid = ino;
4015 key.type = BTRFS_DIR_INDEX_KEY;
4016 key.offset = (u64)-1;
4017
4018 ret = btrfs_search_slot(NULL, inode->root->log_root, &key, path, 0, 0);
4019 /*
4020 * An error happened or we actually have an index key with an offset
4021 * value of (u64)-1. Bail out, we're done.
4022 */
4023 if (ret <= 0)
4024 goto out;
4025
4026 ret = 0;
4027 inode->last_dir_index_offset = BTRFS_DIR_START_INDEX - 1;
4028
4029 /*
4030 * No dir index items, bail out and leave last_dir_index_offset with
4031 * the value right before the first valid index value.
4032 */
4033 if (path->slots[0] == 0)
4034 goto out;
4035
4036 /*
4037 * btrfs_search_slot() left us at one slot beyond the slot with the last
4038 * index key, or beyond the last key of the directory that is not an
4039 * index key. If we have an index key before, set last_dir_index_offset
4040 * to its offset value, otherwise leave it with a value right before the
4041 * first valid index value, as it means we have an empty directory.
4042 */
4043 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
4044 if (key.objectid == ino && key.type == BTRFS_DIR_INDEX_KEY)
4045 inode->last_dir_index_offset = key.offset;
4046
4047 out:
4048 btrfs_release_path(path);
4049
4050 return ret;
4051 }
4052
4053 /*
4054 * logging directories is very similar to logging inodes. We find all the items
4055 * from the current transaction and write them to the log.
4056 *
4057 * The recovery code scans the directory in the subvolume, and if it finds a
4058 * key in the range logged that is not present in the log tree, then it means
4059 * that dir entry was unlinked during the transaction.
4060 *
4061 * In order for that scan to work, we must include one key smaller than
4062 * the smallest key logged by this transaction and one key larger than the largest
4063 * key logged by this transaction.
4064 */ 4065 static noinline int log_directory_changes(struct btrfs_trans_handle *trans, 4066 struct btrfs_inode *inode, 4067 struct btrfs_path *path, 4068 struct btrfs_path *dst_path, 4069 struct btrfs_log_ctx *ctx) 4070 { 4071 u64 min_key; 4072 u64 max_key; 4073 int ret; 4074 4075 ret = update_last_dir_index_offset(inode, path, ctx); 4076 if (ret) 4077 return ret; 4078 4079 min_key = BTRFS_DIR_START_INDEX; 4080 max_key = 0; 4081 4082 while (1) { 4083 ret = log_dir_items(trans, inode, path, dst_path, 4084 ctx, min_key, &max_key); 4085 if (ret) 4086 return ret; 4087 if (max_key == (u64)-1) 4088 break; 4089 min_key = max_key + 1; 4090 } 4091 4092 return 0; 4093 } 4094 4095 /* 4096 * a helper function to drop items from the log before we relog an 4097 * inode. max_key_type indicates the highest item type to remove. 4098 * This cannot be run for file data extents because it does not 4099 * free the extents they point to. 4100 */ 4101 static int drop_inode_items(struct btrfs_trans_handle *trans, 4102 struct btrfs_root *log, 4103 struct btrfs_path *path, 4104 struct btrfs_inode *inode, 4105 int max_key_type) 4106 { 4107 int ret; 4108 struct btrfs_key key; 4109 struct btrfs_key found_key; 4110 int start_slot; 4111 4112 key.objectid = btrfs_ino(inode); 4113 key.type = max_key_type; 4114 key.offset = (u64)-1; 4115 4116 while (1) { 4117 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 4118 if (ret < 0) { 4119 break; 4120 } else if (ret > 0) { 4121 if (path->slots[0] == 0) 4122 break; 4123 path->slots[0]--; 4124 } 4125 4126 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 4127 path->slots[0]); 4128 4129 if (found_key.objectid != key.objectid) 4130 break; 4131 4132 found_key.offset = 0; 4133 found_key.type = 0; 4134 ret = btrfs_bin_search(path->nodes[0], 0, &found_key, &start_slot); 4135 if (ret < 0) 4136 break; 4137 4138 ret = btrfs_del_items(trans, log, path, start_slot, 4139 path->slots[0] - start_slot + 1); 4140 /* 4141 * If start slot isn't 0 then we don't need to re-search, we've 4142 * found the last guy with the objectid in this tree. 
4143 */
4144 if (ret || start_slot != 0)
4145 break;
4146 btrfs_release_path(path);
4147 }
4148 btrfs_release_path(path);
4149 if (ret > 0)
4150 ret = 0;
4151 return ret;
4152 }
4153
4154 static int truncate_inode_items(struct btrfs_trans_handle *trans,
4155 struct btrfs_root *log_root,
4156 struct btrfs_inode *inode,
4157 u64 new_size, u32 min_type)
4158 {
4159 struct btrfs_truncate_control control = {
4160 .new_size = new_size,
4161 .ino = btrfs_ino(inode),
4162 .min_type = min_type,
4163 .skip_ref_updates = true,
4164 };
4165
4166 return btrfs_truncate_inode_items(trans, log_root, &control);
4167 }
4168
4169 static void fill_inode_item(struct btrfs_trans_handle *trans,
4170 struct extent_buffer *leaf,
4171 struct btrfs_inode_item *item,
4172 struct inode *inode, int log_inode_only,
4173 u64 logged_isize)
4174 {
4175 struct btrfs_map_token token;
4176 u64 flags;
4177
4178 btrfs_init_map_token(&token, leaf);
4179
4180 if (log_inode_only) {
4181 /* set the generation to zero so the recovery code
4182 * can tell the difference between logging done
4183 * just to say 'this inode exists' and logging done
4184 * to say 'update this inode with these values'
4185 */
4186 btrfs_set_token_inode_generation(&token, item, 0);
4187 btrfs_set_token_inode_size(&token, item, logged_isize);
4188 } else {
4189 btrfs_set_token_inode_generation(&token, item,
4190 BTRFS_I(inode)->generation);
4191 btrfs_set_token_inode_size(&token, item, inode->i_size);
4192 }
4193
4194 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
4195 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
4196 btrfs_set_token_inode_mode(&token, item, inode->i_mode);
4197 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
4198
4199 btrfs_set_token_timespec_sec(&token, &item->atime,
4200 inode_get_atime_sec(inode));
4201 btrfs_set_token_timespec_nsec(&token, &item->atime,
4202 inode_get_atime_nsec(inode));
4203
4204 btrfs_set_token_timespec_sec(&token, &item->mtime,
4205 inode_get_mtime_sec(inode));
4206 btrfs_set_token_timespec_nsec(&token, &item->mtime,
4207 inode_get_mtime_nsec(inode));
4208
4209 btrfs_set_token_timespec_sec(&token, &item->ctime,
4210 inode_get_ctime_sec(inode));
4211 btrfs_set_token_timespec_nsec(&token, &item->ctime,
4212 inode_get_ctime_nsec(inode));
4213
4214 /*
4215 * We do not need to set the nbytes field, in fact during a fast fsync
4216 * its value may not even be correct, since a fast fsync does not wait
4217 * for ordered extent completion, which is where we update nbytes; it
4218 * only waits for writeback to complete. During log replay, as we find
4219 * file extent items and replay them, we adjust the nbytes field of the
4220 * inode item in the subvolume tree as needed (see overwrite_item()).
4221 */ 4222 4223 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 4224 btrfs_set_token_inode_transid(&token, item, trans->transid); 4225 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 4226 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, 4227 BTRFS_I(inode)->ro_flags); 4228 btrfs_set_token_inode_flags(&token, item, flags); 4229 btrfs_set_token_inode_block_group(&token, item, 0); 4230 } 4231 4232 static int log_inode_item(struct btrfs_trans_handle *trans, 4233 struct btrfs_root *log, struct btrfs_path *path, 4234 struct btrfs_inode *inode, bool inode_item_dropped) 4235 { 4236 struct btrfs_inode_item *inode_item; 4237 int ret; 4238 4239 /* 4240 * If we are doing a fast fsync and the inode was logged before in the 4241 * current transaction, then we know the inode was previously logged and 4242 * it exists in the log tree. For performance reasons, in this case use 4243 * btrfs_search_slot() directly with ins_len set to 0 so that we never 4244 * attempt a write lock on the leaf's parent, which adds unnecessary lock 4245 * contention in case there are concurrent fsyncs for other inodes of the 4246 * same subvolume. Using btrfs_insert_empty_item() when the inode item 4247 * already exists can also result in unnecessarily splitting a leaf. 4248 */ 4249 if (!inode_item_dropped && inode->logged_trans == trans->transid) { 4250 ret = btrfs_search_slot(trans, log, &inode->location, path, 0, 1); 4251 ASSERT(ret <= 0); 4252 if (ret > 0) 4253 ret = -ENOENT; 4254 } else { 4255 /* 4256 * This means it is the first fsync in the current transaction, 4257 * so the inode item is not in the log and we need to insert it. 4258 * We can never get -EEXIST because we are only called for a fast 4259 * fsync and in case an inode eviction happens after the inode was 4260 * logged before in the current transaction, when we load again 4261 * the inode, we set BTRFS_INODE_NEEDS_FULL_SYNC on its runtime 4262 * flags and set ->logged_trans to 0. 4263 */ 4264 ret = btrfs_insert_empty_item(trans, log, path, &inode->location, 4265 sizeof(*inode_item)); 4266 ASSERT(ret != -EEXIST); 4267 } 4268 if (ret) 4269 return ret; 4270 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 4271 struct btrfs_inode_item); 4272 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode, 4273 0, 0); 4274 btrfs_release_path(path); 4275 return 0; 4276 } 4277 4278 static int log_csums(struct btrfs_trans_handle *trans, 4279 struct btrfs_inode *inode, 4280 struct btrfs_root *log_root, 4281 struct btrfs_ordered_sum *sums) 4282 { 4283 const u64 lock_end = sums->logical + sums->len - 1; 4284 struct extent_state *cached_state = NULL; 4285 int ret; 4286 4287 /* 4288 * If this inode was not used for reflink operations in the current 4289 * transaction with new extents, then do the fast path, no need to 4290 * worry about logging checksum items with overlapping ranges. 4291 */ 4292 if (inode->last_reflink_trans < trans->transid) 4293 return btrfs_csum_file_blocks(trans, log_root, sums); 4294 4295 /* 4296 * Serialize logging for checksums. This is to avoid racing with the 4297 * same checksum being logged by another task that is logging another 4298 * file which happens to refer to the same extent as well. Such races 4299 * can leave checksum items in the log with overlapping ranges. 
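 *
 * An illustrative scenario (not taken from an actual report): two tasks
 * fsync two different files that share the same extent through reflink,
 * and each looks up csums for a byte range of that extent that overlaps
 * the range looked up by the other task. Without this serialization both
 * tasks could insert their csum items into the log, ending up with items
 * whose ranges overlap.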
4300 */ 4301 ret = lock_extent(&log_root->log_csum_range, sums->logical, lock_end, 4302 &cached_state); 4303 if (ret) 4304 return ret; 4305 /* 4306 * Due to extent cloning, we might have logged a csum item that covers a 4307 * subrange of a cloned extent, and later we can end up logging a csum 4308 * item for a larger subrange of the same extent or the entire range. 4309 * This would leave csum items in the log tree that cover the same range 4310 * and break the searches for checksums in the log tree, resulting in 4311 * some checksums missing in the fs/subvolume tree. So just delete (or 4312 * trim and adjust) any existing csum items in the log for this range. 4313 */ 4314 ret = btrfs_del_csums(trans, log_root, sums->logical, sums->len); 4315 if (!ret) 4316 ret = btrfs_csum_file_blocks(trans, log_root, sums); 4317 4318 unlock_extent(&log_root->log_csum_range, sums->logical, lock_end, 4319 &cached_state); 4320 4321 return ret; 4322 } 4323 4324 static noinline int copy_items(struct btrfs_trans_handle *trans, 4325 struct btrfs_inode *inode, 4326 struct btrfs_path *dst_path, 4327 struct btrfs_path *src_path, 4328 int start_slot, int nr, int inode_only, 4329 u64 logged_isize, struct btrfs_log_ctx *ctx) 4330 { 4331 struct btrfs_root *log = inode->root->log_root; 4332 struct btrfs_file_extent_item *extent; 4333 struct extent_buffer *src; 4334 int ret; 4335 struct btrfs_key *ins_keys; 4336 u32 *ins_sizes; 4337 struct btrfs_item_batch batch; 4338 char *ins_data; 4339 int dst_index; 4340 const bool skip_csum = (inode->flags & BTRFS_INODE_NODATASUM); 4341 const u64 i_size = i_size_read(&inode->vfs_inode); 4342 4343 /* 4344 * To keep lockdep happy and avoid deadlocks, clone the source leaf and 4345 * use the clone. This is because otherwise we would be changing the log 4346 * tree, to insert items from the subvolume tree or insert csum items, 4347 * while holding a read lock on a leaf from the subvolume tree, which 4348 * creates a nasty lock dependency when COWing log tree nodes/leaves: 4349 * 4350 * 1) Modifying the log tree triggers an extent buffer allocation while 4351 * holding a write lock on a parent extent buffer from the log tree. 4352 * Allocating the pages for an extent buffer, or the extent buffer 4353 * struct, can trigger inode eviction and finally the inode eviction 4354 * will trigger a release/remove of a delayed node, which requires 4355 * taking the delayed node's mutex; 4356 * 4357 * 2) Allocating a metadata extent for a log tree can trigger the async 4358 * reclaim thread and make us wait for it to release enough space and 4359 * unblock our reservation ticket. The reclaim thread can start 4360 * flushing delayed items, and that in turn results in the need to 4361 * lock delayed node mutexes and in the need to write lock extent 4362 * buffers of a subvolume tree - all this while holding a write lock 4363 * on the parent extent buffer in the log tree. 4364 * 4365 * So one task in scenario 1) running in parallel with another task in 4366 * scenario 2) could lead to a deadlock, one wanting to lock a delayed 4367 * node mutex while having a read lock on a leaf from the subvolume, 4368 * while the other is holding the delayed node's mutex and wants to 4369 * write lock the same subvolume leaf for flushing delayed items. 
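 *
 * Roughly, the conflicting lock orders of the two scenarios above are
 * (an informal sketch):
 *
 *   scenario 1: subvolume leaf (read) -> delayed node mutex
 *   scenario 2: delayed node mutex -> subvolume leaf (write)
 *
 * Cloning the source leaf (see clone_leaf()) means we never hold a lock
 * on a subvolume tree leaf while modifying the log tree, so we do not
 * take part in the cycle above.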
4370 */ 4371 ret = clone_leaf(src_path, ctx); 4372 if (ret < 0) 4373 return ret; 4374 4375 src = src_path->nodes[0]; 4376 4377 ins_data = kmalloc(nr * sizeof(struct btrfs_key) + 4378 nr * sizeof(u32), GFP_NOFS); 4379 if (!ins_data) 4380 return -ENOMEM; 4381 4382 ins_sizes = (u32 *)ins_data; 4383 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); 4384 batch.keys = ins_keys; 4385 batch.data_sizes = ins_sizes; 4386 batch.total_data_size = 0; 4387 batch.nr = 0; 4388 4389 dst_index = 0; 4390 for (int i = 0; i < nr; i++) { 4391 const int src_slot = start_slot + i; 4392 struct btrfs_root *csum_root; 4393 struct btrfs_ordered_sum *sums; 4394 struct btrfs_ordered_sum *sums_next; 4395 LIST_HEAD(ordered_sums); 4396 u64 disk_bytenr; 4397 u64 disk_num_bytes; 4398 u64 extent_offset; 4399 u64 extent_num_bytes; 4400 bool is_old_extent; 4401 4402 btrfs_item_key_to_cpu(src, &ins_keys[dst_index], src_slot); 4403 4404 if (ins_keys[dst_index].type != BTRFS_EXTENT_DATA_KEY) 4405 goto add_to_batch; 4406 4407 extent = btrfs_item_ptr(src, src_slot, 4408 struct btrfs_file_extent_item); 4409 4410 is_old_extent = (btrfs_file_extent_generation(src, extent) < 4411 trans->transid); 4412 4413 /* 4414 * Don't copy extents from past generations. That would make us 4415 * log a lot more metadata for common cases like doing only a 4416 * few random writes into a file and then fsync it for the first 4417 * time or after the full sync flag is set on the inode. We can 4418 * get leaves full of extent items, most of which are from past 4419 * generations, so we can skip them - as long as the inode has 4420 * not been the target of a reflink operation in this transaction, 4421 * as in that case it might have had file extent items with old 4422 * generations copied into it. We also must always log prealloc 4423 * extents that start at or beyond eof, otherwise we would lose 4424 * them on log replay. 4425 */ 4426 if (is_old_extent && 4427 ins_keys[dst_index].offset < i_size && 4428 inode->last_reflink_trans < trans->transid) 4429 continue; 4430 4431 if (skip_csum) 4432 goto add_to_batch; 4433 4434 /* Only regular extents have checksums. */ 4435 if (btrfs_file_extent_type(src, extent) != BTRFS_FILE_EXTENT_REG) 4436 goto add_to_batch; 4437 4438 /* 4439 * If it's an extent created in a past transaction, then its 4440 * checksums are already accessible from the committed csum tree, 4441 * no need to log them. 4442 */ 4443 if (is_old_extent) 4444 goto add_to_batch; 4445 4446 disk_bytenr = btrfs_file_extent_disk_bytenr(src, extent); 4447 /* If it's an explicit hole, there are no checksums. 
*/ 4448 if (disk_bytenr == 0) 4449 goto add_to_batch; 4450 4451 disk_num_bytes = btrfs_file_extent_disk_num_bytes(src, extent); 4452 4453 if (btrfs_file_extent_compression(src, extent)) { 4454 extent_offset = 0; 4455 extent_num_bytes = disk_num_bytes; 4456 } else { 4457 extent_offset = btrfs_file_extent_offset(src, extent); 4458 extent_num_bytes = btrfs_file_extent_num_bytes(src, extent); 4459 } 4460 4461 csum_root = btrfs_csum_root(trans->fs_info, disk_bytenr); 4462 disk_bytenr += extent_offset; 4463 ret = btrfs_lookup_csums_list(csum_root, disk_bytenr, 4464 disk_bytenr + extent_num_bytes - 1, 4465 &ordered_sums, false); 4466 if (ret < 0) 4467 goto out; 4468 ret = 0; 4469 4470 list_for_each_entry_safe(sums, sums_next, &ordered_sums, list) { 4471 if (!ret) 4472 ret = log_csums(trans, inode, log, sums); 4473 list_del(&sums->list); 4474 kfree(sums); 4475 } 4476 if (ret) 4477 goto out; 4478 4479 add_to_batch: 4480 ins_sizes[dst_index] = btrfs_item_size(src, src_slot); 4481 batch.total_data_size += ins_sizes[dst_index]; 4482 batch.nr++; 4483 dst_index++; 4484 } 4485 4486 /* 4487 * We have a leaf full of old extent items that don't need to be logged, 4488 * so we don't need to do anything. 4489 */ 4490 if (batch.nr == 0) 4491 goto out; 4492 4493 ret = btrfs_insert_empty_items(trans, log, dst_path, &batch); 4494 if (ret) 4495 goto out; 4496 4497 dst_index = 0; 4498 for (int i = 0; i < nr; i++) { 4499 const int src_slot = start_slot + i; 4500 const int dst_slot = dst_path->slots[0] + dst_index; 4501 struct btrfs_key key; 4502 unsigned long src_offset; 4503 unsigned long dst_offset; 4504 4505 /* 4506 * We're done, all the remaining items in the source leaf 4507 * correspond to old file extent items. 4508 */ 4509 if (dst_index >= batch.nr) 4510 break; 4511 4512 btrfs_item_key_to_cpu(src, &key, src_slot); 4513 4514 if (key.type != BTRFS_EXTENT_DATA_KEY) 4515 goto copy_item; 4516 4517 extent = btrfs_item_ptr(src, src_slot, 4518 struct btrfs_file_extent_item); 4519 4520 /* See the comment in the previous loop, same logic. 
*/ 4521 if (btrfs_file_extent_generation(src, extent) < trans->transid && 4522 key.offset < i_size && 4523 inode->last_reflink_trans < trans->transid) 4524 continue; 4525 4526 copy_item: 4527 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], dst_slot); 4528 src_offset = btrfs_item_ptr_offset(src, src_slot); 4529 4530 if (key.type == BTRFS_INODE_ITEM_KEY) { 4531 struct btrfs_inode_item *inode_item; 4532 4533 inode_item = btrfs_item_ptr(dst_path->nodes[0], dst_slot, 4534 struct btrfs_inode_item); 4535 fill_inode_item(trans, dst_path->nodes[0], inode_item, 4536 &inode->vfs_inode, 4537 inode_only == LOG_INODE_EXISTS, 4538 logged_isize); 4539 } else { 4540 copy_extent_buffer(dst_path->nodes[0], src, dst_offset, 4541 src_offset, ins_sizes[dst_index]); 4542 } 4543 4544 dst_index++; 4545 } 4546 4547 btrfs_mark_buffer_dirty(trans, dst_path->nodes[0]); 4548 btrfs_release_path(dst_path); 4549 out: 4550 kfree(ins_data); 4551 4552 return ret; 4553 } 4554 4555 static int extent_cmp(void *priv, const struct list_head *a, 4556 const struct list_head *b) 4557 { 4558 const struct extent_map *em1, *em2; 4559 4560 em1 = list_entry(a, struct extent_map, list); 4561 em2 = list_entry(b, struct extent_map, list); 4562 4563 if (em1->start < em2->start) 4564 return -1; 4565 else if (em1->start > em2->start) 4566 return 1; 4567 return 0; 4568 } 4569 4570 static int log_extent_csums(struct btrfs_trans_handle *trans, 4571 struct btrfs_inode *inode, 4572 struct btrfs_root *log_root, 4573 const struct extent_map *em, 4574 struct btrfs_log_ctx *ctx) 4575 { 4576 struct btrfs_ordered_extent *ordered; 4577 struct btrfs_root *csum_root; 4578 u64 csum_offset; 4579 u64 csum_len; 4580 u64 mod_start = em->start; 4581 u64 mod_len = em->len; 4582 LIST_HEAD(ordered_sums); 4583 int ret = 0; 4584 4585 if (inode->flags & BTRFS_INODE_NODATASUM || 4586 (em->flags & EXTENT_FLAG_PREALLOC) || 4587 em->block_start == EXTENT_MAP_HOLE) 4588 return 0; 4589 4590 list_for_each_entry(ordered, &ctx->ordered_extents, log_list) { 4591 const u64 ordered_end = ordered->file_offset + ordered->num_bytes; 4592 const u64 mod_end = mod_start + mod_len; 4593 struct btrfs_ordered_sum *sums; 4594 4595 if (mod_len == 0) 4596 break; 4597 4598 if (ordered_end <= mod_start) 4599 continue; 4600 if (mod_end <= ordered->file_offset) 4601 break; 4602 4603 /* 4604 * We are going to copy all the csums on this ordered extent, so 4605 * go ahead and adjust mod_start and mod_len in case this ordered 4606 * extent has already been logged. 4607 */ 4608 if (ordered->file_offset > mod_start) { 4609 if (ordered_end >= mod_end) 4610 mod_len = ordered->file_offset - mod_start; 4611 /* 4612 * If we have this case 4613 * 4614 * |--------- logged extent ---------| 4615 * |----- ordered extent ----| 4616 * 4617 * Just don't mess with mod_start and mod_len, we'll 4618 * just end up logging more csums than we need and it 4619 * will be ok. 4620 */ 4621 } else { 4622 if (ordered_end < mod_end) { 4623 mod_len = mod_end - ordered_end; 4624 mod_start = ordered_end; 4625 } else { 4626 mod_len = 0; 4627 } 4628 } 4629 4630 /* 4631 * To keep us from looping for the above case of an ordered 4632 * extent that falls inside of the logged extent. 4633 */ 4634 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags)) 4635 continue; 4636 4637 list_for_each_entry(sums, &ordered->list, list) { 4638 ret = log_csums(trans, inode, log_root, sums); 4639 if (ret) 4640 return ret; 4641 } 4642 } 4643 4644 /* We're done, found all csums in the ordered extents. 
*/ 4645 if (mod_len == 0) 4646 return 0; 4647 4648 /* If we're compressed we have to save the entire range of csums. */ 4649 if (extent_map_is_compressed(em)) { 4650 csum_offset = 0; 4651 csum_len = max(em->block_len, em->orig_block_len); 4652 } else { 4653 csum_offset = mod_start - em->start; 4654 csum_len = mod_len; 4655 } 4656 4657 /* block start is already adjusted for the file extent offset. */ 4658 csum_root = btrfs_csum_root(trans->fs_info, em->block_start); 4659 ret = btrfs_lookup_csums_list(csum_root, em->block_start + csum_offset, 4660 em->block_start + csum_offset + 4661 csum_len - 1, &ordered_sums, false); 4662 if (ret < 0) 4663 return ret; 4664 ret = 0; 4665 4666 while (!list_empty(&ordered_sums)) { 4667 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 4668 struct btrfs_ordered_sum, 4669 list); 4670 if (!ret) 4671 ret = log_csums(trans, inode, log_root, sums); 4672 list_del(&sums->list); 4673 kfree(sums); 4674 } 4675 4676 return ret; 4677 } 4678 4679 static int log_one_extent(struct btrfs_trans_handle *trans, 4680 struct btrfs_inode *inode, 4681 const struct extent_map *em, 4682 struct btrfs_path *path, 4683 struct btrfs_log_ctx *ctx) 4684 { 4685 struct btrfs_drop_extents_args drop_args = { 0 }; 4686 struct btrfs_root *log = inode->root->log_root; 4687 struct btrfs_file_extent_item fi = { 0 }; 4688 struct extent_buffer *leaf; 4689 struct btrfs_key key; 4690 enum btrfs_compression_type compress_type; 4691 u64 extent_offset = em->start - em->orig_start; 4692 u64 block_len; 4693 int ret; 4694 4695 btrfs_set_stack_file_extent_generation(&fi, trans->transid); 4696 if (em->flags & EXTENT_FLAG_PREALLOC) 4697 btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_PREALLOC); 4698 else 4699 btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_REG); 4700 4701 block_len = max(em->block_len, em->orig_block_len); 4702 compress_type = extent_map_compression(em); 4703 if (compress_type != BTRFS_COMPRESS_NONE) { 4704 btrfs_set_stack_file_extent_disk_bytenr(&fi, em->block_start); 4705 btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len); 4706 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { 4707 btrfs_set_stack_file_extent_disk_bytenr(&fi, em->block_start - 4708 extent_offset); 4709 btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len); 4710 } 4711 4712 btrfs_set_stack_file_extent_offset(&fi, extent_offset); 4713 btrfs_set_stack_file_extent_num_bytes(&fi, em->len); 4714 btrfs_set_stack_file_extent_ram_bytes(&fi, em->ram_bytes); 4715 btrfs_set_stack_file_extent_compression(&fi, compress_type); 4716 4717 ret = log_extent_csums(trans, inode, log, em, ctx); 4718 if (ret) 4719 return ret; 4720 4721 /* 4722 * If this is the first time we are logging the inode in the current 4723 * transaction, we can avoid btrfs_drop_extents(), which is expensive 4724 * because it does a deletion search, which always acquires write locks 4725 * for extent buffers at levels 2, 1 and 0. This not only wastes time 4726 * but also adds significant contention in a log tree, since log trees 4727 * are small, with a root at level 2 or 3 at most, due to their short 4728 * life span. 
4729 */ 4730 if (ctx->logged_before) { 4731 drop_args.path = path; 4732 drop_args.start = em->start; 4733 drop_args.end = em->start + em->len; 4734 drop_args.replace_extent = true; 4735 drop_args.extent_item_size = sizeof(fi); 4736 ret = btrfs_drop_extents(trans, log, inode, &drop_args); 4737 if (ret) 4738 return ret; 4739 } 4740 4741 if (!drop_args.extent_inserted) { 4742 key.objectid = btrfs_ino(inode); 4743 key.type = BTRFS_EXTENT_DATA_KEY; 4744 key.offset = em->start; 4745 4746 ret = btrfs_insert_empty_item(trans, log, path, &key, 4747 sizeof(fi)); 4748 if (ret) 4749 return ret; 4750 } 4751 leaf = path->nodes[0]; 4752 write_extent_buffer(leaf, &fi, 4753 btrfs_item_ptr_offset(leaf, path->slots[0]), 4754 sizeof(fi)); 4755 btrfs_mark_buffer_dirty(trans, leaf); 4756 4757 btrfs_release_path(path); 4758 4759 return ret; 4760 } 4761 4762 /* 4763 * Log all prealloc extents beyond the inode's i_size to make sure we do not 4764 * lose them after doing a full/fast fsync and replaying the log. We scan the 4765 * subvolume's root instead of iterating the inode's extent map tree because 4766 * otherwise we can log incorrect extent items based on extent map conversion. 4767 * That can happen due to the fact that extent maps are merged when they 4768 * are not in the extent map tree's list of modified extents. 4769 */ 4770 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, 4771 struct btrfs_inode *inode, 4772 struct btrfs_path *path, 4773 struct btrfs_log_ctx *ctx) 4774 { 4775 struct btrfs_root *root = inode->root; 4776 struct btrfs_key key; 4777 const u64 i_size = i_size_read(&inode->vfs_inode); 4778 const u64 ino = btrfs_ino(inode); 4779 struct btrfs_path *dst_path = NULL; 4780 bool dropped_extents = false; 4781 u64 truncate_offset = i_size; 4782 struct extent_buffer *leaf; 4783 int slot; 4784 int ins_nr = 0; 4785 int start_slot = 0; 4786 int ret; 4787 4788 if (!(inode->flags & BTRFS_INODE_PREALLOC)) 4789 return 0; 4790 4791 key.objectid = ino; 4792 key.type = BTRFS_EXTENT_DATA_KEY; 4793 key.offset = i_size; 4794 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4795 if (ret < 0) 4796 goto out; 4797 4798 /* 4799 * We must check if there is a prealloc extent that starts before the 4800 * i_size and crosses the i_size boundary. This is to ensure later we 4801 * truncate down to the end of that extent and not to the i_size, as 4802 * otherwise we end up losing part of the prealloc extent after a log 4803 * replay and with an implicit hole if there is another prealloc extent 4804 * that starts at an offset beyond i_size. 
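 *
 * An illustrative layout (extents A and B are hypothetical):
 *
 *                         i_size
 *                           |
 *   |----- prealloc extent A -----|      |---- prealloc extent B ----|
 *
 * Here we must truncate the log down to the end of extent A (which is what
 * truncate_offset ends up being), and not down to i_size, so that no part
 * of extent A is lost after log replay.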
4805 */ 4806 ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY); 4807 if (ret < 0) 4808 goto out; 4809 4810 if (ret == 0) { 4811 struct btrfs_file_extent_item *ei; 4812 4813 leaf = path->nodes[0]; 4814 slot = path->slots[0]; 4815 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 4816 4817 if (btrfs_file_extent_type(leaf, ei) == 4818 BTRFS_FILE_EXTENT_PREALLOC) { 4819 u64 extent_end; 4820 4821 btrfs_item_key_to_cpu(leaf, &key, slot); 4822 extent_end = key.offset + 4823 btrfs_file_extent_num_bytes(leaf, ei); 4824 4825 if (extent_end > i_size) 4826 truncate_offset = extent_end; 4827 } 4828 } else { 4829 ret = 0; 4830 } 4831 4832 while (true) { 4833 leaf = path->nodes[0]; 4834 slot = path->slots[0]; 4835 4836 if (slot >= btrfs_header_nritems(leaf)) { 4837 if (ins_nr > 0) { 4838 ret = copy_items(trans, inode, dst_path, path, 4839 start_slot, ins_nr, 1, 0, ctx); 4840 if (ret < 0) 4841 goto out; 4842 ins_nr = 0; 4843 } 4844 ret = btrfs_next_leaf(root, path); 4845 if (ret < 0) 4846 goto out; 4847 if (ret > 0) { 4848 ret = 0; 4849 break; 4850 } 4851 continue; 4852 } 4853 4854 btrfs_item_key_to_cpu(leaf, &key, slot); 4855 if (key.objectid > ino) 4856 break; 4857 if (WARN_ON_ONCE(key.objectid < ino) || 4858 key.type < BTRFS_EXTENT_DATA_KEY || 4859 key.offset < i_size) { 4860 path->slots[0]++; 4861 continue; 4862 } 4863 if (!dropped_extents) { 4864 /* 4865 * Avoid logging extent items logged in past fsync calls 4866 * and leading to duplicate keys in the log tree. 4867 */ 4868 ret = truncate_inode_items(trans, root->log_root, inode, 4869 truncate_offset, 4870 BTRFS_EXTENT_DATA_KEY); 4871 if (ret) 4872 goto out; 4873 dropped_extents = true; 4874 } 4875 if (ins_nr == 0) 4876 start_slot = slot; 4877 ins_nr++; 4878 path->slots[0]++; 4879 if (!dst_path) { 4880 dst_path = btrfs_alloc_path(); 4881 if (!dst_path) { 4882 ret = -ENOMEM; 4883 goto out; 4884 } 4885 } 4886 } 4887 if (ins_nr > 0) 4888 ret = copy_items(trans, inode, dst_path, path, 4889 start_slot, ins_nr, 1, 0, ctx); 4890 out: 4891 btrfs_release_path(path); 4892 btrfs_free_path(dst_path); 4893 return ret; 4894 } 4895 4896 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, 4897 struct btrfs_inode *inode, 4898 struct btrfs_path *path, 4899 struct btrfs_log_ctx *ctx) 4900 { 4901 struct btrfs_ordered_extent *ordered; 4902 struct btrfs_ordered_extent *tmp; 4903 struct extent_map *em, *n; 4904 LIST_HEAD(extents); 4905 struct extent_map_tree *tree = &inode->extent_tree; 4906 int ret = 0; 4907 int num = 0; 4908 4909 write_lock(&tree->lock); 4910 4911 list_for_each_entry_safe(em, n, &tree->modified_extents, list) { 4912 list_del_init(&em->list); 4913 /* 4914 * Just an arbitrary number, this can be really CPU intensive 4915 * once we start getting a lot of extents, and really once we 4916 * have a bunch of extents we just want to commit since it will 4917 * be faster. 4918 */ 4919 if (++num > 32768) { 4920 list_del_init(&tree->modified_extents); 4921 ret = -EFBIG; 4922 goto process; 4923 } 4924 4925 if (em->generation < trans->transid) 4926 continue; 4927 4928 /* We log prealloc extents beyond eof later. 
*/
4929 if ((em->flags & EXTENT_FLAG_PREALLOC) &&
4930 em->start >= i_size_read(&inode->vfs_inode))
4931 continue;
4932
4933 /* Need a ref to keep it from getting evicted from cache */
4934 refcount_inc(&em->refs);
4935 em->flags |= EXTENT_FLAG_LOGGING;
4936 list_add_tail(&em->list, &extents);
4937 num++;
4938 }
4939
4940 list_sort(NULL, &extents, extent_cmp);
4941 process:
4942 while (!list_empty(&extents)) {
4943 em = list_entry(extents.next, struct extent_map, list);
4944
4945 list_del_init(&em->list);
4946
4947 /*
4948 * If we had an error we just need to delete everybody from our
4949 * private list.
4950 */
4951 if (ret) {
4952 clear_em_logging(inode, em);
4953 free_extent_map(em);
4954 continue;
4955 }
4956
4957 write_unlock(&tree->lock);
4958
4959 ret = log_one_extent(trans, inode, em, path, ctx);
4960 write_lock(&tree->lock);
4961 clear_em_logging(inode, em);
4962 free_extent_map(em);
4963 }
4964 WARN_ON(!list_empty(&extents));
4965 write_unlock(&tree->lock);
4966
4967 if (!ret)
4968 ret = btrfs_log_prealloc_extents(trans, inode, path, ctx);
4969 if (ret)
4970 return ret;
4971
4972 /*
4973 * We have logged all extents successfully, now make sure the commit of
4974 * the current transaction waits for the ordered extents to complete
4975 * before it commits and wipes out the log trees; otherwise we would
4976 * lose data if an ordered extent completes after the transaction
4977 * commits and a power failure happens after the transaction commit.
4978 */
4979 list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
4980 list_del_init(&ordered->log_list);
4981 set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);
4982
4983 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4984 spin_lock_irq(&inode->ordered_tree_lock);
4985 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4986 set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
4987 atomic_inc(&trans->transaction->pending_ordered);
4988 }
4989 spin_unlock_irq(&inode->ordered_tree_lock);
4990 }
4991 btrfs_put_ordered_extent(ordered);
4992 }
4993
4994 return 0;
4995 }
4996
4997 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4998 struct btrfs_path *path, u64 *size_ret)
4999 {
5000 struct btrfs_key key;
5001 int ret;
5002
5003 key.objectid = btrfs_ino(inode);
5004 key.type = BTRFS_INODE_ITEM_KEY;
5005 key.offset = 0;
5006
5007 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
5008 if (ret < 0) {
5009 return ret;
5010 } else if (ret > 0) {
5011 *size_ret = 0;
5012 } else {
5013 struct btrfs_inode_item *item;
5014
5015 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
5016 struct btrfs_inode_item);
5017 *size_ret = btrfs_inode_size(path->nodes[0], item);
5018 /*
5019 * If the in-memory inode's i_size is smaller than the inode
5020 * size stored in the btree, return the inode's i_size, so
5021 * that we get a correct inode size after replaying the log
5022 * when before a power failure we had a shrinking truncate
5023 * followed by addition of a new name (rename / new hard link).
5024 * Otherwise return the inode size from the btree, to avoid
5025 * data loss when replaying a log due to previously doing a
5026 * write that expands the inode's size and logging a new name
5027 * immediately after.
5028 */
5029 if (*size_ret > inode->vfs_inode.i_size)
5030 *size_ret = inode->vfs_inode.i_size;
5031 }
5032
5033 btrfs_release_path(path);
5034 return 0;
5035 }
5036
5037 /*
5038 * At the moment we always log all xattrs.
This is to figure out at log replay 5039 * time which xattrs must have their deletion replayed. If a xattr is missing 5040 * in the log tree and exists in the fs/subvol tree, we delete it. This is 5041 * because if a xattr is deleted, the inode is fsynced and a power failure 5042 * happens, causing the log to be replayed the next time the fs is mounted, 5043 * we want the xattr to not exist anymore (same behaviour as other filesystems 5044 * with a journal, ext3/4, xfs, f2fs, etc). 5045 */ 5046 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, 5047 struct btrfs_inode *inode, 5048 struct btrfs_path *path, 5049 struct btrfs_path *dst_path, 5050 struct btrfs_log_ctx *ctx) 5051 { 5052 struct btrfs_root *root = inode->root; 5053 int ret; 5054 struct btrfs_key key; 5055 const u64 ino = btrfs_ino(inode); 5056 int ins_nr = 0; 5057 int start_slot = 0; 5058 bool found_xattrs = false; 5059 5060 if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags)) 5061 return 0; 5062 5063 key.objectid = ino; 5064 key.type = BTRFS_XATTR_ITEM_KEY; 5065 key.offset = 0; 5066 5067 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5068 if (ret < 0) 5069 return ret; 5070 5071 while (true) { 5072 int slot = path->slots[0]; 5073 struct extent_buffer *leaf = path->nodes[0]; 5074 int nritems = btrfs_header_nritems(leaf); 5075 5076 if (slot >= nritems) { 5077 if (ins_nr > 0) { 5078 ret = copy_items(trans, inode, dst_path, path, 5079 start_slot, ins_nr, 1, 0, ctx); 5080 if (ret < 0) 5081 return ret; 5082 ins_nr = 0; 5083 } 5084 ret = btrfs_next_leaf(root, path); 5085 if (ret < 0) 5086 return ret; 5087 else if (ret > 0) 5088 break; 5089 continue; 5090 } 5091 5092 btrfs_item_key_to_cpu(leaf, &key, slot); 5093 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) 5094 break; 5095 5096 if (ins_nr == 0) 5097 start_slot = slot; 5098 ins_nr++; 5099 path->slots[0]++; 5100 found_xattrs = true; 5101 cond_resched(); 5102 } 5103 if (ins_nr > 0) { 5104 ret = copy_items(trans, inode, dst_path, path, 5105 start_slot, ins_nr, 1, 0, ctx); 5106 if (ret < 0) 5107 return ret; 5108 } 5109 5110 if (!found_xattrs) 5111 set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags); 5112 5113 return 0; 5114 } 5115 5116 /* 5117 * When using the NO_HOLES feature if we punched a hole that causes the 5118 * deletion of entire leafs or all the extent items of the first leaf (the one 5119 * that contains the inode item and references) we may end up not processing 5120 * any extents, because there are no leafs with a generation matching the 5121 * current transaction that have extent items for our inode. So we need to find 5122 * if any holes exist and then log them. We also need to log holes after any 5123 * truncate operation that changes the inode's size. 
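 *
 * A possible reproducer (illustrative, assuming a filesystem with the
 * NO_HOLES feature):
 *
 *   xfs_io -f -c "pwrite 0 256K" /mnt/foo
 *   sync
 *   xfs_io -c "fpunch 0 128K" -c "fsync" /mnt/foo
 *   <power fail>
 *   <mount fs and log replay>
 *
 * If the punched range were not logged as an explicit hole, log replay
 * could leave the old extent item for that range in the subvolume tree,
 * bringing back data that was supposed to be gone.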
5124 */ 5125 static int btrfs_log_holes(struct btrfs_trans_handle *trans, 5126 struct btrfs_inode *inode, 5127 struct btrfs_path *path) 5128 { 5129 struct btrfs_root *root = inode->root; 5130 struct btrfs_fs_info *fs_info = root->fs_info; 5131 struct btrfs_key key; 5132 const u64 ino = btrfs_ino(inode); 5133 const u64 i_size = i_size_read(&inode->vfs_inode); 5134 u64 prev_extent_end = 0; 5135 int ret; 5136 5137 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0) 5138 return 0; 5139 5140 key.objectid = ino; 5141 key.type = BTRFS_EXTENT_DATA_KEY; 5142 key.offset = 0; 5143 5144 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5145 if (ret < 0) 5146 return ret; 5147 5148 while (true) { 5149 struct extent_buffer *leaf = path->nodes[0]; 5150 5151 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 5152 ret = btrfs_next_leaf(root, path); 5153 if (ret < 0) 5154 return ret; 5155 if (ret > 0) { 5156 ret = 0; 5157 break; 5158 } 5159 leaf = path->nodes[0]; 5160 } 5161 5162 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 5163 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) 5164 break; 5165 5166 /* We have a hole, log it. */ 5167 if (prev_extent_end < key.offset) { 5168 const u64 hole_len = key.offset - prev_extent_end; 5169 5170 /* 5171 * Release the path to avoid deadlocks with other code 5172 * paths that search the root while holding locks on 5173 * leafs from the log root. 5174 */ 5175 btrfs_release_path(path); 5176 ret = btrfs_insert_hole_extent(trans, root->log_root, 5177 ino, prev_extent_end, 5178 hole_len); 5179 if (ret < 0) 5180 return ret; 5181 5182 /* 5183 * Search for the same key again in the root. Since it's 5184 * an extent item and we are holding the inode lock, the 5185 * key must still exist. If it doesn't just emit warning 5186 * and return an error to fall back to a transaction 5187 * commit. 5188 */ 5189 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5190 if (ret < 0) 5191 return ret; 5192 if (WARN_ON(ret > 0)) 5193 return -ENOENT; 5194 leaf = path->nodes[0]; 5195 } 5196 5197 prev_extent_end = btrfs_file_extent_end(path); 5198 path->slots[0]++; 5199 cond_resched(); 5200 } 5201 5202 if (prev_extent_end < i_size) { 5203 u64 hole_len; 5204 5205 btrfs_release_path(path); 5206 hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize); 5207 ret = btrfs_insert_hole_extent(trans, root->log_root, ino, 5208 prev_extent_end, hole_len); 5209 if (ret < 0) 5210 return ret; 5211 } 5212 5213 return 0; 5214 } 5215 5216 /* 5217 * When we are logging a new inode X, check if it doesn't have a reference that 5218 * matches the reference from some other inode Y created in a past transaction 5219 * and that was renamed in the current transaction. If we don't do this, then at 5220 * log replay time we can lose inode Y (and all its files if it's a directory): 5221 * 5222 * mkdir /mnt/x 5223 * echo "hello world" > /mnt/x/foobar 5224 * sync 5225 * mv /mnt/x /mnt/y 5226 * mkdir /mnt/x # or touch /mnt/x 5227 * xfs_io -c fsync /mnt/x 5228 * <power fail> 5229 * mount fs, trigger log replay 5230 * 5231 * After the log replay procedure, we would lose the first directory and all its 5232 * files (file foobar). 
5233 * For the case where inode Y is not a directory we simply end up losing it: 5234 * 5235 * echo "123" > /mnt/foo 5236 * sync 5237 * mv /mnt/foo /mnt/bar 5238 * echo "abc" > /mnt/foo 5239 * xfs_io -c fsync /mnt/foo 5240 * <power fail> 5241 * 5242 * We also need this for cases where a snapshot entry is replaced by some other 5243 * entry (file or directory) otherwise we end up with an unreplayable log due to 5244 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as 5245 * if it were a regular entry: 5246 * 5247 * mkdir /mnt/x 5248 * btrfs subvolume snapshot /mnt /mnt/x/snap 5249 * btrfs subvolume delete /mnt/x/snap 5250 * rmdir /mnt/x 5251 * mkdir /mnt/x 5252 * fsync /mnt/x or fsync some new file inside it 5253 * <power fail> 5254 * 5255 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in 5256 * the same transaction. 5257 */ 5258 static int btrfs_check_ref_name_override(struct extent_buffer *eb, 5259 const int slot, 5260 const struct btrfs_key *key, 5261 struct btrfs_inode *inode, 5262 u64 *other_ino, u64 *other_parent) 5263 { 5264 int ret; 5265 struct btrfs_path *search_path; 5266 char *name = NULL; 5267 u32 name_len = 0; 5268 u32 item_size = btrfs_item_size(eb, slot); 5269 u32 cur_offset = 0; 5270 unsigned long ptr = btrfs_item_ptr_offset(eb, slot); 5271 5272 search_path = btrfs_alloc_path(); 5273 if (!search_path) 5274 return -ENOMEM; 5275 search_path->search_commit_root = 1; 5276 search_path->skip_locking = 1; 5277 5278 while (cur_offset < item_size) { 5279 u64 parent; 5280 u32 this_name_len; 5281 u32 this_len; 5282 unsigned long name_ptr; 5283 struct btrfs_dir_item *di; 5284 struct fscrypt_str name_str; 5285 5286 if (key->type == BTRFS_INODE_REF_KEY) { 5287 struct btrfs_inode_ref *iref; 5288 5289 iref = (struct btrfs_inode_ref *)(ptr + cur_offset); 5290 parent = key->offset; 5291 this_name_len = btrfs_inode_ref_name_len(eb, iref); 5292 name_ptr = (unsigned long)(iref + 1); 5293 this_len = sizeof(*iref) + this_name_len; 5294 } else { 5295 struct btrfs_inode_extref *extref; 5296 5297 extref = (struct btrfs_inode_extref *)(ptr + 5298 cur_offset); 5299 parent = btrfs_inode_extref_parent(eb, extref); 5300 this_name_len = btrfs_inode_extref_name_len(eb, extref); 5301 name_ptr = (unsigned long)&extref->name; 5302 this_len = sizeof(*extref) + this_name_len; 5303 } 5304 5305 if (this_name_len > name_len) { 5306 char *new_name; 5307 5308 new_name = krealloc(name, this_name_len, GFP_NOFS); 5309 if (!new_name) { 5310 ret = -ENOMEM; 5311 goto out; 5312 } 5313 name_len = this_name_len; 5314 name = new_name; 5315 } 5316 5317 read_extent_buffer(eb, name, name_ptr, this_name_len); 5318 5319 name_str.name = name; 5320 name_str.len = this_name_len; 5321 di = btrfs_lookup_dir_item(NULL, inode->root, search_path, 5322 parent, &name_str, 0); 5323 if (di && !IS_ERR(di)) { 5324 struct btrfs_key di_key; 5325 5326 btrfs_dir_item_key_to_cpu(search_path->nodes[0], 5327 di, &di_key); 5328 if (di_key.type == BTRFS_INODE_ITEM_KEY) { 5329 if (di_key.objectid != key->objectid) { 5330 ret = 1; 5331 *other_ino = di_key.objectid; 5332 *other_parent = parent; 5333 } else { 5334 ret = 0; 5335 } 5336 } else { 5337 ret = -EAGAIN; 5338 } 5339 goto out; 5340 } else if (IS_ERR(di)) { 5341 ret = PTR_ERR(di); 5342 goto out; 5343 } 5344 btrfs_release_path(search_path); 5345 5346 cur_offset += this_len; 5347 } 5348 ret = 0; 5349 out: 5350 btrfs_free_path(search_path); 5351 kfree(name); 5352 return ret; 5353 } 5354 5355 /* 5356 * Check if we need to log an inode. 
This is used in contexts where while
5357 * logging an inode we need to log another inode (either that it exists or in
5358 * full mode). This is used instead of btrfs_inode_in_log() because the latter
5359 * requires the inode to be in the log and have the log transaction committed,
5360 * while here we do not care if the log transaction was already committed - our
5361 * caller will commit the log later - and we want to avoid logging an inode
5362 * multiple times when multiple tasks have joined the same log transaction.
5363 */
5364 static bool need_log_inode(const struct btrfs_trans_handle *trans,
5365 struct btrfs_inode *inode)
5366 {
5367 /*
5368 * If a directory was not modified (no dentries added or removed), we can
5369 * and should avoid logging it.
5370 */
5371 if (S_ISDIR(inode->vfs_inode.i_mode) && inode->last_trans < trans->transid)
5372 return false;
5373
5374 /*
5375 * If this inode does not have new/updated/deleted xattrs since the last
5376 * time it was logged and is flagged as logged in the current transaction,
5377 * we can skip logging it. As for new/deleted names, those are updated in
5378 * the log by link/unlink/rename operations.
5379 * In case the inode was logged and then evicted and reloaded, its
5380 * logged_trans will be 0, in which case we have to fully log it since
5381 * logged_trans is a transient field, not persisted.
5382 */
5383 if (inode_logged(trans, inode, NULL) == 1 &&
5384 !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
5385 return false;
5386
5387 return true;
5388 }
5389
5390 struct btrfs_dir_list {
5391 u64 ino;
5392 struct list_head list;
5393 };
5394
5395 /*
5396 * Log the inodes of the new dentries of a directory.
5397 * See process_dir_items_leaf() for details about why it is needed.
5398 * This is a recursive operation - if an existing dentry corresponds to a
5399 * directory, that directory's new entries are logged too (same behaviour as
5400 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that, when logging the inodes
5401 * the dentries point to, we do not acquire their VFS lock, otherwise lockdep
5402 * complains about the following circular lock dependency / possible deadlock:
5403 *
5404 *             CPU0                                CPU1
5405 *             ----                                ----
5406 *  lock(&type->i_mutex_dir_key#3/2);
5407 *                                     lock(sb_internal#2);
5408 *                                     lock(&type->i_mutex_dir_key#3/2);
5409 *  lock(&sb->s_type->i_mutex_key#14);
5410 *
5411 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5412 * sb_start_intwrite() in btrfs_start_transaction().
5413 * Not acquiring the VFS lock of the inodes is still safe because:
5414 *
5415 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5416 * that while logging the inode new references (names) are added or removed
5417 * from the inode, leaving the logged inode item with a link count that does
5418 * not match the number of logged inode reference items. This is fine because
5419 * at log replay time we compute the real number of links and correct the
5420 * link count in the inode item (see replay_one_buffer() and
5421 * link_to_fixup_dir());
5422 *
5423 * 2) For directories we log with a mode of LOG_INODE_ALL.
It's possible that 5424 * while logging the inode's items new index items (key type 5425 * BTRFS_DIR_INDEX_KEY) are added to fs/subvol tree and the logged inode item 5426 * has a size that doesn't match the sum of the lengths of all the logged 5427 * names - this is ok, not a problem, because at log replay time we set the 5428 * directory's i_size to the correct value (see replay_one_name() and 5429 * overwrite_item()). 5430 */ 5431 static int log_new_dir_dentries(struct btrfs_trans_handle *trans, 5432 struct btrfs_inode *start_inode, 5433 struct btrfs_log_ctx *ctx) 5434 { 5435 struct btrfs_root *root = start_inode->root; 5436 struct btrfs_fs_info *fs_info = root->fs_info; 5437 struct btrfs_path *path; 5438 LIST_HEAD(dir_list); 5439 struct btrfs_dir_list *dir_elem; 5440 u64 ino = btrfs_ino(start_inode); 5441 struct btrfs_inode *curr_inode = start_inode; 5442 int ret = 0; 5443 5444 /* 5445 * If we are logging a new name, as part of a link or rename operation, 5446 * don't bother logging new dentries, as we just want to log the names 5447 * of an inode and that any new parents exist. 5448 */ 5449 if (ctx->logging_new_name) 5450 return 0; 5451 5452 path = btrfs_alloc_path(); 5453 if (!path) 5454 return -ENOMEM; 5455 5456 /* Pairs with btrfs_add_delayed_iput below. */ 5457 ihold(&curr_inode->vfs_inode); 5458 5459 while (true) { 5460 struct inode *vfs_inode; 5461 struct btrfs_key key; 5462 struct btrfs_key found_key; 5463 u64 next_index; 5464 bool continue_curr_inode = true; 5465 int iter_ret; 5466 5467 key.objectid = ino; 5468 key.type = BTRFS_DIR_INDEX_KEY; 5469 key.offset = btrfs_get_first_dir_index_to_log(curr_inode); 5470 next_index = key.offset; 5471 again: 5472 btrfs_for_each_slot(root->log_root, &key, &found_key, path, iter_ret) { 5473 struct extent_buffer *leaf = path->nodes[0]; 5474 struct btrfs_dir_item *di; 5475 struct btrfs_key di_key; 5476 struct inode *di_inode; 5477 int log_mode = LOG_INODE_EXISTS; 5478 int type; 5479 5480 if (found_key.objectid != ino || 5481 found_key.type != BTRFS_DIR_INDEX_KEY) { 5482 continue_curr_inode = false; 5483 break; 5484 } 5485 5486 next_index = found_key.offset + 1; 5487 5488 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); 5489 type = btrfs_dir_ftype(leaf, di); 5490 if (btrfs_dir_transid(leaf, di) < trans->transid) 5491 continue; 5492 btrfs_dir_item_key_to_cpu(leaf, di, &di_key); 5493 if (di_key.type == BTRFS_ROOT_ITEM_KEY) 5494 continue; 5495 5496 btrfs_release_path(path); 5497 di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root); 5498 if (IS_ERR(di_inode)) { 5499 ret = PTR_ERR(di_inode); 5500 goto out; 5501 } 5502 5503 if (!need_log_inode(trans, BTRFS_I(di_inode))) { 5504 btrfs_add_delayed_iput(BTRFS_I(di_inode)); 5505 break; 5506 } 5507 5508 ctx->log_new_dentries = false; 5509 if (type == BTRFS_FT_DIR) 5510 log_mode = LOG_INODE_ALL; 5511 ret = btrfs_log_inode(trans, BTRFS_I(di_inode), 5512 log_mode, ctx); 5513 btrfs_add_delayed_iput(BTRFS_I(di_inode)); 5514 if (ret) 5515 goto out; 5516 if (ctx->log_new_dentries) { 5517 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS); 5518 if (!dir_elem) { 5519 ret = -ENOMEM; 5520 goto out; 5521 } 5522 dir_elem->ino = di_key.objectid; 5523 list_add_tail(&dir_elem->list, &dir_list); 5524 } 5525 break; 5526 } 5527 5528 btrfs_release_path(path); 5529 5530 if (iter_ret < 0) { 5531 ret = iter_ret; 5532 goto out; 5533 } else if (iter_ret > 0) { 5534 continue_curr_inode = false; 5535 } else { 5536 key = found_key; 5537 } 5538 5539 if (continue_curr_inode && key.offset < (u64)-1) { 5540 
key.offset++; 5541 goto again; 5542 } 5543 5544 btrfs_set_first_dir_index_to_log(curr_inode, next_index); 5545 5546 if (list_empty(&dir_list)) 5547 break; 5548 5549 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list, list); 5550 ino = dir_elem->ino; 5551 list_del(&dir_elem->list); 5552 kfree(dir_elem); 5553 5554 btrfs_add_delayed_iput(curr_inode); 5555 curr_inode = NULL; 5556 5557 vfs_inode = btrfs_iget(fs_info->sb, ino, root); 5558 if (IS_ERR(vfs_inode)) { 5559 ret = PTR_ERR(vfs_inode); 5560 break; 5561 } 5562 curr_inode = BTRFS_I(vfs_inode); 5563 } 5564 out: 5565 btrfs_free_path(path); 5566 if (curr_inode) 5567 btrfs_add_delayed_iput(curr_inode); 5568 5569 if (ret) { 5570 struct btrfs_dir_list *next; 5571 5572 list_for_each_entry_safe(dir_elem, next, &dir_list, list) 5573 kfree(dir_elem); 5574 } 5575 5576 return ret; 5577 } 5578 5579 struct btrfs_ino_list { 5580 u64 ino; 5581 u64 parent; 5582 struct list_head list; 5583 }; 5584 5585 static void free_conflicting_inodes(struct btrfs_log_ctx *ctx) 5586 { 5587 struct btrfs_ino_list *curr; 5588 struct btrfs_ino_list *next; 5589 5590 list_for_each_entry_safe(curr, next, &ctx->conflict_inodes, list) { 5591 list_del(&curr->list); 5592 kfree(curr); 5593 } 5594 } 5595 5596 static int conflicting_inode_is_dir(struct btrfs_root *root, u64 ino, 5597 struct btrfs_path *path) 5598 { 5599 struct btrfs_key key; 5600 int ret; 5601 5602 key.objectid = ino; 5603 key.type = BTRFS_INODE_ITEM_KEY; 5604 key.offset = 0; 5605 5606 path->search_commit_root = 1; 5607 path->skip_locking = 1; 5608 5609 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5610 if (WARN_ON_ONCE(ret > 0)) { 5611 /* 5612 * We have previously found the inode through the commit root 5613 * so this should not happen. If it does, just error out and 5614 * fallback to a transaction commit. 5615 */ 5616 ret = -ENOENT; 5617 } else if (ret == 0) { 5618 struct btrfs_inode_item *item; 5619 5620 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 5621 struct btrfs_inode_item); 5622 if (S_ISDIR(btrfs_inode_mode(path->nodes[0], item))) 5623 ret = 1; 5624 } 5625 5626 btrfs_release_path(path); 5627 path->search_commit_root = 0; 5628 path->skip_locking = 0; 5629 5630 return ret; 5631 } 5632 5633 static int add_conflicting_inode(struct btrfs_trans_handle *trans, 5634 struct btrfs_root *root, 5635 struct btrfs_path *path, 5636 u64 ino, u64 parent, 5637 struct btrfs_log_ctx *ctx) 5638 { 5639 struct btrfs_ino_list *ino_elem; 5640 struct inode *inode; 5641 5642 /* 5643 * It's rare to have a lot of conflicting inodes, in practice it is not 5644 * common to have more than 1 or 2. We don't want to collect too many, 5645 * as we could end up logging too many inodes (even if only in 5646 * LOG_INODE_EXISTS mode) and slow down other fsyncs or transaction 5647 * commits. 5648 */ 5649 if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES) 5650 return BTRFS_LOG_FORCE_COMMIT; 5651 5652 inode = btrfs_iget(root->fs_info->sb, ino, root); 5653 /* 5654 * If the other inode that had a conflicting dir entry was deleted in 5655 * the current transaction then we either: 5656 * 5657 * 1) Log the parent directory (later after adding it to the list) if 5658 * the inode is a directory. This is because it may be a deleted 5659 * subvolume/snapshot or it may be a regular directory that had 5660 * deleted subvolumes/snapshots (or subdirectories that had them), 5661 * and at the moment we can't deal with dropping subvolumes/snapshots 5662 * during log replay. 
So we just log the parent, which will result in 5663 * a fallback to a transaction commit if we are dealing with those 5664 * cases (last_unlink_trans will match the current transaction); 5665 * 5666 * 2) Do nothing if it's not a directory. During log replay we simply 5667 * unlink the conflicting dentry from the parent directory and then 5668 * add the dentry for our inode. Like this we can avoid logging the 5669 * parent directory (and maybe fallback to a transaction commit in 5670 * case it has a last_unlink_trans == trans->transid, due to moving 5671 * some inode from it to some other directory). 5672 */ 5673 if (IS_ERR(inode)) { 5674 int ret = PTR_ERR(inode); 5675 5676 if (ret != -ENOENT) 5677 return ret; 5678 5679 ret = conflicting_inode_is_dir(root, ino, path); 5680 /* Not a directory or we got an error. */ 5681 if (ret <= 0) 5682 return ret; 5683 5684 /* Conflicting inode is a directory, so we'll log its parent. */ 5685 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); 5686 if (!ino_elem) 5687 return -ENOMEM; 5688 ino_elem->ino = ino; 5689 ino_elem->parent = parent; 5690 list_add_tail(&ino_elem->list, &ctx->conflict_inodes); 5691 ctx->num_conflict_inodes++; 5692 5693 return 0; 5694 } 5695 5696 /* 5697 * If the inode was already logged skip it - otherwise we can hit an 5698 * infinite loop. Example: 5699 * 5700 * From the commit root (previous transaction) we have the following 5701 * inodes: 5702 * 5703 * inode 257 a directory 5704 * inode 258 with references "zz" and "zz_link" on inode 257 5705 * inode 259 with reference "a" on inode 257 5706 * 5707 * And in the current (uncommitted) transaction we have: 5708 * 5709 * inode 257 a directory, unchanged 5710 * inode 258 with references "a" and "a2" on inode 257 5711 * inode 259 with reference "zz_link" on inode 257 5712 * inode 261 with reference "zz" on inode 257 5713 * 5714 * When logging inode 261 the following infinite loop could 5715 * happen if we don't skip already logged inodes: 5716 * 5717 * - we detect inode 258 as a conflicting inode, with inode 261 5718 * on reference "zz", and log it; 5719 * 5720 * - we detect inode 259 as a conflicting inode, with inode 258 5721 * on reference "a", and log it; 5722 * 5723 * - we detect inode 258 as a conflicting inode, with inode 259 5724 * on reference "zz_link", and log it - again! After this we 5725 * repeat the above steps forever. 5726 * 5727 * Here we can use need_log_inode() because we only need to log the 5728 * inode in LOG_INODE_EXISTS mode and rename operations update the log, 5729 * so that the log ends up with the new name and without the old name. 5730 */ 5731 if (!need_log_inode(trans, BTRFS_I(inode))) { 5732 btrfs_add_delayed_iput(BTRFS_I(inode)); 5733 return 0; 5734 } 5735 5736 btrfs_add_delayed_iput(BTRFS_I(inode)); 5737 5738 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); 5739 if (!ino_elem) 5740 return -ENOMEM; 5741 ino_elem->ino = ino; 5742 ino_elem->parent = parent; 5743 list_add_tail(&ino_elem->list, &ctx->conflict_inodes); 5744 ctx->num_conflict_inodes++; 5745 5746 return 0; 5747 } 5748 5749 static int log_conflicting_inodes(struct btrfs_trans_handle *trans, 5750 struct btrfs_root *root, 5751 struct btrfs_log_ctx *ctx) 5752 { 5753 struct btrfs_fs_info *fs_info = root->fs_info; 5754 int ret = 0; 5755 5756 /* 5757 * Conflicting inodes are logged by the first call to btrfs_log_inode(), 5758 * otherwise we could have unbounded recursion of btrfs_log_inode() 5759 * calls. This check guarantees we can have only 1 level of recursion. 
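 * While this function is logging a conflicting inode, any further conflicts
 * that btrfs_log_inode() finds are only queued on ctx->conflict_inodes and
 * picked up by the loop below, they never trigger another call into this
 * function.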
5760 */ 5761 if (ctx->logging_conflict_inodes) 5762 return 0; 5763 5764 ctx->logging_conflict_inodes = true; 5765 5766 /* 5767 * New conflicting inodes may be found and added to the list while we 5768 * are logging a conflicting inode, so keep iterating while the list is 5769 * not empty. 5770 */ 5771 while (!list_empty(&ctx->conflict_inodes)) { 5772 struct btrfs_ino_list *curr; 5773 struct inode *inode; 5774 u64 ino; 5775 u64 parent; 5776 5777 curr = list_first_entry(&ctx->conflict_inodes, 5778 struct btrfs_ino_list, list); 5779 ino = curr->ino; 5780 parent = curr->parent; 5781 list_del(&curr->list); 5782 kfree(curr); 5783 5784 inode = btrfs_iget(fs_info->sb, ino, root); 5785 /* 5786 * If the other inode that had a conflicting dir entry was 5787 * deleted in the current transaction, we need to log its parent 5788 * directory. See the comment at add_conflicting_inode(). 5789 */ 5790 if (IS_ERR(inode)) { 5791 ret = PTR_ERR(inode); 5792 if (ret != -ENOENT) 5793 break; 5794 5795 inode = btrfs_iget(fs_info->sb, parent, root); 5796 if (IS_ERR(inode)) { 5797 ret = PTR_ERR(inode); 5798 break; 5799 } 5800 5801 /* 5802 * Always log the directory, we cannot make this 5803 * conditional on need_log_inode() because the directory 5804 * might have been logged in LOG_INODE_EXISTS mode or 5805 * the dir index of the conflicting inode is not in a 5806 * dir index key range logged for the directory. So we 5807 * must make sure the deletion is recorded. 5808 */ 5809 ret = btrfs_log_inode(trans, BTRFS_I(inode), 5810 LOG_INODE_ALL, ctx); 5811 btrfs_add_delayed_iput(BTRFS_I(inode)); 5812 if (ret) 5813 break; 5814 continue; 5815 } 5816 5817 /* 5818 * Here we can use need_log_inode() because we only need to log 5819 * the inode in LOG_INODE_EXISTS mode and rename operations 5820 * update the log, so that the log ends up with the new name and 5821 * without the old name. 5822 * 5823 * We did this check at add_conflicting_inode(), but here we do 5824 * it again because if some other task logged the inode after 5825 * that, we can avoid doing it again. 5826 */ 5827 if (!need_log_inode(trans, BTRFS_I(inode))) { 5828 btrfs_add_delayed_iput(BTRFS_I(inode)); 5829 continue; 5830 } 5831 5832 /* 5833 * We are safe logging the other inode without acquiring its 5834 * lock as long as we log with the LOG_INODE_EXISTS mode. We 5835 * are safe against concurrent renames of the other inode as 5836 * well because during a rename we pin the log and update the 5837 * log with the new name before we unpin it. 
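 * (See btrfs_log_new_name(): it joins the running log transaction, which pins
 * the log, and only ends it after both the old directory and the renamed
 * inode have been updated in the log.)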
5838 */ 5839 ret = btrfs_log_inode(trans, BTRFS_I(inode), LOG_INODE_EXISTS, ctx); 5840 btrfs_add_delayed_iput(BTRFS_I(inode)); 5841 if (ret) 5842 break; 5843 } 5844 5845 ctx->logging_conflict_inodes = false; 5846 if (ret) 5847 free_conflicting_inodes(ctx); 5848 5849 return ret; 5850 } 5851 5852 static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, 5853 struct btrfs_inode *inode, 5854 struct btrfs_key *min_key, 5855 const struct btrfs_key *max_key, 5856 struct btrfs_path *path, 5857 struct btrfs_path *dst_path, 5858 const u64 logged_isize, 5859 const int inode_only, 5860 struct btrfs_log_ctx *ctx, 5861 bool *need_log_inode_item) 5862 { 5863 const u64 i_size = i_size_read(&inode->vfs_inode); 5864 struct btrfs_root *root = inode->root; 5865 int ins_start_slot = 0; 5866 int ins_nr = 0; 5867 int ret; 5868 5869 while (1) { 5870 ret = btrfs_search_forward(root, min_key, path, trans->transid); 5871 if (ret < 0) 5872 return ret; 5873 if (ret > 0) { 5874 ret = 0; 5875 break; 5876 } 5877 again: 5878 /* Note, ins_nr might be > 0 here, cleanup outside the loop */ 5879 if (min_key->objectid != max_key->objectid) 5880 break; 5881 if (min_key->type > max_key->type) 5882 break; 5883 5884 if (min_key->type == BTRFS_INODE_ITEM_KEY) { 5885 *need_log_inode_item = false; 5886 } else if (min_key->type == BTRFS_EXTENT_DATA_KEY && 5887 min_key->offset >= i_size) { 5888 /* 5889 * Extents at and beyond eof are logged with 5890 * btrfs_log_prealloc_extents(). 5891 * Only regular files have BTRFS_EXTENT_DATA_KEY keys, 5892 * and no keys greater than that, so bail out. 5893 */ 5894 break; 5895 } else if ((min_key->type == BTRFS_INODE_REF_KEY || 5896 min_key->type == BTRFS_INODE_EXTREF_KEY) && 5897 (inode->generation == trans->transid || 5898 ctx->logging_conflict_inodes)) { 5899 u64 other_ino = 0; 5900 u64 other_parent = 0; 5901 5902 ret = btrfs_check_ref_name_override(path->nodes[0], 5903 path->slots[0], min_key, inode, 5904 &other_ino, &other_parent); 5905 if (ret < 0) { 5906 return ret; 5907 } else if (ret > 0 && 5908 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) { 5909 if (ins_nr > 0) { 5910 ins_nr++; 5911 } else { 5912 ins_nr = 1; 5913 ins_start_slot = path->slots[0]; 5914 } 5915 ret = copy_items(trans, inode, dst_path, path, 5916 ins_start_slot, ins_nr, 5917 inode_only, logged_isize, ctx); 5918 if (ret < 0) 5919 return ret; 5920 ins_nr = 0; 5921 5922 btrfs_release_path(path); 5923 ret = add_conflicting_inode(trans, root, path, 5924 other_ino, 5925 other_parent, ctx); 5926 if (ret) 5927 return ret; 5928 goto next_key; 5929 } 5930 } else if (min_key->type == BTRFS_XATTR_ITEM_KEY) { 5931 /* Skip xattrs, logged later with btrfs_log_all_xattrs() */ 5932 if (ins_nr == 0) 5933 goto next_slot; 5934 ret = copy_items(trans, inode, dst_path, path, 5935 ins_start_slot, 5936 ins_nr, inode_only, logged_isize, ctx); 5937 if (ret < 0) 5938 return ret; 5939 ins_nr = 0; 5940 goto next_slot; 5941 } 5942 5943 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { 5944 ins_nr++; 5945 goto next_slot; 5946 } else if (!ins_nr) { 5947 ins_start_slot = path->slots[0]; 5948 ins_nr = 1; 5949 goto next_slot; 5950 } 5951 5952 ret = copy_items(trans, inode, dst_path, path, ins_start_slot, 5953 ins_nr, inode_only, logged_isize, ctx); 5954 if (ret < 0) 5955 return ret; 5956 ins_nr = 1; 5957 ins_start_slot = path->slots[0]; 5958 next_slot: 5959 path->slots[0]++; 5960 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { 5961 btrfs_item_key_to_cpu(path->nodes[0], min_key, 5962 path->slots[0]); 5963 goto again; 5964 } 5965 
if (ins_nr) { 5966 ret = copy_items(trans, inode, dst_path, path, 5967 ins_start_slot, ins_nr, inode_only, 5968 logged_isize, ctx); 5969 if (ret < 0) 5970 return ret; 5971 ins_nr = 0; 5972 } 5973 btrfs_release_path(path); 5974 next_key: 5975 if (min_key->offset < (u64)-1) { 5976 min_key->offset++; 5977 } else if (min_key->type < max_key->type) { 5978 min_key->type++; 5979 min_key->offset = 0; 5980 } else { 5981 break; 5982 } 5983 5984 /* 5985 * We may process many leaves full of items for our inode, so 5986 * avoid monopolizing a cpu for too long by rescheduling while 5987 * not holding locks on any tree. 5988 */ 5989 cond_resched(); 5990 } 5991 if (ins_nr) { 5992 ret = copy_items(trans, inode, dst_path, path, ins_start_slot, 5993 ins_nr, inode_only, logged_isize, ctx); 5994 if (ret) 5995 return ret; 5996 } 5997 5998 if (inode_only == LOG_INODE_ALL && S_ISREG(inode->vfs_inode.i_mode)) { 5999 /* 6000 * Release the path because otherwise we might attempt to double 6001 * lock the same leaf with btrfs_log_prealloc_extents() below. 6002 */ 6003 btrfs_release_path(path); 6004 ret = btrfs_log_prealloc_extents(trans, inode, dst_path, ctx); 6005 } 6006 6007 return ret; 6008 } 6009 6010 static int insert_delayed_items_batch(struct btrfs_trans_handle *trans, 6011 struct btrfs_root *log, 6012 struct btrfs_path *path, 6013 const struct btrfs_item_batch *batch, 6014 const struct btrfs_delayed_item *first_item) 6015 { 6016 const struct btrfs_delayed_item *curr = first_item; 6017 int ret; 6018 6019 ret = btrfs_insert_empty_items(trans, log, path, batch); 6020 if (ret) 6021 return ret; 6022 6023 for (int i = 0; i < batch->nr; i++) { 6024 char *data_ptr; 6025 6026 data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char); 6027 write_extent_buffer(path->nodes[0], &curr->data, 6028 (unsigned long)data_ptr, curr->data_len); 6029 curr = list_next_entry(curr, log_list); 6030 path->slots[0]++; 6031 } 6032 6033 btrfs_release_path(path); 6034 6035 return 0; 6036 } 6037 6038 static int log_delayed_insertion_items(struct btrfs_trans_handle *trans, 6039 struct btrfs_inode *inode, 6040 struct btrfs_path *path, 6041 const struct list_head *delayed_ins_list, 6042 struct btrfs_log_ctx *ctx) 6043 { 6044 /* 195 (4095 bytes of keys and sizes) fits in a single 4K page. */ 6045 const int max_batch_size = 195; 6046 const int leaf_data_size = BTRFS_LEAF_DATA_SIZE(trans->fs_info); 6047 const u64 ino = btrfs_ino(inode); 6048 struct btrfs_root *log = inode->root->log_root; 6049 struct btrfs_item_batch batch = { 6050 .nr = 0, 6051 .total_data_size = 0, 6052 }; 6053 const struct btrfs_delayed_item *first = NULL; 6054 const struct btrfs_delayed_item *curr; 6055 char *ins_data; 6056 struct btrfs_key *ins_keys; 6057 u32 *ins_sizes; 6058 u64 curr_batch_size = 0; 6059 int batch_idx = 0; 6060 int ret; 6061 6062 /* We are adding dir index items to the log tree. */ 6063 lockdep_assert_held(&inode->log_mutex); 6064 6065 /* 6066 * We collect delayed items before copying index keys from the subvolume 6067 * to the log tree. However just after we collected them, they may have 6068 * been flushed (all of them or just some of them), and therefore we 6069 * could have copied them from the subvolume tree to the log tree. 6070 * So find the first delayed item that was not yet logged (they are 6071 * sorted by index number). 
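 * For example (illustrative): if last_dir_index_offset is 100 and the list
 * contains items with indexes 90, 95, 101 and 102, the first two were already
 * copied to the log, so we start batching from the item with index 101.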
6072 */ 6073 list_for_each_entry(curr, delayed_ins_list, log_list) { 6074 if (curr->index > inode->last_dir_index_offset) { 6075 first = curr; 6076 break; 6077 } 6078 } 6079 6080 /* Empty list or all delayed items were already logged. */ 6081 if (!first) 6082 return 0; 6083 6084 ins_data = kmalloc(max_batch_size * sizeof(u32) + 6085 max_batch_size * sizeof(struct btrfs_key), GFP_NOFS); 6086 if (!ins_data) 6087 return -ENOMEM; 6088 ins_sizes = (u32 *)ins_data; 6089 batch.data_sizes = ins_sizes; 6090 ins_keys = (struct btrfs_key *)(ins_data + max_batch_size * sizeof(u32)); 6091 batch.keys = ins_keys; 6092 6093 curr = first; 6094 while (!list_entry_is_head(curr, delayed_ins_list, log_list)) { 6095 const u32 curr_size = curr->data_len + sizeof(struct btrfs_item); 6096 6097 if (curr_batch_size + curr_size > leaf_data_size || 6098 batch.nr == max_batch_size) { 6099 ret = insert_delayed_items_batch(trans, log, path, 6100 &batch, first); 6101 if (ret) 6102 goto out; 6103 batch_idx = 0; 6104 batch.nr = 0; 6105 batch.total_data_size = 0; 6106 curr_batch_size = 0; 6107 first = curr; 6108 } 6109 6110 ins_sizes[batch_idx] = curr->data_len; 6111 ins_keys[batch_idx].objectid = ino; 6112 ins_keys[batch_idx].type = BTRFS_DIR_INDEX_KEY; 6113 ins_keys[batch_idx].offset = curr->index; 6114 curr_batch_size += curr_size; 6115 batch.total_data_size += curr->data_len; 6116 batch.nr++; 6117 batch_idx++; 6118 curr = list_next_entry(curr, log_list); 6119 } 6120 6121 ASSERT(batch.nr >= 1); 6122 ret = insert_delayed_items_batch(trans, log, path, &batch, first); 6123 6124 curr = list_last_entry(delayed_ins_list, struct btrfs_delayed_item, 6125 log_list); 6126 inode->last_dir_index_offset = curr->index; 6127 out: 6128 kfree(ins_data); 6129 6130 return ret; 6131 } 6132 6133 static int log_delayed_deletions_full(struct btrfs_trans_handle *trans, 6134 struct btrfs_inode *inode, 6135 struct btrfs_path *path, 6136 const struct list_head *delayed_del_list, 6137 struct btrfs_log_ctx *ctx) 6138 { 6139 const u64 ino = btrfs_ino(inode); 6140 const struct btrfs_delayed_item *curr; 6141 6142 curr = list_first_entry(delayed_del_list, struct btrfs_delayed_item, 6143 log_list); 6144 6145 while (!list_entry_is_head(curr, delayed_del_list, log_list)) { 6146 u64 first_dir_index = curr->index; 6147 u64 last_dir_index; 6148 const struct btrfs_delayed_item *next; 6149 int ret; 6150 6151 /* 6152 * Find a range of consecutive dir index items to delete. Like 6153 * this we log a single dir range item spanning several contiguous 6154 * dir items instead of logging one range item per dir index item. 
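 * For example (illustrative): if the delayed deletions cover indexes 10, 11,
 * 12 and 20, we insert one dir log range item for [10, 12] and another one
 * for [20, 20].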
6155 */ 6156 next = list_next_entry(curr, log_list); 6157 while (!list_entry_is_head(next, delayed_del_list, log_list)) { 6158 if (next->index != curr->index + 1) 6159 break; 6160 curr = next; 6161 next = list_next_entry(next, log_list); 6162 } 6163 6164 last_dir_index = curr->index; 6165 ASSERT(last_dir_index >= first_dir_index); 6166 6167 ret = insert_dir_log_key(trans, inode->root->log_root, path, 6168 ino, first_dir_index, last_dir_index); 6169 if (ret) 6170 return ret; 6171 curr = list_next_entry(curr, log_list); 6172 } 6173 6174 return 0; 6175 } 6176 6177 static int batch_delete_dir_index_items(struct btrfs_trans_handle *trans, 6178 struct btrfs_inode *inode, 6179 struct btrfs_path *path, 6180 struct btrfs_log_ctx *ctx, 6181 const struct list_head *delayed_del_list, 6182 const struct btrfs_delayed_item *first, 6183 const struct btrfs_delayed_item **last_ret) 6184 { 6185 const struct btrfs_delayed_item *next; 6186 struct extent_buffer *leaf = path->nodes[0]; 6187 const int last_slot = btrfs_header_nritems(leaf) - 1; 6188 int slot = path->slots[0] + 1; 6189 const u64 ino = btrfs_ino(inode); 6190 6191 next = list_next_entry(first, log_list); 6192 6193 while (slot < last_slot && 6194 !list_entry_is_head(next, delayed_del_list, log_list)) { 6195 struct btrfs_key key; 6196 6197 btrfs_item_key_to_cpu(leaf, &key, slot); 6198 if (key.objectid != ino || 6199 key.type != BTRFS_DIR_INDEX_KEY || 6200 key.offset != next->index) 6201 break; 6202 6203 slot++; 6204 *last_ret = next; 6205 next = list_next_entry(next, log_list); 6206 } 6207 6208 return btrfs_del_items(trans, inode->root->log_root, path, 6209 path->slots[0], slot - path->slots[0]); 6210 } 6211 6212 static int log_delayed_deletions_incremental(struct btrfs_trans_handle *trans, 6213 struct btrfs_inode *inode, 6214 struct btrfs_path *path, 6215 const struct list_head *delayed_del_list, 6216 struct btrfs_log_ctx *ctx) 6217 { 6218 struct btrfs_root *log = inode->root->log_root; 6219 const struct btrfs_delayed_item *curr; 6220 u64 last_range_start = 0; 6221 u64 last_range_end = 0; 6222 struct btrfs_key key; 6223 6224 key.objectid = btrfs_ino(inode); 6225 key.type = BTRFS_DIR_INDEX_KEY; 6226 curr = list_first_entry(delayed_del_list, struct btrfs_delayed_item, 6227 log_list); 6228 6229 while (!list_entry_is_head(curr, delayed_del_list, log_list)) { 6230 const struct btrfs_delayed_item *last = curr; 6231 u64 first_dir_index = curr->index; 6232 u64 last_dir_index; 6233 bool deleted_items = false; 6234 int ret; 6235 6236 key.offset = curr->index; 6237 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 6238 if (ret < 0) { 6239 return ret; 6240 } else if (ret == 0) { 6241 ret = batch_delete_dir_index_items(trans, inode, path, ctx, 6242 delayed_del_list, curr, 6243 &last); 6244 if (ret) 6245 return ret; 6246 deleted_items = true; 6247 } 6248 6249 btrfs_release_path(path); 6250 6251 /* 6252 * If we deleted items from the leaf, it means we have a range 6253 * item logging their range, so no need to add one or update an 6254 * existing one. Otherwise we have to log a dir range item. 6255 */ 6256 if (deleted_items) 6257 goto next_batch; 6258 6259 last_dir_index = last->index; 6260 ASSERT(last_dir_index >= first_dir_index); 6261 /* 6262 * If this range starts right after where the previous one ends, 6263 * then we want to reuse the previous range item and change its 6264 * end offset to the end of this range. This is just to minimize 6265 * leaf space usage, by avoiding adding a new range item. 
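 * For example (illustrative): if the previous iteration logged the range
 * [5, 8] and the current batch covers indexes 9 to 12, we log a single range
 * for [5, 12] instead of adding a separate item for [9, 12].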
6266 */ 6267 if (last_range_end != 0 && first_dir_index == last_range_end + 1) 6268 first_dir_index = last_range_start; 6269 6270 ret = insert_dir_log_key(trans, log, path, key.objectid, 6271 first_dir_index, last_dir_index); 6272 if (ret) 6273 return ret; 6274 6275 last_range_start = first_dir_index; 6276 last_range_end = last_dir_index; 6277 next_batch: 6278 curr = list_next_entry(last, log_list); 6279 } 6280 6281 return 0; 6282 } 6283 6284 static int log_delayed_deletion_items(struct btrfs_trans_handle *trans, 6285 struct btrfs_inode *inode, 6286 struct btrfs_path *path, 6287 const struct list_head *delayed_del_list, 6288 struct btrfs_log_ctx *ctx) 6289 { 6290 /* 6291 * We are deleting dir index items from the log tree or adding range 6292 * items to it. 6293 */ 6294 lockdep_assert_held(&inode->log_mutex); 6295 6296 if (list_empty(delayed_del_list)) 6297 return 0; 6298 6299 if (ctx->logged_before) 6300 return log_delayed_deletions_incremental(trans, inode, path, 6301 delayed_del_list, ctx); 6302 6303 return log_delayed_deletions_full(trans, inode, path, delayed_del_list, 6304 ctx); 6305 } 6306 6307 /* 6308 * Similar logic as for log_new_dir_dentries(), but it iterates over the delayed 6309 * items instead of the subvolume tree. 6310 */ 6311 static int log_new_delayed_dentries(struct btrfs_trans_handle *trans, 6312 struct btrfs_inode *inode, 6313 const struct list_head *delayed_ins_list, 6314 struct btrfs_log_ctx *ctx) 6315 { 6316 const bool orig_log_new_dentries = ctx->log_new_dentries; 6317 struct btrfs_fs_info *fs_info = trans->fs_info; 6318 struct btrfs_delayed_item *item; 6319 int ret = 0; 6320 6321 /* 6322 * No need for the log mutex, plus to avoid potential deadlocks or 6323 * lockdep annotations due to nesting of delayed inode mutexes and log 6324 * mutexes. 6325 */ 6326 lockdep_assert_not_held(&inode->log_mutex); 6327 6328 ASSERT(!ctx->logging_new_delayed_dentries); 6329 ctx->logging_new_delayed_dentries = true; 6330 6331 list_for_each_entry(item, delayed_ins_list, log_list) { 6332 struct btrfs_dir_item *dir_item; 6333 struct inode *di_inode; 6334 struct btrfs_key key; 6335 int log_mode = LOG_INODE_EXISTS; 6336 6337 dir_item = (struct btrfs_dir_item *)item->data; 6338 btrfs_disk_key_to_cpu(&key, &dir_item->location); 6339 6340 if (key.type == BTRFS_ROOT_ITEM_KEY) 6341 continue; 6342 6343 di_inode = btrfs_iget(fs_info->sb, key.objectid, inode->root); 6344 if (IS_ERR(di_inode)) { 6345 ret = PTR_ERR(di_inode); 6346 break; 6347 } 6348 6349 if (!need_log_inode(trans, BTRFS_I(di_inode))) { 6350 btrfs_add_delayed_iput(BTRFS_I(di_inode)); 6351 continue; 6352 } 6353 6354 if (btrfs_stack_dir_ftype(dir_item) == BTRFS_FT_DIR) 6355 log_mode = LOG_INODE_ALL; 6356 6357 ctx->log_new_dentries = false; 6358 ret = btrfs_log_inode(trans, BTRFS_I(di_inode), log_mode, ctx); 6359 6360 if (!ret && ctx->log_new_dentries) 6361 ret = log_new_dir_dentries(trans, BTRFS_I(di_inode), ctx); 6362 6363 btrfs_add_delayed_iput(BTRFS_I(di_inode)); 6364 6365 if (ret) 6366 break; 6367 } 6368 6369 ctx->log_new_dentries = orig_log_new_dentries; 6370 ctx->logging_new_delayed_dentries = false; 6371 6372 return ret; 6373 } 6374 6375 /* log a single inode in the tree log. 6376 * At least one parent directory for this inode must exist in the tree 6377 * or be logged already. 6378 * 6379 * Any items from this inode changed by the current transaction are copied 6380 * to the log tree. 
An extra reference is taken on any extents in this 6381 * file, allowing us to avoid a whole pile of corner cases around logging 6382 * blocks that have been removed from the tree. 6383 * 6384 * See LOG_INODE_ALL and related defines for a description of what inode_only 6385 * does. 6386 * 6387 * This handles both files and directories. 6388 */ 6389 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 6390 struct btrfs_inode *inode, 6391 int inode_only, 6392 struct btrfs_log_ctx *ctx) 6393 { 6394 struct btrfs_path *path; 6395 struct btrfs_path *dst_path; 6396 struct btrfs_key min_key; 6397 struct btrfs_key max_key; 6398 struct btrfs_root *log = inode->root->log_root; 6399 int ret; 6400 bool fast_search = false; 6401 u64 ino = btrfs_ino(inode); 6402 struct extent_map_tree *em_tree = &inode->extent_tree; 6403 u64 logged_isize = 0; 6404 bool need_log_inode_item = true; 6405 bool xattrs_logged = false; 6406 bool inode_item_dropped = true; 6407 bool full_dir_logging = false; 6408 LIST_HEAD(delayed_ins_list); 6409 LIST_HEAD(delayed_del_list); 6410 6411 path = btrfs_alloc_path(); 6412 if (!path) 6413 return -ENOMEM; 6414 dst_path = btrfs_alloc_path(); 6415 if (!dst_path) { 6416 btrfs_free_path(path); 6417 return -ENOMEM; 6418 } 6419 6420 min_key.objectid = ino; 6421 min_key.type = BTRFS_INODE_ITEM_KEY; 6422 min_key.offset = 0; 6423 6424 max_key.objectid = ino; 6425 6426 6427 /* today the code can only do partial logging of directories */ 6428 if (S_ISDIR(inode->vfs_inode.i_mode) || 6429 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 6430 &inode->runtime_flags) && 6431 inode_only >= LOG_INODE_EXISTS)) 6432 max_key.type = BTRFS_XATTR_ITEM_KEY; 6433 else 6434 max_key.type = (u8)-1; 6435 max_key.offset = (u64)-1; 6436 6437 if (S_ISDIR(inode->vfs_inode.i_mode) && inode_only == LOG_INODE_ALL) 6438 full_dir_logging = true; 6439 6440 /* 6441 * If we are logging a directory while we are logging dentries of the 6442 * delayed items of some other inode, then we need to flush the delayed 6443 * items of this directory and not log the delayed items directly. This 6444 * is to prevent more than one level of recursion into btrfs_log_inode() 6445 * by having something like this: 6446 * 6447 * $ mkdir -p a/b/c/d/e/f/g/h/... 6448 * $ xfs_io -c "fsync" a 6449 * 6450 * Where all directories in the path did not exist before and are 6451 * created in the current transaction. 6452 * So in such a case we directly log the delayed items of the main 6453 * directory ("a") without flushing them first, while for each of its 6454 * subdirectories we flush their delayed items before logging them. 6455 * This prevents a potential unbounded recursion like this: 6456 * 6457 * btrfs_log_inode() 6458 * log_new_delayed_dentries() 6459 * btrfs_log_inode() 6460 * log_new_delayed_dentries() 6461 * btrfs_log_inode() 6462 * log_new_delayed_dentries() 6463 * (...) 6464 * 6465 * We have thresholds for the maximum number of delayed items to have in 6466 * memory, and once they are hit, the items are flushed asynchronously. 6467 * However the limit is quite high, so lets prevent deep levels of 6468 * recursion to happen by limiting the maximum depth to be 1. 
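 * Concretely, log_new_delayed_dentries() sets ctx->logging_new_delayed_dentries
 * before logging each child directory, and the check below then makes those
 * children flush their delayed items with btrfs_commit_inode_delayed_items(),
 * so their dir index items are logged through the regular subvolume tree path
 * instead of recursing further.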
6469 */
6470 if (full_dir_logging && ctx->logging_new_delayed_dentries) {
6471 ret = btrfs_commit_inode_delayed_items(trans, inode);
6472 if (ret)
6473 goto out;
6474 }
6475 
6476 mutex_lock(&inode->log_mutex);
6477 
6478 /*
6479 * For symlinks, we must always log their content, which is stored in an
6480 * inline extent, otherwise we could end up with an empty symlink after
6481 * log replay, which is invalid on linux (symlink(2) returns -ENOENT if
6482 * one attempts to create an empty symlink).
6483 * We don't need to worry about flushing delalloc, because we create the
6484 * inline extent when the symlink is created (we never have delalloc for
6485 * symlinks).
6486 */
6487 if (S_ISLNK(inode->vfs_inode.i_mode))
6488 inode_only = LOG_INODE_ALL;
6489 
6490 /*
6491 * Before logging the inode item, cache the value returned by
6492 * inode_logged(), because after that we need to know if the inode was
6493 * previously logged in this transaction.
6494 */
6495 ret = inode_logged(trans, inode, path);
6496 if (ret < 0)
6497 goto out_unlock;
6498 ctx->logged_before = (ret == 1);
6499 ret = 0;
6500 
6501 /*
6502 * This is for cases where logging a directory could result in losing a
6503 * file after replaying the log. For example, if we move a file from a
6504 * directory A to a directory B, then fsync directory A, we have no way
6505 * to know the file was moved from A to B, so logging just A would
6506 * result in losing the file after a log replay.
6507 */
6508 if (full_dir_logging && inode->last_unlink_trans >= trans->transid) {
6509 ret = BTRFS_LOG_FORCE_COMMIT;
6510 goto out_unlock;
6511 }
6512 
6513 /*
6514 * a brute force approach to making sure we get the most up to date
6515 * copies of everything.
6516 */
6517 if (S_ISDIR(inode->vfs_inode.i_mode)) {
6518 clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags);
6519 if (ctx->logged_before)
6520 ret = drop_inode_items(trans, log, path, inode,
6521 BTRFS_XATTR_ITEM_KEY);
6522 } else {
6523 if (inode_only == LOG_INODE_EXISTS && ctx->logged_before) {
6524 /*
6525 * Make sure the new inode item we write to the log has
6526 * the same isize as the current one (if it exists).
6527 * This is necessary to prevent data loss after log
6528 * replay, and also to prevent doing a wrong expanding
6529 * truncate - e.g. create a file, write 4K into offset
6530 * 0, fsync, write 4K into offset 4096, add hard link,
6531 * fsync some other file (to sync log), power fail - if
6532 * we use the inode's current i_size, after log replay
6533 * we get an 8Kb file, with the last 4Kb extent as a hole
6534 * (zeroes), as if an expanding truncate happened,
6535 * instead of getting a file of 4Kb only.
6536 */ 6537 ret = logged_inode_size(log, inode, path, &logged_isize); 6538 if (ret) 6539 goto out_unlock; 6540 } 6541 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 6542 &inode->runtime_flags)) { 6543 if (inode_only == LOG_INODE_EXISTS) { 6544 max_key.type = BTRFS_XATTR_ITEM_KEY; 6545 if (ctx->logged_before) 6546 ret = drop_inode_items(trans, log, path, 6547 inode, max_key.type); 6548 } else { 6549 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 6550 &inode->runtime_flags); 6551 clear_bit(BTRFS_INODE_COPY_EVERYTHING, 6552 &inode->runtime_flags); 6553 if (ctx->logged_before) 6554 ret = truncate_inode_items(trans, log, 6555 inode, 0, 0); 6556 } 6557 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, 6558 &inode->runtime_flags) || 6559 inode_only == LOG_INODE_EXISTS) { 6560 if (inode_only == LOG_INODE_ALL) 6561 fast_search = true; 6562 max_key.type = BTRFS_XATTR_ITEM_KEY; 6563 if (ctx->logged_before) 6564 ret = drop_inode_items(trans, log, path, inode, 6565 max_key.type); 6566 } else { 6567 if (inode_only == LOG_INODE_ALL) 6568 fast_search = true; 6569 inode_item_dropped = false; 6570 goto log_extents; 6571 } 6572 6573 } 6574 if (ret) 6575 goto out_unlock; 6576 6577 /* 6578 * If we are logging a directory in full mode, collect the delayed items 6579 * before iterating the subvolume tree, so that we don't miss any new 6580 * dir index items in case they get flushed while or right after we are 6581 * iterating the subvolume tree. 6582 */ 6583 if (full_dir_logging && !ctx->logging_new_delayed_dentries) 6584 btrfs_log_get_delayed_items(inode, &delayed_ins_list, 6585 &delayed_del_list); 6586 6587 ret = copy_inode_items_to_log(trans, inode, &min_key, &max_key, 6588 path, dst_path, logged_isize, 6589 inode_only, ctx, 6590 &need_log_inode_item); 6591 if (ret) 6592 goto out_unlock; 6593 6594 btrfs_release_path(path); 6595 btrfs_release_path(dst_path); 6596 ret = btrfs_log_all_xattrs(trans, inode, path, dst_path, ctx); 6597 if (ret) 6598 goto out_unlock; 6599 xattrs_logged = true; 6600 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { 6601 btrfs_release_path(path); 6602 btrfs_release_path(dst_path); 6603 ret = btrfs_log_holes(trans, inode, path); 6604 if (ret) 6605 goto out_unlock; 6606 } 6607 log_extents: 6608 btrfs_release_path(path); 6609 btrfs_release_path(dst_path); 6610 if (need_log_inode_item) { 6611 ret = log_inode_item(trans, log, dst_path, inode, inode_item_dropped); 6612 if (ret) 6613 goto out_unlock; 6614 /* 6615 * If we are doing a fast fsync and the inode was logged before 6616 * in this transaction, we don't need to log the xattrs because 6617 * they were logged before. If xattrs were added, changed or 6618 * deleted since the last time we logged the inode, then we have 6619 * already logged them because the inode had the runtime flag 6620 * BTRFS_INODE_COPY_EVERYTHING set. 
6621 */ 6622 if (!xattrs_logged && inode->logged_trans < trans->transid) { 6623 ret = btrfs_log_all_xattrs(trans, inode, path, dst_path, ctx); 6624 if (ret) 6625 goto out_unlock; 6626 btrfs_release_path(path); 6627 } 6628 } 6629 if (fast_search) { 6630 ret = btrfs_log_changed_extents(trans, inode, dst_path, ctx); 6631 if (ret) 6632 goto out_unlock; 6633 } else if (inode_only == LOG_INODE_ALL) { 6634 struct extent_map *em, *n; 6635 6636 write_lock(&em_tree->lock); 6637 list_for_each_entry_safe(em, n, &em_tree->modified_extents, list) 6638 list_del_init(&em->list); 6639 write_unlock(&em_tree->lock); 6640 } 6641 6642 if (full_dir_logging) { 6643 ret = log_directory_changes(trans, inode, path, dst_path, ctx); 6644 if (ret) 6645 goto out_unlock; 6646 ret = log_delayed_insertion_items(trans, inode, path, 6647 &delayed_ins_list, ctx); 6648 if (ret) 6649 goto out_unlock; 6650 ret = log_delayed_deletion_items(trans, inode, path, 6651 &delayed_del_list, ctx); 6652 if (ret) 6653 goto out_unlock; 6654 } 6655 6656 spin_lock(&inode->lock); 6657 inode->logged_trans = trans->transid; 6658 /* 6659 * Don't update last_log_commit if we logged that an inode exists. 6660 * We do this for three reasons: 6661 * 6662 * 1) We might have had buffered writes to this inode that were 6663 * flushed and had their ordered extents completed in this 6664 * transaction, but we did not previously log the inode with 6665 * LOG_INODE_ALL. Later the inode was evicted and after that 6666 * it was loaded again and this LOG_INODE_EXISTS log operation 6667 * happened. We must make sure that if an explicit fsync against 6668 * the inode is performed later, it logs the new extents, an 6669 * updated inode item, etc, and syncs the log. The same logic 6670 * applies to direct IO writes instead of buffered writes. 6671 * 6672 * 2) When we log the inode with LOG_INODE_EXISTS, its inode item 6673 * is logged with an i_size of 0 or whatever value was logged 6674 * before. If later the i_size of the inode is increased by a 6675 * truncate operation, the log is synced through an fsync of 6676 * some other inode and then finally an explicit fsync against 6677 * this inode is made, we must make sure this fsync logs the 6678 * inode with the new i_size, the hole between old i_size and 6679 * the new i_size, and syncs the log. 6680 * 6681 * 3) If we are logging that an ancestor inode exists as part of 6682 * logging a new name from a link or rename operation, don't update 6683 * its last_log_commit - otherwise if an explicit fsync is made 6684 * against an ancestor, the fsync considers the inode in the log 6685 * and doesn't sync the log, resulting in the ancestor missing after 6686 * a power failure unless the log was synced as part of an fsync 6687 * against any other unrelated inode. 6688 */ 6689 if (inode_only != LOG_INODE_EXISTS) 6690 inode->last_log_commit = inode->last_sub_trans; 6691 spin_unlock(&inode->lock); 6692 6693 /* 6694 * Reset the last_reflink_trans so that the next fsync does not need to 6695 * go through the slower path when logging extents and their checksums. 
6696 */ 6697 if (inode_only == LOG_INODE_ALL) 6698 inode->last_reflink_trans = 0; 6699 6700 out_unlock: 6701 mutex_unlock(&inode->log_mutex); 6702 out: 6703 btrfs_free_path(path); 6704 btrfs_free_path(dst_path); 6705 6706 if (ret) 6707 free_conflicting_inodes(ctx); 6708 else 6709 ret = log_conflicting_inodes(trans, inode->root, ctx); 6710 6711 if (full_dir_logging && !ctx->logging_new_delayed_dentries) { 6712 if (!ret) 6713 ret = log_new_delayed_dentries(trans, inode, 6714 &delayed_ins_list, ctx); 6715 6716 btrfs_log_put_delayed_items(inode, &delayed_ins_list, 6717 &delayed_del_list); 6718 } 6719 6720 return ret; 6721 } 6722 6723 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, 6724 struct btrfs_inode *inode, 6725 struct btrfs_log_ctx *ctx) 6726 { 6727 struct btrfs_fs_info *fs_info = trans->fs_info; 6728 int ret; 6729 struct btrfs_path *path; 6730 struct btrfs_key key; 6731 struct btrfs_root *root = inode->root; 6732 const u64 ino = btrfs_ino(inode); 6733 6734 path = btrfs_alloc_path(); 6735 if (!path) 6736 return -ENOMEM; 6737 path->skip_locking = 1; 6738 path->search_commit_root = 1; 6739 6740 key.objectid = ino; 6741 key.type = BTRFS_INODE_REF_KEY; 6742 key.offset = 0; 6743 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6744 if (ret < 0) 6745 goto out; 6746 6747 while (true) { 6748 struct extent_buffer *leaf = path->nodes[0]; 6749 int slot = path->slots[0]; 6750 u32 cur_offset = 0; 6751 u32 item_size; 6752 unsigned long ptr; 6753 6754 if (slot >= btrfs_header_nritems(leaf)) { 6755 ret = btrfs_next_leaf(root, path); 6756 if (ret < 0) 6757 goto out; 6758 else if (ret > 0) 6759 break; 6760 continue; 6761 } 6762 6763 btrfs_item_key_to_cpu(leaf, &key, slot); 6764 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */ 6765 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY) 6766 break; 6767 6768 item_size = btrfs_item_size(leaf, slot); 6769 ptr = btrfs_item_ptr_offset(leaf, slot); 6770 while (cur_offset < item_size) { 6771 struct btrfs_key inode_key; 6772 struct inode *dir_inode; 6773 6774 inode_key.type = BTRFS_INODE_ITEM_KEY; 6775 inode_key.offset = 0; 6776 6777 if (key.type == BTRFS_INODE_EXTREF_KEY) { 6778 struct btrfs_inode_extref *extref; 6779 6780 extref = (struct btrfs_inode_extref *) 6781 (ptr + cur_offset); 6782 inode_key.objectid = btrfs_inode_extref_parent( 6783 leaf, extref); 6784 cur_offset += sizeof(*extref); 6785 cur_offset += btrfs_inode_extref_name_len(leaf, 6786 extref); 6787 } else { 6788 inode_key.objectid = key.offset; 6789 cur_offset = item_size; 6790 } 6791 6792 dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid, 6793 root); 6794 /* 6795 * If the parent inode was deleted, return an error to 6796 * fallback to a transaction commit. This is to prevent 6797 * getting an inode that was moved from one parent A to 6798 * a parent B, got its former parent A deleted and then 6799 * it got fsync'ed, from existing at both parents after 6800 * a log replay (and the old parent still existing). 6801 * Example: 6802 * 6803 * mkdir /mnt/A 6804 * mkdir /mnt/B 6805 * touch /mnt/B/bar 6806 * sync 6807 * mv /mnt/B/bar /mnt/A/bar 6808 * mv -T /mnt/A /mnt/B 6809 * fsync /mnt/B/bar 6810 * <power fail> 6811 * 6812 * If we ignore the old parent B which got deleted, 6813 * after a log replay we would have file bar linked 6814 * at both parents and the old parent B would still 6815 * exist. 
6816 */ 6817 if (IS_ERR(dir_inode)) { 6818 ret = PTR_ERR(dir_inode); 6819 goto out; 6820 } 6821 6822 if (!need_log_inode(trans, BTRFS_I(dir_inode))) { 6823 btrfs_add_delayed_iput(BTRFS_I(dir_inode)); 6824 continue; 6825 } 6826 6827 ctx->log_new_dentries = false; 6828 ret = btrfs_log_inode(trans, BTRFS_I(dir_inode), 6829 LOG_INODE_ALL, ctx); 6830 if (!ret && ctx->log_new_dentries) 6831 ret = log_new_dir_dentries(trans, 6832 BTRFS_I(dir_inode), ctx); 6833 btrfs_add_delayed_iput(BTRFS_I(dir_inode)); 6834 if (ret) 6835 goto out; 6836 } 6837 path->slots[0]++; 6838 } 6839 ret = 0; 6840 out: 6841 btrfs_free_path(path); 6842 return ret; 6843 } 6844 6845 static int log_new_ancestors(struct btrfs_trans_handle *trans, 6846 struct btrfs_root *root, 6847 struct btrfs_path *path, 6848 struct btrfs_log_ctx *ctx) 6849 { 6850 struct btrfs_key found_key; 6851 6852 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); 6853 6854 while (true) { 6855 struct btrfs_fs_info *fs_info = root->fs_info; 6856 struct extent_buffer *leaf; 6857 int slot; 6858 struct btrfs_key search_key; 6859 struct inode *inode; 6860 u64 ino; 6861 int ret = 0; 6862 6863 btrfs_release_path(path); 6864 6865 ino = found_key.offset; 6866 6867 search_key.objectid = found_key.offset; 6868 search_key.type = BTRFS_INODE_ITEM_KEY; 6869 search_key.offset = 0; 6870 inode = btrfs_iget(fs_info->sb, ino, root); 6871 if (IS_ERR(inode)) 6872 return PTR_ERR(inode); 6873 6874 if (BTRFS_I(inode)->generation >= trans->transid && 6875 need_log_inode(trans, BTRFS_I(inode))) 6876 ret = btrfs_log_inode(trans, BTRFS_I(inode), 6877 LOG_INODE_EXISTS, ctx); 6878 btrfs_add_delayed_iput(BTRFS_I(inode)); 6879 if (ret) 6880 return ret; 6881 6882 if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID) 6883 break; 6884 6885 search_key.type = BTRFS_INODE_REF_KEY; 6886 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 6887 if (ret < 0) 6888 return ret; 6889 6890 leaf = path->nodes[0]; 6891 slot = path->slots[0]; 6892 if (slot >= btrfs_header_nritems(leaf)) { 6893 ret = btrfs_next_leaf(root, path); 6894 if (ret < 0) 6895 return ret; 6896 else if (ret > 0) 6897 return -ENOENT; 6898 leaf = path->nodes[0]; 6899 slot = path->slots[0]; 6900 } 6901 6902 btrfs_item_key_to_cpu(leaf, &found_key, slot); 6903 if (found_key.objectid != search_key.objectid || 6904 found_key.type != BTRFS_INODE_REF_KEY) 6905 return -ENOENT; 6906 } 6907 return 0; 6908 } 6909 6910 static int log_new_ancestors_fast(struct btrfs_trans_handle *trans, 6911 struct btrfs_inode *inode, 6912 struct dentry *parent, 6913 struct btrfs_log_ctx *ctx) 6914 { 6915 struct btrfs_root *root = inode->root; 6916 struct dentry *old_parent = NULL; 6917 struct super_block *sb = inode->vfs_inode.i_sb; 6918 int ret = 0; 6919 6920 while (true) { 6921 if (!parent || d_really_is_negative(parent) || 6922 sb != parent->d_sb) 6923 break; 6924 6925 inode = BTRFS_I(d_inode(parent)); 6926 if (root != inode->root) 6927 break; 6928 6929 if (inode->generation >= trans->transid && 6930 need_log_inode(trans, inode)) { 6931 ret = btrfs_log_inode(trans, inode, 6932 LOG_INODE_EXISTS, ctx); 6933 if (ret) 6934 break; 6935 } 6936 if (IS_ROOT(parent)) 6937 break; 6938 6939 parent = dget_parent(parent); 6940 dput(old_parent); 6941 old_parent = parent; 6942 } 6943 dput(old_parent); 6944 6945 return ret; 6946 } 6947 6948 static int log_all_new_ancestors(struct btrfs_trans_handle *trans, 6949 struct btrfs_inode *inode, 6950 struct dentry *parent, 6951 struct btrfs_log_ctx *ctx) 6952 { 6953 struct btrfs_root *root = inode->root; 6954 
const u64 ino = btrfs_ino(inode); 6955 struct btrfs_path *path; 6956 struct btrfs_key search_key; 6957 int ret; 6958 6959 /* 6960 * For a single hard link case, go through a fast path that does not 6961 * need to iterate the fs/subvolume tree. 6962 */ 6963 if (inode->vfs_inode.i_nlink < 2) 6964 return log_new_ancestors_fast(trans, inode, parent, ctx); 6965 6966 path = btrfs_alloc_path(); 6967 if (!path) 6968 return -ENOMEM; 6969 6970 search_key.objectid = ino; 6971 search_key.type = BTRFS_INODE_REF_KEY; 6972 search_key.offset = 0; 6973 again: 6974 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 6975 if (ret < 0) 6976 goto out; 6977 if (ret == 0) 6978 path->slots[0]++; 6979 6980 while (true) { 6981 struct extent_buffer *leaf = path->nodes[0]; 6982 int slot = path->slots[0]; 6983 struct btrfs_key found_key; 6984 6985 if (slot >= btrfs_header_nritems(leaf)) { 6986 ret = btrfs_next_leaf(root, path); 6987 if (ret < 0) 6988 goto out; 6989 else if (ret > 0) 6990 break; 6991 continue; 6992 } 6993 6994 btrfs_item_key_to_cpu(leaf, &found_key, slot); 6995 if (found_key.objectid != ino || 6996 found_key.type > BTRFS_INODE_EXTREF_KEY) 6997 break; 6998 6999 /* 7000 * Don't deal with extended references because they are rare 7001 * cases and too complex to deal with (we would need to keep 7002 * track of which subitem we are processing for each item in 7003 * this loop, etc). So just return some error to fallback to 7004 * a transaction commit. 7005 */ 7006 if (found_key.type == BTRFS_INODE_EXTREF_KEY) { 7007 ret = -EMLINK; 7008 goto out; 7009 } 7010 7011 /* 7012 * Logging ancestors needs to do more searches on the fs/subvol 7013 * tree, so it releases the path as needed to avoid deadlocks. 7014 * Keep track of the last inode ref key and resume from that key 7015 * after logging all new ancestors for the current hard link. 7016 */ 7017 memcpy(&search_key, &found_key, sizeof(search_key)); 7018 7019 ret = log_new_ancestors(trans, root, path, ctx); 7020 if (ret) 7021 goto out; 7022 btrfs_release_path(path); 7023 goto again; 7024 } 7025 ret = 0; 7026 out: 7027 btrfs_free_path(path); 7028 return ret; 7029 } 7030 7031 /* 7032 * helper function around btrfs_log_inode to make sure newly created 7033 * parent directories also end up in the log. A minimal inode and backref 7034 * only logging is done of any parent directories that are older than 7035 * the last committed transaction 7036 */ 7037 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 7038 struct btrfs_inode *inode, 7039 struct dentry *parent, 7040 int inode_only, 7041 struct btrfs_log_ctx *ctx) 7042 { 7043 struct btrfs_root *root = inode->root; 7044 struct btrfs_fs_info *fs_info = root->fs_info; 7045 int ret = 0; 7046 bool log_dentries = false; 7047 7048 if (btrfs_test_opt(fs_info, NOTREELOG)) { 7049 ret = BTRFS_LOG_FORCE_COMMIT; 7050 goto end_no_trans; 7051 } 7052 7053 if (btrfs_root_refs(&root->root_item) == 0) { 7054 ret = BTRFS_LOG_FORCE_COMMIT; 7055 goto end_no_trans; 7056 } 7057 7058 /* 7059 * Skip already logged inodes or inodes corresponding to tmpfiles 7060 * (since logging them is pointless, a link count of 0 means they 7061 * will never be accessible). 
7062 */ 7063 if ((btrfs_inode_in_log(inode, trans->transid) && 7064 list_empty(&ctx->ordered_extents)) || 7065 inode->vfs_inode.i_nlink == 0) { 7066 ret = BTRFS_NO_LOG_SYNC; 7067 goto end_no_trans; 7068 } 7069 7070 ret = start_log_trans(trans, root, ctx); 7071 if (ret) 7072 goto end_no_trans; 7073 7074 ret = btrfs_log_inode(trans, inode, inode_only, ctx); 7075 if (ret) 7076 goto end_trans; 7077 7078 /* 7079 * for regular files, if its inode is already on disk, we don't 7080 * have to worry about the parents at all. This is because 7081 * we can use the last_unlink_trans field to record renames 7082 * and other fun in this file. 7083 */ 7084 if (S_ISREG(inode->vfs_inode.i_mode) && 7085 inode->generation < trans->transid && 7086 inode->last_unlink_trans < trans->transid) { 7087 ret = 0; 7088 goto end_trans; 7089 } 7090 7091 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx->log_new_dentries) 7092 log_dentries = true; 7093 7094 /* 7095 * On unlink we must make sure all our current and old parent directory 7096 * inodes are fully logged. This is to prevent leaving dangling 7097 * directory index entries in directories that were our parents but are 7098 * not anymore. Not doing this results in old parent directory being 7099 * impossible to delete after log replay (rmdir will always fail with 7100 * error -ENOTEMPTY). 7101 * 7102 * Example 1: 7103 * 7104 * mkdir testdir 7105 * touch testdir/foo 7106 * ln testdir/foo testdir/bar 7107 * sync 7108 * unlink testdir/bar 7109 * xfs_io -c fsync testdir/foo 7110 * <power failure> 7111 * mount fs, triggers log replay 7112 * 7113 * If we don't log the parent directory (testdir), after log replay the 7114 * directory still has an entry pointing to the file inode using the bar 7115 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and 7116 * the file inode has a link count of 1. 7117 * 7118 * Example 2: 7119 * 7120 * mkdir testdir 7121 * touch foo 7122 * ln foo testdir/foo2 7123 * ln foo testdir/foo3 7124 * sync 7125 * unlink testdir/foo3 7126 * xfs_io -c fsync foo 7127 * <power failure> 7128 * mount fs, triggers log replay 7129 * 7130 * Similar as the first example, after log replay the parent directory 7131 * testdir still has an entry pointing to the inode file with name foo3 7132 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item 7133 * and has a link count of 2. 7134 */ 7135 if (inode->last_unlink_trans >= trans->transid) { 7136 ret = btrfs_log_all_parents(trans, inode, ctx); 7137 if (ret) 7138 goto end_trans; 7139 } 7140 7141 ret = log_all_new_ancestors(trans, inode, parent, ctx); 7142 if (ret) 7143 goto end_trans; 7144 7145 if (log_dentries) 7146 ret = log_new_dir_dentries(trans, inode, ctx); 7147 else 7148 ret = 0; 7149 end_trans: 7150 if (ret < 0) { 7151 btrfs_set_log_full_commit(trans); 7152 ret = BTRFS_LOG_FORCE_COMMIT; 7153 } 7154 7155 if (ret) 7156 btrfs_remove_log_ctx(root, ctx); 7157 btrfs_end_log_trans(root); 7158 end_no_trans: 7159 return ret; 7160 } 7161 7162 /* 7163 * it is not safe to log dentry if the chunk root has added new 7164 * chunks. This returns 0 if the dentry was logged, and 1 otherwise. 7165 * If this returns 1, you must commit the transaction to safely get your 7166 * data on disk. 
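 * Rough caller pattern (illustrative, see btrfs_sync_file()): when this
 * returns 0 the caller syncs the log with btrfs_sync_log(), and when a full
 * commit is required it falls back to btrfs_commit_transaction() instead
 * (BTRFS_NO_LOG_SYNC means there is nothing to sync at all).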
7167 */ 7168 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, 7169 struct dentry *dentry, 7170 struct btrfs_log_ctx *ctx) 7171 { 7172 struct dentry *parent = dget_parent(dentry); 7173 int ret; 7174 7175 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent, 7176 LOG_INODE_ALL, ctx); 7177 dput(parent); 7178 7179 return ret; 7180 } 7181 7182 /* 7183 * should be called during mount to recover any replay any log trees 7184 * from the FS 7185 */ 7186 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) 7187 { 7188 int ret; 7189 struct btrfs_path *path; 7190 struct btrfs_trans_handle *trans; 7191 struct btrfs_key key; 7192 struct btrfs_key found_key; 7193 struct btrfs_root *log; 7194 struct btrfs_fs_info *fs_info = log_root_tree->fs_info; 7195 struct walk_control wc = { 7196 .process_func = process_one_buffer, 7197 .stage = LOG_WALK_PIN_ONLY, 7198 }; 7199 7200 path = btrfs_alloc_path(); 7201 if (!path) 7202 return -ENOMEM; 7203 7204 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); 7205 7206 trans = btrfs_start_transaction(fs_info->tree_root, 0); 7207 if (IS_ERR(trans)) { 7208 ret = PTR_ERR(trans); 7209 goto error; 7210 } 7211 7212 wc.trans = trans; 7213 wc.pin = 1; 7214 7215 ret = walk_log_tree(trans, log_root_tree, &wc); 7216 if (ret) { 7217 btrfs_abort_transaction(trans, ret); 7218 goto error; 7219 } 7220 7221 again: 7222 key.objectid = BTRFS_TREE_LOG_OBJECTID; 7223 key.offset = (u64)-1; 7224 key.type = BTRFS_ROOT_ITEM_KEY; 7225 7226 while (1) { 7227 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); 7228 7229 if (ret < 0) { 7230 btrfs_abort_transaction(trans, ret); 7231 goto error; 7232 } 7233 if (ret > 0) { 7234 if (path->slots[0] == 0) 7235 break; 7236 path->slots[0]--; 7237 } 7238 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 7239 path->slots[0]); 7240 btrfs_release_path(path); 7241 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID) 7242 break; 7243 7244 log = btrfs_read_tree_root(log_root_tree, &found_key); 7245 if (IS_ERR(log)) { 7246 ret = PTR_ERR(log); 7247 btrfs_abort_transaction(trans, ret); 7248 goto error; 7249 } 7250 7251 wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset, 7252 true); 7253 if (IS_ERR(wc.replay_dest)) { 7254 ret = PTR_ERR(wc.replay_dest); 7255 7256 /* 7257 * We didn't find the subvol, likely because it was 7258 * deleted. This is ok, simply skip this log and go to 7259 * the next one. 7260 * 7261 * We need to exclude the root because we can't have 7262 * other log replays overwriting this log as we'll read 7263 * it back in a few more times. This will keep our 7264 * block from being modified, and we'll just bail for 7265 * each subsequent pass. 
7266 */ 7267 if (ret == -ENOENT) 7268 ret = btrfs_pin_extent_for_log_replay(trans, log->node); 7269 btrfs_put_root(log); 7270 7271 if (!ret) 7272 goto next; 7273 btrfs_abort_transaction(trans, ret); 7274 goto error; 7275 } 7276 7277 wc.replay_dest->log_root = log; 7278 ret = btrfs_record_root_in_trans(trans, wc.replay_dest); 7279 if (ret) 7280 /* The loop needs to continue due to the root refs */ 7281 btrfs_abort_transaction(trans, ret); 7282 else 7283 ret = walk_log_tree(trans, log, &wc); 7284 7285 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { 7286 ret = fixup_inode_link_counts(trans, wc.replay_dest, 7287 path); 7288 if (ret) 7289 btrfs_abort_transaction(trans, ret); 7290 } 7291 7292 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { 7293 struct btrfs_root *root = wc.replay_dest; 7294 7295 btrfs_release_path(path); 7296 7297 /* 7298 * We have just replayed everything, and the highest 7299 * objectid of fs roots probably has changed in case 7300 * some inode_item's got replayed. 7301 * 7302 * root->objectid_mutex is not acquired as log replay 7303 * could only happen during mount. 7304 */ 7305 ret = btrfs_init_root_free_objectid(root); 7306 if (ret) 7307 btrfs_abort_transaction(trans, ret); 7308 } 7309 7310 wc.replay_dest->log_root = NULL; 7311 btrfs_put_root(wc.replay_dest); 7312 btrfs_put_root(log); 7313 7314 if (ret) 7315 goto error; 7316 next: 7317 if (found_key.offset == 0) 7318 break; 7319 key.offset = found_key.offset - 1; 7320 } 7321 btrfs_release_path(path); 7322 7323 /* step one is to pin it all, step two is to replay just inodes */ 7324 if (wc.pin) { 7325 wc.pin = 0; 7326 wc.process_func = replay_one_buffer; 7327 wc.stage = LOG_WALK_REPLAY_INODES; 7328 goto again; 7329 } 7330 /* step three is to replay everything */ 7331 if (wc.stage < LOG_WALK_REPLAY_ALL) { 7332 wc.stage++; 7333 goto again; 7334 } 7335 7336 btrfs_free_path(path); 7337 7338 /* step 4: commit the transaction, which also unpins the blocks */ 7339 ret = btrfs_commit_transaction(trans); 7340 if (ret) 7341 return ret; 7342 7343 log_root_tree->log_root = NULL; 7344 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); 7345 btrfs_put_root(log_root_tree); 7346 7347 return 0; 7348 error: 7349 if (wc.trans) 7350 btrfs_end_transaction(wc.trans); 7351 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); 7352 btrfs_free_path(path); 7353 return ret; 7354 } 7355 7356 /* 7357 * there are some corner cases where we want to force a full 7358 * commit instead of allowing a directory to be logged. 7359 * 7360 * They revolve around files there were unlinked from the directory, and 7361 * this function updates the parent directory so that a full commit is 7362 * properly done if it is fsync'd later after the unlinks are done. 7363 * 7364 * Must be called before the unlink operations (updates to the subvolume tree, 7365 * inodes, etc) are done. 7366 */ 7367 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, 7368 struct btrfs_inode *dir, struct btrfs_inode *inode, 7369 bool for_rename) 7370 { 7371 /* 7372 * when we're logging a file, if it hasn't been renamed 7373 * or unlinked, and its inode is fully committed on disk, 7374 * we don't have to worry about walking up the directory chain 7375 * to log its parents. 7376 * 7377 * So, we use the last_unlink_trans field to put this transid 7378 * into the file. When the file is logged we check it and 7379 * don't log the parents if the file is fully on disk. 
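 * (btrfs_log_inode_parent() is where this is consumed: it compares the
 * inode's last_unlink_trans against the current transaction id to decide
 * whether all parent directories need to be logged as well.)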
7380 */
7381 mutex_lock(&inode->log_mutex);
7382 inode->last_unlink_trans = trans->transid;
7383 mutex_unlock(&inode->log_mutex);
7384 
7385 if (!for_rename)
7386 return;
7387 
7388 /*
7389 * If this directory was already logged, any new names will be logged
7390 * with btrfs_log_new_name() and old names will be deleted from the log
7391 * tree with btrfs_del_dir_entries_in_log() or with
7392 * btrfs_del_inode_ref_in_log().
7393 */
7394 if (inode_logged(trans, dir, NULL) == 1)
7395 return;
7396 
7397 /*
7398 * If the inode we're about to unlink was logged before, the log will be
7399 * properly updated with the new name by btrfs_log_new_name() and the
7400 * old name removed with btrfs_del_dir_entries_in_log() or with
7401 * btrfs_del_inode_ref_in_log().
7402 */
7403 if (inode_logged(trans, inode, NULL) == 1)
7404 return;
7405 
7406 /*
7407 * when renaming files across directories, if the directory we're
7408 * unlinking from gets fsync'd later on, there's no way to find the
7409 * destination directory later and fsync it properly. So, we have to
7410 * be conservative and force commits so the new name gets discovered.
7411 */
7412 mutex_lock(&dir->log_mutex);
7413 dir->last_unlink_trans = trans->transid;
7414 mutex_unlock(&dir->log_mutex);
7415 }
7416 
7417 
7418 /*
7419 * Make sure that if someone attempts to fsync the parent directory of a deleted
7420 * snapshot, it ends up triggering a transaction commit. This is to guarantee
7421 * that after replaying the log tree of the parent directory's root we will not
7422 * see the snapshot anymore and at log replay time we will not see any log tree
7423 * corresponding to the deleted snapshot's root, which could lead to replaying
7424 * it after replaying the log tree of the parent directory (which would replay
7425 * the snapshot delete operation).
7426 *
7427 * Must be called before the actual snapshot destroy operations (updates to the
7428 * parent root and the tree of tree roots, etc) are done.
7429 */
7430 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
7431 struct btrfs_inode *dir)
7432 {
7433 mutex_lock(&dir->log_mutex);
7434 dir->last_unlink_trans = trans->transid;
7435 mutex_unlock(&dir->log_mutex);
7436 }
7437 
7438 /*
7439 * Update the log after adding a new name for an inode.
7440 *
7441 * @trans: Transaction handle.
7442 * @old_dentry: The dentry associated with the old name and the old
7443 * parent directory.
7444 * @old_dir: The inode of the previous parent directory for the case
7445 * of a rename. For a link operation, it must be NULL.
7446 * @old_dir_index: The index number associated with the old name, meaningful
7447 * only for rename operations (when @old_dir is not NULL).
7448 * Ignored for link operations.
7449 * @parent: The dentry associated with the directory under which the
7450 * new name is located.
7451 *
7452 * Call this after adding a new name for an inode, as a result of a link or
7453 * rename operation, and it will properly update the log to reflect the new name.
7454 */ 7455 void btrfs_log_new_name(struct btrfs_trans_handle *trans, 7456 struct dentry *old_dentry, struct btrfs_inode *old_dir, 7457 u64 old_dir_index, struct dentry *parent) 7458 { 7459 struct btrfs_inode *inode = BTRFS_I(d_inode(old_dentry)); 7460 struct btrfs_root *root = inode->root; 7461 struct btrfs_log_ctx ctx; 7462 bool log_pinned = false; 7463 int ret; 7464 7465 /* 7466 * this will force the logging code to walk the dentry chain 7467 * up for the file 7468 */ 7469 if (!S_ISDIR(inode->vfs_inode.i_mode)) 7470 inode->last_unlink_trans = trans->transid; 7471 7472 /* 7473 * if this inode hasn't been logged and directory we're renaming it 7474 * from hasn't been logged, we don't need to log it 7475 */ 7476 ret = inode_logged(trans, inode, NULL); 7477 if (ret < 0) { 7478 goto out; 7479 } else if (ret == 0) { 7480 if (!old_dir) 7481 return; 7482 /* 7483 * If the inode was not logged and we are doing a rename (old_dir is not 7484 * NULL), check if old_dir was logged - if it was not we can return and 7485 * do nothing. 7486 */ 7487 ret = inode_logged(trans, old_dir, NULL); 7488 if (ret < 0) 7489 goto out; 7490 else if (ret == 0) 7491 return; 7492 } 7493 ret = 0; 7494 7495 /* 7496 * If we are doing a rename (old_dir is not NULL) from a directory that 7497 * was previously logged, make sure that on log replay we get the old 7498 * dir entry deleted. This is needed because we will also log the new 7499 * name of the renamed inode, so we need to make sure that after log 7500 * replay we don't end up with both the new and old dir entries existing. 7501 */ 7502 if (old_dir && old_dir->logged_trans == trans->transid) { 7503 struct btrfs_root *log = old_dir->root->log_root; 7504 struct btrfs_path *path; 7505 struct fscrypt_name fname; 7506 7507 ASSERT(old_dir_index >= BTRFS_DIR_START_INDEX); 7508 7509 ret = fscrypt_setup_filename(&old_dir->vfs_inode, 7510 &old_dentry->d_name, 0, &fname); 7511 if (ret) 7512 goto out; 7513 /* 7514 * We have two inodes to update in the log, the old directory and 7515 * the inode that got renamed, so we must pin the log to prevent 7516 * anyone from syncing the log until we have updated both inodes 7517 * in the log. 7518 */ 7519 ret = join_running_log_trans(root); 7520 /* 7521 * At least one of the inodes was logged before, so this should 7522 * not fail, but if it does, it's not serious, just bail out and 7523 * mark the log for a full commit. 7524 */ 7525 if (WARN_ON_ONCE(ret < 0)) { 7526 fscrypt_free_filename(&fname); 7527 goto out; 7528 } 7529 7530 log_pinned = true; 7531 7532 path = btrfs_alloc_path(); 7533 if (!path) { 7534 ret = -ENOMEM; 7535 fscrypt_free_filename(&fname); 7536 goto out; 7537 } 7538 7539 /* 7540 * Other concurrent task might be logging the old directory, 7541 * as it can be triggered when logging other inode that had or 7542 * still has a dentry in the old directory. We lock the old 7543 * directory's log_mutex to ensure the deletion of the old 7544 * name is persisted, because during directory logging we 7545 * delete all BTRFS_DIR_LOG_INDEX_KEY keys and the deletion of 7546 * the old name's dir index item is in the delayed items, so 7547 * it could be missed by an in progress directory logging. 7548 */ 7549 mutex_lock(&old_dir->log_mutex); 7550 ret = del_logged_dentry(trans, log, path, btrfs_ino(old_dir), 7551 &fname.disk_name, old_dir_index); 7552 if (ret > 0) { 7553 /* 7554 * The dentry does not exist in the log, so record its 7555 * deletion. 
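 * We do that by inserting a dir log range item covering just old_dir_index,
 * so that at log replay time any dentry at that index in the subvolume tree
 * that is not present in the log gets deleted.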
7556 */ 7557 btrfs_release_path(path); 7558 ret = insert_dir_log_key(trans, log, path, 7559 btrfs_ino(old_dir), 7560 old_dir_index, old_dir_index); 7561 } 7562 mutex_unlock(&old_dir->log_mutex); 7563 7564 btrfs_free_path(path); 7565 fscrypt_free_filename(&fname); 7566 if (ret < 0) 7567 goto out; 7568 } 7569 7570 btrfs_init_log_ctx(&ctx, &inode->vfs_inode); 7571 ctx.logging_new_name = true; 7572 btrfs_init_log_ctx_scratch_eb(&ctx); 7573 /* 7574 * We don't care about the return value. If we fail to log the new name 7575 * then we know the next attempt to sync the log will fallback to a full 7576 * transaction commit (due to a call to btrfs_set_log_full_commit()), so 7577 * we don't need to worry about getting a log committed that has an 7578 * inconsistent state after a rename operation. 7579 */ 7580 btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx); 7581 free_extent_buffer(ctx.scratch_eb); 7582 ASSERT(list_empty(&ctx.conflict_inodes)); 7583 out: 7584 /* 7585 * If an error happened mark the log for a full commit because it's not 7586 * consistent and up to date or we couldn't find out if one of the 7587 * inodes was logged before in this transaction. Do it before unpinning 7588 * the log, to avoid any races with someone else trying to commit it. 7589 */ 7590 if (ret < 0) 7591 btrfs_set_log_full_commit(trans); 7592 if (log_pinned) 7593 btrfs_end_log_trans(root); 7594 } 7595 7596