// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "misc.h"
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "inode-map.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
enum {
	LOG_INODE_ALL,
	LOG_INODE_EXISTS,
	LOG_OTHER_INODE,
	LOG_OTHER_INODE_ALL,
};

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
enum {
	LOG_WALK_PIN_ONLY,
	LOG_WALK_REPLAY_INODES,
	LOG_WALK_REPLAY_DIR_INDEX,
	LOG_WALK_REPLAY_ALL,
};

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree is freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */
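
/*
 * Tying the two comments above together, for illustration: replay of a
 * single log tree walks it once per stage (a sketch of the intent, not
 * of the exact item types handled at each step):
 *
 *	LOG_WALK_PIN_ONLY	   pin every extent the log tree uses
 *	LOG_WALK_REPLAY_INODES	   create/overwrite the logged inode items
 *	LOG_WALK_REPLAY_DIR_INDEX  replay directory index keys
 *	LOG_WALK_REPLAY_ALL	   everything else (refs, extents, names)
 */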

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}
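
/*
 * A typical pairing of the two helpers above, for illustration only
 * (a sketch, not a real call site): a task that must keep the current
 * log transaction from completing while it touches the log does
 *
 *	btrfs_pin_log_trans(root);
 *	... update items in the log tree ...
 *	btrfs_end_log_trans(root);
 *
 * while the sync side waits for root->log_writers to drop to zero on
 * root->log_writer_wait before writing the tree out.
 */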

static int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}
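
/*
 * For illustration only, a pin-only walk is driven by setting up the
 * control struct roughly like
 *
 *	struct walk_control wc = {
 *		.pin = 1,
 *		.process_func = process_one_buffer,
 *		.stage = LOG_WALK_PIN_ONLY,
 *	};
 *
 * and handing it to the tree walk, which then calls process_func on
 * every block it finds (a sketch; the real setup lives in the log
 * recovery code).
 */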

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(path, item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token, dst_eb);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
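
/*
 * For illustration of the generation == 0 rule in overwrite_item()
 * above: a directory fsync may log a child inode with LOG_INODE_EXISTS
 * only so the inode can be recreated; such inode items carry a logged
 * generation of 0, and on replay they never clobber the real inode
 * item (though for regular files a non-zero logged i_size is still
 * copied so valid data isn't thrown away).
 */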

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		inode = NULL;
	return inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
				       btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree (affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			struct btrfs_ref ref = { 0 };
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree?
			 * If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						       ins.offset);
			if (ret == 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						ins.objectid, ins.offset, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key->objectid, offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our
			 * extent starting at an offset of 40K or higher, will
			 * end up looking at the second csum item only, which
			 * does not contain the checksum for any block starting
			 * at offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans, fs_info,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	inode_add_bytes(inode, nbytes);
update_inode:
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
			name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans);
out:
	kfree(name);
	iput(inode);
	return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}
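
/*
 * For illustration (hypothetical values): for the name "foo" at index 3
 * in directory 256, inode_in_dir() above only reports a match if both
 * the (256 DIR_INDEX 3) item and the (256 DIR_ITEM <hash of "foo">)
 * item exist and point at the expected inode.
 */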

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path->nodes[0],
						   path->slots[0],
						   ref_objectid,
						   name, namelen))
			match = 1;

		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}

static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.
		 * if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid,
					    victim_name,
					    victim_name_len)) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								trans);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
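	/*
	 * Illustrative case for the index lookup just above: the
	 * subvolume may still hold a pre-crash entry with this name at
	 * this index that points at a different inode; it has to be
	 * dropped (and that inode sent through the fixup dir) before
	 * the logged entry can take its place.
	 */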
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}

static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}
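
/*
 * Illustrative layout of an INODE_REF item holding two names, as the
 * walkers above and below parse it (a sketch):
 *
 *	[ btrfs_inode_ref | "foo" | btrfs_inode_ref | "bar" ]
 *
 * each struct btrfs_inode_ref header stores the dir index and the
 * length of the name that immediately follows it.
 */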

/*
 * Take an inode reference item from the log tree and iterate all names from the
 * inode reference item in the subvolume tree with the same key (if it exists).
 * For any name that is not in the inode reference item from the log tree, do a
 * proper unlink of that name (that is, remove its entry from the inode
 * reference item and both dir index keys).
 */
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
							       parent_id, name,
							       namelen);
		else
			ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
							   name, namelen);

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	if (key.type == BTRFS_INODE_REF_KEY)
		key.offset = parent_id;
	else
		key.offset = btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (key.type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
				path->slots[0], parent_id, name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						   name, namelen);

out:
	btrfs_free_path(path);
	return ret;
}
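
/*
 * Illustrative collision handled by add_link() below (hypothetical
 * inode numbers): the log says inode 257 now owns the name "x", but
 * the subvolume still has "x" pointing at inode 258, which is also in
 * the log and will only be processed later. The stale dentry is
 * unlinked first (bumping the other inode's link count if it dropped
 * to zero) and only then is the new link added.
 */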

static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct inode *dir, struct inode *inode, const char *name,
		    int namelen, u64 ref_index)
{
	struct btrfs_dir_item *dir_item;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct inode *other_inode = NULL;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_item = btrfs_lookup_dir_item(NULL, root, path,
					 btrfs_ino(BTRFS_I(dir)),
					 name, namelen, 0);
	if (!dir_item) {
		btrfs_release_path(path);
		goto add_link;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		goto out;
	}

	/*
	 * Our inode's dentry collides with the dentry of another inode which is
	 * in the log but not yet processed since it has a higher inode number.
	 * So delete that other dentry.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
	btrfs_release_path(path);
	other_inode = read_one_inode(root, key.objectid);
	if (!other_inode) {
		ret = -ENOENT;
		goto out;
	}
	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
				 name, namelen);
	if (ret)
		goto out;
	/*
	 * If we dropped the link count to 0, bump it so that later the iput()
	 * on the inode will not free it. We will fixup the link count later.
	 */
	if (other_inode->i_nlink == 0)
		inc_nlink(other_inode);

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto out;
add_link:
	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     name, namelen, 0, ref_index);
out:
	iput(other_inode);
	btrfs_free_path(path);

	return ret;
}

/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
					btrfs_ino(BTRFS_I(inode)), ref_index,
					name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/*
			 * If a reference item already exists for this inode
			 * with the same parent and name, but different index,
			 * drop it and the corresponding directory index entries
			 * from the parent before adding the new reference item
			 * and dir index entries, otherwise we would fail with
			 * -EEXIST returned from btrfs_add_link() below.
			 */
			ret = btrfs_inode_ref_exists(inode, dir, key->type,
						     name, namelen);
			if (ret > 0) {
				ret = btrfs_unlink_inode(trans, root,
							 BTRFS_I(dir),
							 BTRFS_I(inode),
							 name, namelen);
				/*
				 * If we dropped the link count to 0, bump it so
				 * that later the iput() on the inode will not
				 * free it. We will fixup the link count later.
				 */
				if (!ret && inode->i_nlink == 0)
					inc_nlink(inode);
			}
			if (ret < 0)
				goto out;

			/* insert our name */
			ret = add_link(trans, root, dir, inode, name, namelen,
				       ref_index);
			if (ret)
				goto out;

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/*
	 * Before we overwrite the inode reference item in the subvolume tree
	 * with the item from the log tree, we must unlink all names from the
	 * parent directory that are in the subvolume's tree inode reference
	 * item, otherwise we end up with an inconsistent subvolume tree where
	 * dir index entries exist for a name but there is no inode reference
	 * item with the same name.
	 */
	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
				    key);
	if (ret)
		goto out;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}

static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 ino)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, root, ino);
	if (ret == -EEXIST)
		ret = 0;

	return ret;
}

static int count_inode_extrefs(struct btrfs_root *root,
		struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}

static int count_inode_refs(struct btrfs_root *root,
			struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}
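
/*
 * Putting the two counters above together, for illustration: an inode
 * with two names stored in plain INODE_REF items and one name stored
 * in an INODE_EXTREF item ends up with a computed link count of 3.
 */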

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}

static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
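
/*
 * For illustration, after replaying a log that put inodes 257 and 260
 * through the fixup dir (hypothetical values), the tree holds
 *
 *	(BTRFS_TREE_LOG_FIXUP_OBJECTID ORPHAN_ITEM 257)
 *	(BTRFS_TREE_LOG_FIXUP_OBJECTID ORPHAN_ITEM 260)
 *
 * and fixup_inode_link_counts() above walks them from the highest
 * offset down, deleting each item and correcting that inode's link
 * count.
 */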

/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG(); /* Logic Error */
	}
	iput(inode);

	return ret;
}

/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
			name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}

/*
 * Return true if an inode reference exists in the log for the given name,
 * inode and parent inode.
 */
static bool name_in_log_ref(struct btrfs_root *log_root,
			    const char *name, const int name_len,
			    const u64 dirid, const u64 ino)
{
	struct btrfs_key search_key;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = dirid;
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	search_key.type = BTRFS_INODE_EXTREF_KEY;
	search_key.offset = btrfs_extref_hash(dirid, name, name_len);
	if (backref_in_log(log_root, &search_key, dirid, name, name_len))
		return true;

	return false;
}
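
/*
 * For example (hypothetical values): for the name "foo" in directory
 * 256 belonging to inode 257, the two keys probed above are
 *
 *	(257 INODE_REF 256)
 *	(257 INODE_EXTREF btrfs_extref_hash(256, "foo", 3))
 */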

/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, dir);
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	if (name_in_log_ref(root->log_root, name, name_len,
			    key->objectid, log_key.objectid)) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, key->objectid, key->offset,
			      name, name_len, &log_key);
	if (ret && ret != -ENOENT && ret != -EEXIST)
		goto out;
	if (!ret)
		name_added = true;
	update_size = false;
	ret = 0;
	goto out;
}
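
/*
 * For illustration: the same name "foo" in directory 256 is logged
 * under two keys, (256 DIR_ITEM <hash of "foo">) and
 * (256 DIR_INDEX <sequence>), and replay_one_name() runs for both;
 * only the DIR_INDEX variant carries the sequence number needed to
 * insert a missing entry, which is why inserts happen for that key
 * type alone.
 */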
Only BTRFS_DIR_ITEM_KEY types will have more than
2049  * one name in a directory item, but the same code gets used for
2050  * both directory index types. (The key offset of a BTRFS_DIR_ITEM_KEY
      * is a hash of the name, so names whose hashes collide share one
      * item; a BTRFS_DIR_INDEX_KEY item always carries a single name.)
2051  */
2052 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
2053                 struct btrfs_root *root,
2054                 struct btrfs_path *path,
2055                 struct extent_buffer *eb, int slot,
2056                 struct btrfs_key *key)
2057 {
2058     int ret = 0;
2059     u32 item_size = btrfs_item_size_nr(eb, slot);
2060     struct btrfs_dir_item *di;
2061     int name_len;
2062     unsigned long ptr;
2063     unsigned long ptr_end;
2064     struct btrfs_path *fixup_path = NULL;
2065
2066     ptr = btrfs_item_ptr_offset(eb, slot);
2067     ptr_end = ptr + item_size;
2068     while (ptr < ptr_end) {
2069         di = (struct btrfs_dir_item *)ptr;
2070         name_len = btrfs_dir_name_len(eb, di);
2071         ret = replay_one_name(trans, root, path, eb, di, key);
2072         if (ret < 0)
2073             break;
2074         ptr = (unsigned long)(di + 1);
2075         ptr += name_len;
2076
2077         /*
2078          * If this entry refers to a non-directory (directories can not
2079          * have a link count > 1) and it was added in the transaction
2080          * that was not committed, make sure we fixup the link count of
2081          * the inode the entry points to. Otherwise something like
2082          * the following would result in a directory pointing to an
2083          * inode with a wrong link count that does not account for this
2084          * dir entry:
2085          *
2086          * mkdir testdir
2087          * touch testdir/foo
2088          * touch testdir/bar
2089          * sync
2090          *
2091          * ln testdir/bar testdir/bar_link
2092          * ln testdir/foo testdir/foo_link
2093          * xfs_io -c "fsync" testdir/bar
2094          *
2095          * <power failure>
2096          *
2097          * mount fs, log replay happens
2098          *
2099          * File foo would remain with a link count of 1 when it has two
2100          * entries pointing to it in the directory testdir. This would
2101          * make it impossible to ever delete the parent directory, as
2102          * it would result in stale dentries that can never be deleted.
2103          */
2104         if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
2105             struct btrfs_key di_key;
2106
2107             if (!fixup_path) {
2108                 fixup_path = btrfs_alloc_path();
2109                 if (!fixup_path) {
2110                     ret = -ENOMEM;
2111                     break;
2112                 }
2113             }
2114
2115             btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2116             ret = link_to_fixup_dir(trans, root, fixup_path,
2117                         di_key.objectid);
2118             if (ret)
2119                 break;
2120         }
2121         ret = 0;
2122     }
2123     btrfs_free_path(fixup_path);
2124     return ret;
2125 }
2126
2127 /*
2128  * directory replay has two parts. There are the standard directory
2129  * items in the log copied from the subvolume, and range items
2130  * created in the log while the subvolume was logged.
2131  *
2132  * The range items tell us which parts of the key space the log
2133  * is authoritative for. During replay, if a key in the subvolume
2134  * directory is in a logged range item, but not actually in the log,
2135  * that means it was deleted from the directory before the fsync
2136  * and should be removed.
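 *
 * A made-up example of the range semantics (offsets are illustrative
 * only): if the log has a dir range item covering index offsets
 * [0, 100] and the subvolume directory still contains an index key at
 * offset 50 with no matching entry in the log, that name was deleted
 * before the fsync and replay must unlink it. A key at offset 200 is
 * outside every logged range, so the log says nothing about it and it
 * is left alone.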
2137 */ 2138 static noinline int find_dir_range(struct btrfs_root *root, 2139 struct btrfs_path *path, 2140 u64 dirid, int key_type, 2141 u64 *start_ret, u64 *end_ret) 2142 { 2143 struct btrfs_key key; 2144 u64 found_end; 2145 struct btrfs_dir_log_item *item; 2146 int ret; 2147 int nritems; 2148 2149 if (*start_ret == (u64)-1) 2150 return 1; 2151 2152 key.objectid = dirid; 2153 key.type = key_type; 2154 key.offset = *start_ret; 2155 2156 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2157 if (ret < 0) 2158 goto out; 2159 if (ret > 0) { 2160 if (path->slots[0] == 0) 2161 goto out; 2162 path->slots[0]--; 2163 } 2164 if (ret != 0) 2165 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 2166 2167 if (key.type != key_type || key.objectid != dirid) { 2168 ret = 1; 2169 goto next; 2170 } 2171 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2172 struct btrfs_dir_log_item); 2173 found_end = btrfs_dir_log_end(path->nodes[0], item); 2174 2175 if (*start_ret >= key.offset && *start_ret <= found_end) { 2176 ret = 0; 2177 *start_ret = key.offset; 2178 *end_ret = found_end; 2179 goto out; 2180 } 2181 ret = 1; 2182 next: 2183 /* check the next slot in the tree to see if it is a valid item */ 2184 nritems = btrfs_header_nritems(path->nodes[0]); 2185 path->slots[0]++; 2186 if (path->slots[0] >= nritems) { 2187 ret = btrfs_next_leaf(root, path); 2188 if (ret) 2189 goto out; 2190 } 2191 2192 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 2193 2194 if (key.type != key_type || key.objectid != dirid) { 2195 ret = 1; 2196 goto out; 2197 } 2198 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2199 struct btrfs_dir_log_item); 2200 found_end = btrfs_dir_log_end(path->nodes[0], item); 2201 *start_ret = key.offset; 2202 *end_ret = found_end; 2203 ret = 0; 2204 out: 2205 btrfs_release_path(path); 2206 return ret; 2207 } 2208 2209 /* 2210 * this looks for a given directory item in the log. 
If the directory 2211 * item is not in the log, the item is removed and the inode it points 2212 * to is unlinked 2213 */ 2214 static noinline int check_item_in_log(struct btrfs_trans_handle *trans, 2215 struct btrfs_root *root, 2216 struct btrfs_root *log, 2217 struct btrfs_path *path, 2218 struct btrfs_path *log_path, 2219 struct inode *dir, 2220 struct btrfs_key *dir_key) 2221 { 2222 int ret; 2223 struct extent_buffer *eb; 2224 int slot; 2225 u32 item_size; 2226 struct btrfs_dir_item *di; 2227 struct btrfs_dir_item *log_di; 2228 int name_len; 2229 unsigned long ptr; 2230 unsigned long ptr_end; 2231 char *name; 2232 struct inode *inode; 2233 struct btrfs_key location; 2234 2235 again: 2236 eb = path->nodes[0]; 2237 slot = path->slots[0]; 2238 item_size = btrfs_item_size_nr(eb, slot); 2239 ptr = btrfs_item_ptr_offset(eb, slot); 2240 ptr_end = ptr + item_size; 2241 while (ptr < ptr_end) { 2242 di = (struct btrfs_dir_item *)ptr; 2243 name_len = btrfs_dir_name_len(eb, di); 2244 name = kmalloc(name_len, GFP_NOFS); 2245 if (!name) { 2246 ret = -ENOMEM; 2247 goto out; 2248 } 2249 read_extent_buffer(eb, name, (unsigned long)(di + 1), 2250 name_len); 2251 log_di = NULL; 2252 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) { 2253 log_di = btrfs_lookup_dir_item(trans, log, log_path, 2254 dir_key->objectid, 2255 name, name_len, 0); 2256 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) { 2257 log_di = btrfs_lookup_dir_index_item(trans, log, 2258 log_path, 2259 dir_key->objectid, 2260 dir_key->offset, 2261 name, name_len, 0); 2262 } 2263 if (!log_di || log_di == ERR_PTR(-ENOENT)) { 2264 btrfs_dir_item_key_to_cpu(eb, di, &location); 2265 btrfs_release_path(path); 2266 btrfs_release_path(log_path); 2267 inode = read_one_inode(root, location.objectid); 2268 if (!inode) { 2269 kfree(name); 2270 return -EIO; 2271 } 2272 2273 ret = link_to_fixup_dir(trans, root, 2274 path, location.objectid); 2275 if (ret) { 2276 kfree(name); 2277 iput(inode); 2278 goto out; 2279 } 2280 2281 inc_nlink(inode); 2282 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), 2283 BTRFS_I(inode), name, name_len); 2284 if (!ret) 2285 ret = btrfs_run_delayed_items(trans); 2286 kfree(name); 2287 iput(inode); 2288 if (ret) 2289 goto out; 2290 2291 /* there might still be more names under this key 2292 * check and repeat if required 2293 */ 2294 ret = btrfs_search_slot(NULL, root, dir_key, path, 2295 0, 0); 2296 if (ret == 0) 2297 goto again; 2298 ret = 0; 2299 goto out; 2300 } else if (IS_ERR(log_di)) { 2301 kfree(name); 2302 return PTR_ERR(log_di); 2303 } 2304 btrfs_release_path(log_path); 2305 kfree(name); 2306 2307 ptr = (unsigned long)(di + 1); 2308 ptr += name_len; 2309 } 2310 ret = 0; 2311 out: 2312 btrfs_release_path(path); 2313 btrfs_release_path(log_path); 2314 return ret; 2315 } 2316 2317 static int replay_xattr_deletes(struct btrfs_trans_handle *trans, 2318 struct btrfs_root *root, 2319 struct btrfs_root *log, 2320 struct btrfs_path *path, 2321 const u64 ino) 2322 { 2323 struct btrfs_key search_key; 2324 struct btrfs_path *log_path; 2325 int i; 2326 int nritems; 2327 int ret; 2328 2329 log_path = btrfs_alloc_path(); 2330 if (!log_path) 2331 return -ENOMEM; 2332 2333 search_key.objectid = ino; 2334 search_key.type = BTRFS_XATTR_ITEM_KEY; 2335 search_key.offset = 0; 2336 again: 2337 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 2338 if (ret < 0) 2339 goto out; 2340 process_leaf: 2341 nritems = btrfs_header_nritems(path->nodes[0]); 2342 for (i = path->slots[0]; i < nritems; i++) { 2343 struct btrfs_key key; 
2344 struct btrfs_dir_item *di; 2345 struct btrfs_dir_item *log_di; 2346 u32 total_size; 2347 u32 cur; 2348 2349 btrfs_item_key_to_cpu(path->nodes[0], &key, i); 2350 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) { 2351 ret = 0; 2352 goto out; 2353 } 2354 2355 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item); 2356 total_size = btrfs_item_size_nr(path->nodes[0], i); 2357 cur = 0; 2358 while (cur < total_size) { 2359 u16 name_len = btrfs_dir_name_len(path->nodes[0], di); 2360 u16 data_len = btrfs_dir_data_len(path->nodes[0], di); 2361 u32 this_len = sizeof(*di) + name_len + data_len; 2362 char *name; 2363 2364 name = kmalloc(name_len, GFP_NOFS); 2365 if (!name) { 2366 ret = -ENOMEM; 2367 goto out; 2368 } 2369 read_extent_buffer(path->nodes[0], name, 2370 (unsigned long)(di + 1), name_len); 2371 2372 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino, 2373 name, name_len, 0); 2374 btrfs_release_path(log_path); 2375 if (!log_di) { 2376 /* Doesn't exist in log tree, so delete it. */ 2377 btrfs_release_path(path); 2378 di = btrfs_lookup_xattr(trans, root, path, ino, 2379 name, name_len, -1); 2380 kfree(name); 2381 if (IS_ERR(di)) { 2382 ret = PTR_ERR(di); 2383 goto out; 2384 } 2385 ASSERT(di); 2386 ret = btrfs_delete_one_dir_name(trans, root, 2387 path, di); 2388 if (ret) 2389 goto out; 2390 btrfs_release_path(path); 2391 search_key = key; 2392 goto again; 2393 } 2394 kfree(name); 2395 if (IS_ERR(log_di)) { 2396 ret = PTR_ERR(log_di); 2397 goto out; 2398 } 2399 cur += this_len; 2400 di = (struct btrfs_dir_item *)((char *)di + this_len); 2401 } 2402 } 2403 ret = btrfs_next_leaf(root, path); 2404 if (ret > 0) 2405 ret = 0; 2406 else if (ret == 0) 2407 goto process_leaf; 2408 out: 2409 btrfs_free_path(log_path); 2410 btrfs_release_path(path); 2411 return ret; 2412 } 2413 2414 2415 /* 2416 * deletion replay happens before we copy any new directory items 2417 * out of the log or out of backreferences from inodes. It 2418 * scans the log to find ranges of keys that log is authoritative for, 2419 * and then scans the directory to find items in those ranges that are 2420 * not present in the log. 2421 * 2422 * Anything we don't find in the log is unlinked and removed from the 2423 * directory. 
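 *
 * A hypothetical sequence that this handles (names invented):
 *
 * touch dir/a
 * touch dir/b
 * sync
 * rm dir/b
 * xfs_io -c "fsync" dir
 * <power failure>
 *
 * The log is authoritative for the key range that used to contain "b",
 * but "b" itself is not in the log, so replay unlinks it from the
 * subvolume copy of dir and leaves "a" alone.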
2424 */ 2425 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 2426 struct btrfs_root *root, 2427 struct btrfs_root *log, 2428 struct btrfs_path *path, 2429 u64 dirid, int del_all) 2430 { 2431 u64 range_start; 2432 u64 range_end; 2433 int key_type = BTRFS_DIR_LOG_ITEM_KEY; 2434 int ret = 0; 2435 struct btrfs_key dir_key; 2436 struct btrfs_key found_key; 2437 struct btrfs_path *log_path; 2438 struct inode *dir; 2439 2440 dir_key.objectid = dirid; 2441 dir_key.type = BTRFS_DIR_ITEM_KEY; 2442 log_path = btrfs_alloc_path(); 2443 if (!log_path) 2444 return -ENOMEM; 2445 2446 dir = read_one_inode(root, dirid); 2447 /* it isn't an error if the inode isn't there, that can happen 2448 * because we replay the deletes before we copy in the inode item 2449 * from the log 2450 */ 2451 if (!dir) { 2452 btrfs_free_path(log_path); 2453 return 0; 2454 } 2455 again: 2456 range_start = 0; 2457 range_end = 0; 2458 while (1) { 2459 if (del_all) 2460 range_end = (u64)-1; 2461 else { 2462 ret = find_dir_range(log, path, dirid, key_type, 2463 &range_start, &range_end); 2464 if (ret != 0) 2465 break; 2466 } 2467 2468 dir_key.offset = range_start; 2469 while (1) { 2470 int nritems; 2471 ret = btrfs_search_slot(NULL, root, &dir_key, path, 2472 0, 0); 2473 if (ret < 0) 2474 goto out; 2475 2476 nritems = btrfs_header_nritems(path->nodes[0]); 2477 if (path->slots[0] >= nritems) { 2478 ret = btrfs_next_leaf(root, path); 2479 if (ret == 1) 2480 break; 2481 else if (ret < 0) 2482 goto out; 2483 } 2484 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2485 path->slots[0]); 2486 if (found_key.objectid != dirid || 2487 found_key.type != dir_key.type) 2488 goto next_type; 2489 2490 if (found_key.offset > range_end) 2491 break; 2492 2493 ret = check_item_in_log(trans, root, log, path, 2494 log_path, dir, 2495 &found_key); 2496 if (ret) 2497 goto out; 2498 if (found_key.offset == (u64)-1) 2499 break; 2500 dir_key.offset = found_key.offset + 1; 2501 } 2502 btrfs_release_path(path); 2503 if (range_end == (u64)-1) 2504 break; 2505 range_start = range_end + 1; 2506 } 2507 2508 next_type: 2509 ret = 0; 2510 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { 2511 key_type = BTRFS_DIR_LOG_INDEX_KEY; 2512 dir_key.type = BTRFS_DIR_INDEX_KEY; 2513 btrfs_release_path(path); 2514 goto again; 2515 } 2516 out: 2517 btrfs_release_path(path); 2518 btrfs_free_path(log_path); 2519 iput(dir); 2520 return ret; 2521 } 2522 2523 /* 2524 * the process_func used to replay items from the log tree. This 2525 * gets called in two different stages. The first stage just looks 2526 * for inodes and makes sure they are all copied into the subvolume. 2527 * 2528 * The second stage copies all the other item types from the log into 2529 * the subvolume. The two stage approach is slower, but gets rid of 2530 * lots of complexity around inodes referencing other inodes that exist 2531 * only in the log (references come from either directory items or inode 2532 * back refs). 
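 *
 * For example, a logged directory entry can point at an inode whose
 * inode item sits later in the same log tree. Because the
 * LOG_WALK_REPLAY_INODES stage creates every logged inode first, by the
 * time names and back references are replayed every inode they can
 * refer to already exists in the subvolume.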
2533 */ 2534 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, 2535 struct walk_control *wc, u64 gen, int level) 2536 { 2537 int nritems; 2538 struct btrfs_path *path; 2539 struct btrfs_root *root = wc->replay_dest; 2540 struct btrfs_key key; 2541 int i; 2542 int ret; 2543 2544 ret = btrfs_read_buffer(eb, gen, level, NULL); 2545 if (ret) 2546 return ret; 2547 2548 level = btrfs_header_level(eb); 2549 2550 if (level != 0) 2551 return 0; 2552 2553 path = btrfs_alloc_path(); 2554 if (!path) 2555 return -ENOMEM; 2556 2557 nritems = btrfs_header_nritems(eb); 2558 for (i = 0; i < nritems; i++) { 2559 btrfs_item_key_to_cpu(eb, &key, i); 2560 2561 /* inode keys are done during the first stage */ 2562 if (key.type == BTRFS_INODE_ITEM_KEY && 2563 wc->stage == LOG_WALK_REPLAY_INODES) { 2564 struct btrfs_inode_item *inode_item; 2565 u32 mode; 2566 2567 inode_item = btrfs_item_ptr(eb, i, 2568 struct btrfs_inode_item); 2569 /* 2570 * If we have a tmpfile (O_TMPFILE) that got fsync'ed 2571 * and never got linked before the fsync, skip it, as 2572 * replaying it is pointless since it would be deleted 2573 * later. We skip logging tmpfiles, but it's always 2574 * possible we are replaying a log created with a kernel 2575 * that used to log tmpfiles. 2576 */ 2577 if (btrfs_inode_nlink(eb, inode_item) == 0) { 2578 wc->ignore_cur_inode = true; 2579 continue; 2580 } else { 2581 wc->ignore_cur_inode = false; 2582 } 2583 ret = replay_xattr_deletes(wc->trans, root, log, 2584 path, key.objectid); 2585 if (ret) 2586 break; 2587 mode = btrfs_inode_mode(eb, inode_item); 2588 if (S_ISDIR(mode)) { 2589 ret = replay_dir_deletes(wc->trans, 2590 root, log, path, key.objectid, 0); 2591 if (ret) 2592 break; 2593 } 2594 ret = overwrite_item(wc->trans, root, path, 2595 eb, i, &key); 2596 if (ret) 2597 break; 2598 2599 /* 2600 * Before replaying extents, truncate the inode to its 2601 * size. We need to do it now and not after log replay 2602 * because before an fsync we can have prealloc extents 2603 * added beyond the inode's i_size. If we did it after, 2604 * through orphan cleanup for example, we would drop 2605 * those prealloc extents just after replaying them. 2606 */ 2607 if (S_ISREG(mode)) { 2608 struct inode *inode; 2609 u64 from; 2610 2611 inode = read_one_inode(root, key.objectid); 2612 if (!inode) { 2613 ret = -EIO; 2614 break; 2615 } 2616 from = ALIGN(i_size_read(inode), 2617 root->fs_info->sectorsize); 2618 ret = btrfs_drop_extents(wc->trans, root, inode, 2619 from, (u64)-1, 1); 2620 if (!ret) { 2621 /* Update the inode's nbytes. 
*/ 2622 ret = btrfs_update_inode(wc->trans, 2623 root, inode); 2624 } 2625 iput(inode); 2626 if (ret) 2627 break; 2628 } 2629 2630 ret = link_to_fixup_dir(wc->trans, root, 2631 path, key.objectid); 2632 if (ret) 2633 break; 2634 } 2635 2636 if (wc->ignore_cur_inode) 2637 continue; 2638 2639 if (key.type == BTRFS_DIR_INDEX_KEY && 2640 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) { 2641 ret = replay_one_dir_item(wc->trans, root, path, 2642 eb, i, &key); 2643 if (ret) 2644 break; 2645 } 2646 2647 if (wc->stage < LOG_WALK_REPLAY_ALL) 2648 continue; 2649 2650 /* these keys are simply copied */ 2651 if (key.type == BTRFS_XATTR_ITEM_KEY) { 2652 ret = overwrite_item(wc->trans, root, path, 2653 eb, i, &key); 2654 if (ret) 2655 break; 2656 } else if (key.type == BTRFS_INODE_REF_KEY || 2657 key.type == BTRFS_INODE_EXTREF_KEY) { 2658 ret = add_inode_ref(wc->trans, root, log, path, 2659 eb, i, &key); 2660 if (ret && ret != -ENOENT) 2661 break; 2662 ret = 0; 2663 } else if (key.type == BTRFS_EXTENT_DATA_KEY) { 2664 ret = replay_one_extent(wc->trans, root, path, 2665 eb, i, &key); 2666 if (ret) 2667 break; 2668 } else if (key.type == BTRFS_DIR_ITEM_KEY) { 2669 ret = replay_one_dir_item(wc->trans, root, path, 2670 eb, i, &key); 2671 if (ret) 2672 break; 2673 } 2674 } 2675 btrfs_free_path(path); 2676 return ret; 2677 } 2678 2679 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, 2680 struct btrfs_root *root, 2681 struct btrfs_path *path, int *level, 2682 struct walk_control *wc) 2683 { 2684 struct btrfs_fs_info *fs_info = root->fs_info; 2685 u64 root_owner; 2686 u64 bytenr; 2687 u64 ptr_gen; 2688 struct extent_buffer *next; 2689 struct extent_buffer *cur; 2690 struct extent_buffer *parent; 2691 u32 blocksize; 2692 int ret = 0; 2693 2694 WARN_ON(*level < 0); 2695 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2696 2697 while (*level > 0) { 2698 struct btrfs_key first_key; 2699 2700 WARN_ON(*level < 0); 2701 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2702 cur = path->nodes[*level]; 2703 2704 WARN_ON(btrfs_header_level(cur) != *level); 2705 2706 if (path->slots[*level] >= 2707 btrfs_header_nritems(cur)) 2708 break; 2709 2710 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); 2711 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 2712 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]); 2713 blocksize = fs_info->nodesize; 2714 2715 parent = path->nodes[*level]; 2716 root_owner = btrfs_header_owner(parent); 2717 2718 next = btrfs_find_create_tree_block(fs_info, bytenr); 2719 if (IS_ERR(next)) 2720 return PTR_ERR(next); 2721 2722 if (*level == 1) { 2723 ret = wc->process_func(root, next, wc, ptr_gen, 2724 *level - 1); 2725 if (ret) { 2726 free_extent_buffer(next); 2727 return ret; 2728 } 2729 2730 path->slots[*level]++; 2731 if (wc->free) { 2732 ret = btrfs_read_buffer(next, ptr_gen, 2733 *level - 1, &first_key); 2734 if (ret) { 2735 free_extent_buffer(next); 2736 return ret; 2737 } 2738 2739 if (trans) { 2740 btrfs_tree_lock(next); 2741 btrfs_set_lock_blocking_write(next); 2742 btrfs_clean_tree_block(next); 2743 btrfs_wait_tree_block_writeback(next); 2744 btrfs_tree_unlock(next); 2745 } else { 2746 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2747 clear_extent_buffer_dirty(next); 2748 } 2749 2750 WARN_ON(root_owner != 2751 BTRFS_TREE_LOG_OBJECTID); 2752 ret = btrfs_free_and_pin_reserved_extent( 2753 fs_info, bytenr, 2754 blocksize); 2755 if (ret) { 2756 free_extent_buffer(next); 2757 return ret; 2758 } 2759 } 2760 free_extent_buffer(next); 2761 continue; 2762 } 2763 ret = 
btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key); 2764 if (ret) { 2765 free_extent_buffer(next); 2766 return ret; 2767 } 2768 2769 WARN_ON(*level <= 0); 2770 if (path->nodes[*level-1]) 2771 free_extent_buffer(path->nodes[*level-1]); 2772 path->nodes[*level-1] = next; 2773 *level = btrfs_header_level(next); 2774 path->slots[*level] = 0; 2775 cond_resched(); 2776 } 2777 WARN_ON(*level < 0); 2778 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2779 2780 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); 2781 2782 cond_resched(); 2783 return 0; 2784 } 2785 2786 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, 2787 struct btrfs_root *root, 2788 struct btrfs_path *path, int *level, 2789 struct walk_control *wc) 2790 { 2791 struct btrfs_fs_info *fs_info = root->fs_info; 2792 u64 root_owner; 2793 int i; 2794 int slot; 2795 int ret; 2796 2797 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { 2798 slot = path->slots[i]; 2799 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { 2800 path->slots[i]++; 2801 *level = i; 2802 WARN_ON(*level == 0); 2803 return 0; 2804 } else { 2805 struct extent_buffer *parent; 2806 if (path->nodes[*level] == root->node) 2807 parent = path->nodes[*level]; 2808 else 2809 parent = path->nodes[*level + 1]; 2810 2811 root_owner = btrfs_header_owner(parent); 2812 ret = wc->process_func(root, path->nodes[*level], wc, 2813 btrfs_header_generation(path->nodes[*level]), 2814 *level); 2815 if (ret) 2816 return ret; 2817 2818 if (wc->free) { 2819 struct extent_buffer *next; 2820 2821 next = path->nodes[*level]; 2822 2823 if (trans) { 2824 btrfs_tree_lock(next); 2825 btrfs_set_lock_blocking_write(next); 2826 btrfs_clean_tree_block(next); 2827 btrfs_wait_tree_block_writeback(next); 2828 btrfs_tree_unlock(next); 2829 } else { 2830 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2831 clear_extent_buffer_dirty(next); 2832 } 2833 2834 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); 2835 ret = btrfs_free_and_pin_reserved_extent( 2836 fs_info, 2837 path->nodes[*level]->start, 2838 path->nodes[*level]->len); 2839 if (ret) 2840 return ret; 2841 } 2842 free_extent_buffer(path->nodes[*level]); 2843 path->nodes[*level] = NULL; 2844 *level = i + 1; 2845 } 2846 } 2847 return 1; 2848 } 2849 2850 /* 2851 * drop the reference count on the tree rooted at 'snap'. This traverses 2852 * the tree freeing any blocks that have a ref count of zero after being 2853 * decremented. 2854 */ 2855 static int walk_log_tree(struct btrfs_trans_handle *trans, 2856 struct btrfs_root *log, struct walk_control *wc) 2857 { 2858 struct btrfs_fs_info *fs_info = log->fs_info; 2859 int ret = 0; 2860 int wret; 2861 int level; 2862 struct btrfs_path *path; 2863 int orig_level; 2864 2865 path = btrfs_alloc_path(); 2866 if (!path) 2867 return -ENOMEM; 2868 2869 level = btrfs_header_level(log->node); 2870 orig_level = level; 2871 path->nodes[level] = log->node; 2872 extent_buffer_get(log->node); 2873 path->slots[level] = 0; 2874 2875 while (1) { 2876 wret = walk_down_log_tree(trans, log, path, &level, wc); 2877 if (wret > 0) 2878 break; 2879 if (wret < 0) { 2880 ret = wret; 2881 goto out; 2882 } 2883 2884 wret = walk_up_log_tree(trans, log, path, &level, wc); 2885 if (wret > 0) 2886 break; 2887 if (wret < 0) { 2888 ret = wret; 2889 goto out; 2890 } 2891 } 2892 2893 /* was the root node processed? 
if not, catch it here */ 2894 if (path->nodes[orig_level]) { 2895 ret = wc->process_func(log, path->nodes[orig_level], wc, 2896 btrfs_header_generation(path->nodes[orig_level]), 2897 orig_level); 2898 if (ret) 2899 goto out; 2900 if (wc->free) { 2901 struct extent_buffer *next; 2902 2903 next = path->nodes[orig_level]; 2904 2905 if (trans) { 2906 btrfs_tree_lock(next); 2907 btrfs_set_lock_blocking_write(next); 2908 btrfs_clean_tree_block(next); 2909 btrfs_wait_tree_block_writeback(next); 2910 btrfs_tree_unlock(next); 2911 } else { 2912 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2913 clear_extent_buffer_dirty(next); 2914 } 2915 2916 WARN_ON(log->root_key.objectid != 2917 BTRFS_TREE_LOG_OBJECTID); 2918 ret = btrfs_free_and_pin_reserved_extent(fs_info, 2919 next->start, next->len); 2920 if (ret) 2921 goto out; 2922 } 2923 } 2924 2925 out: 2926 btrfs_free_path(path); 2927 return ret; 2928 } 2929 2930 /* 2931 * helper function to update the item for a given subvolumes log root 2932 * in the tree of log roots 2933 */ 2934 static int update_log_root(struct btrfs_trans_handle *trans, 2935 struct btrfs_root *log, 2936 struct btrfs_root_item *root_item) 2937 { 2938 struct btrfs_fs_info *fs_info = log->fs_info; 2939 int ret; 2940 2941 if (log->log_transid == 1) { 2942 /* insert root item on the first sync */ 2943 ret = btrfs_insert_root(trans, fs_info->log_root_tree, 2944 &log->root_key, root_item); 2945 } else { 2946 ret = btrfs_update_root(trans, fs_info->log_root_tree, 2947 &log->root_key, root_item); 2948 } 2949 return ret; 2950 } 2951 2952 static void wait_log_commit(struct btrfs_root *root, int transid) 2953 { 2954 DEFINE_WAIT(wait); 2955 int index = transid % 2; 2956 2957 /* 2958 * we only allow two pending log transactions at a time, 2959 * so we know that if ours is more than 2 older than the 2960 * current transaction, we're done 2961 */ 2962 for (;;) { 2963 prepare_to_wait(&root->log_commit_wait[index], 2964 &wait, TASK_UNINTERRUPTIBLE); 2965 2966 if (!(root->log_transid_committed < transid && 2967 atomic_read(&root->log_commit[index]))) 2968 break; 2969 2970 mutex_unlock(&root->log_mutex); 2971 schedule(); 2972 mutex_lock(&root->log_mutex); 2973 } 2974 finish_wait(&root->log_commit_wait[index], &wait); 2975 } 2976 2977 static void wait_for_writer(struct btrfs_root *root) 2978 { 2979 DEFINE_WAIT(wait); 2980 2981 for (;;) { 2982 prepare_to_wait(&root->log_writer_wait, &wait, 2983 TASK_UNINTERRUPTIBLE); 2984 if (!atomic_read(&root->log_writers)) 2985 break; 2986 2987 mutex_unlock(&root->log_mutex); 2988 schedule(); 2989 mutex_lock(&root->log_mutex); 2990 } 2991 finish_wait(&root->log_writer_wait, &wait); 2992 } 2993 2994 static inline void btrfs_remove_log_ctx(struct btrfs_root *root, 2995 struct btrfs_log_ctx *ctx) 2996 { 2997 if (!ctx) 2998 return; 2999 3000 mutex_lock(&root->log_mutex); 3001 list_del_init(&ctx->list); 3002 mutex_unlock(&root->log_mutex); 3003 } 3004 3005 /* 3006 * Invoked in log mutex context, or be sure there is no other task which 3007 * can access the list. 
3008  */
3009 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
3010                          int index, int error)
3011 {
3012     struct btrfs_log_ctx *ctx;
3013     struct btrfs_log_ctx *safe;
3014
3015     list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3016         list_del_init(&ctx->list);
3017         ctx->log_ret = error;
3018     }
3019
3020     INIT_LIST_HEAD(&root->log_ctxs[index]);
3021 }
3022
3023 /*
3024  * btrfs_sync_log sends a given tree log down to the disk and
3025  * updates the super blocks to record it. When this call is done,
3026  * you know that any inodes previously logged are safely on disk only
3027  * if it returns 0.
3028  *
3029  * Any other return value means you need to call btrfs_commit_transaction.
3030  * Some of the edge cases for fsyncing directories that have had unlinks
3031  * or renames done in the past mean that sometimes the only safe
3032  * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
3033  * that has happened.
3034  */
3035 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3036            struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3037 {
3038     int index1;
3039     int index2;
3040     int mark;
3041     int ret;
3042     struct btrfs_fs_info *fs_info = root->fs_info;
3043     struct btrfs_root *log = root->log_root;
3044     struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3045     struct btrfs_root_item new_root_item;
3046     int log_transid = 0;
3047     struct btrfs_log_ctx root_log_ctx;
3048     struct blk_plug plug;
3049
3050     mutex_lock(&root->log_mutex);
3051     log_transid = ctx->log_transid;
3052     if (root->log_transid_committed >= log_transid) {
3053         mutex_unlock(&root->log_mutex);
3054         return ctx->log_ret;
3055     }
3056
3057     index1 = log_transid % 2;
3058     if (atomic_read(&root->log_commit[index1])) {
3059         wait_log_commit(root, log_transid);
3060         mutex_unlock(&root->log_mutex);
3061         return ctx->log_ret;
3062     }
3063     ASSERT(log_transid == root->log_transid);
3064     atomic_set(&root->log_commit[index1], 1);
3065
3066     /* wait for previous tree log sync to complete */
3067     if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3068         wait_log_commit(root, log_transid - 1);
3069
3070     while (1) {
3071         int batch = atomic_read(&root->log_batch);
3072         /* when we're on an ssd, just kick the log commit out */
3073         if (!btrfs_test_opt(fs_info, SSD) &&
3074             test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3075             mutex_unlock(&root->log_mutex);
3076             schedule_timeout_uninterruptible(1);
3077             mutex_lock(&root->log_mutex);
3078         }
3079         wait_for_writer(root);
3080         if (batch == atomic_read(&root->log_batch))
3081             break;
3082     }
3083
3084     /* bail out if we need to do a full commit */
3085     if (btrfs_need_log_full_commit(trans)) {
3086         ret = -EAGAIN;
3087         mutex_unlock(&root->log_mutex);
3088         goto out;
3089     }
3090
3091     if (log_transid % 2 == 0)
3092         mark = EXTENT_DIRTY;
3093     else
3094         mark = EXTENT_NEW;
3095
3096     /* we start IO on all the marked extents here, but we don't actually
3097      * wait for them until later.
3098      */
3099     blk_start_plug(&plug);
3100     ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3101     if (ret) {
3102         blk_finish_plug(&plug);
3103         btrfs_abort_transaction(trans, ret);
3104         btrfs_set_log_full_commit(trans);
3105         mutex_unlock(&root->log_mutex);
3106         goto out;
3107     }
3108
3109     /*
3110      * We _must_ update under the root->log_mutex in order to make sure we
3111      * have a consistent view of the log root we are trying to commit at
3112      * this moment.
3113 * 3114 * We _must_ copy this into a local copy, because we are not holding the 3115 * log_root_tree->log_mutex yet. This is important because when we 3116 * commit the log_root_tree we must have a consistent view of the 3117 * log_root_tree when we update the super block to point at the 3118 * log_root_tree bytenr. If we update the log_root_tree here we'll race 3119 * with the commit and possibly point at the new block which we may not 3120 * have written out. 3121 */ 3122 btrfs_set_root_node(&log->root_item, log->node); 3123 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item)); 3124 3125 root->log_transid++; 3126 log->log_transid = root->log_transid; 3127 root->log_start_pid = 0; 3128 /* 3129 * IO has been started, blocks of the log tree have WRITTEN flag set 3130 * in their headers. new modifications of the log will be written to 3131 * new positions. so it's safe to allow log writers to go in. 3132 */ 3133 mutex_unlock(&root->log_mutex); 3134 3135 btrfs_init_log_ctx(&root_log_ctx, NULL); 3136 3137 mutex_lock(&log_root_tree->log_mutex); 3138 atomic_inc(&log_root_tree->log_batch); 3139 atomic_inc(&log_root_tree->log_writers); 3140 3141 index2 = log_root_tree->log_transid % 2; 3142 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); 3143 root_log_ctx.log_transid = log_root_tree->log_transid; 3144 3145 mutex_unlock(&log_root_tree->log_mutex); 3146 3147 mutex_lock(&log_root_tree->log_mutex); 3148 3149 /* 3150 * Now we are safe to update the log_root_tree because we're under the 3151 * log_mutex, and we're a current writer so we're holding the commit 3152 * open until we drop the log_mutex. 3153 */ 3154 ret = update_log_root(trans, log, &new_root_item); 3155 3156 if (atomic_dec_and_test(&log_root_tree->log_writers)) { 3157 /* atomic_dec_and_test implies a barrier */ 3158 cond_wake_up_nomb(&log_root_tree->log_writer_wait); 3159 } 3160 3161 if (ret) { 3162 if (!list_empty(&root_log_ctx.list)) 3163 list_del_init(&root_log_ctx.list); 3164 3165 blk_finish_plug(&plug); 3166 btrfs_set_log_full_commit(trans); 3167 3168 if (ret != -ENOSPC) { 3169 btrfs_abort_transaction(trans, ret); 3170 mutex_unlock(&log_root_tree->log_mutex); 3171 goto out; 3172 } 3173 btrfs_wait_tree_log_extents(log, mark); 3174 mutex_unlock(&log_root_tree->log_mutex); 3175 ret = -EAGAIN; 3176 goto out; 3177 } 3178 3179 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { 3180 blk_finish_plug(&plug); 3181 list_del_init(&root_log_ctx.list); 3182 mutex_unlock(&log_root_tree->log_mutex); 3183 ret = root_log_ctx.log_ret; 3184 goto out; 3185 } 3186 3187 index2 = root_log_ctx.log_transid % 2; 3188 if (atomic_read(&log_root_tree->log_commit[index2])) { 3189 blk_finish_plug(&plug); 3190 ret = btrfs_wait_tree_log_extents(log, mark); 3191 wait_log_commit(log_root_tree, 3192 root_log_ctx.log_transid); 3193 mutex_unlock(&log_root_tree->log_mutex); 3194 if (!ret) 3195 ret = root_log_ctx.log_ret; 3196 goto out; 3197 } 3198 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid); 3199 atomic_set(&log_root_tree->log_commit[index2], 1); 3200 3201 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) { 3202 wait_log_commit(log_root_tree, 3203 root_log_ctx.log_transid - 1); 3204 } 3205 3206 wait_for_writer(log_root_tree); 3207 3208 /* 3209 * now that we've moved on to the tree of log tree roots, 3210 * check the full commit flag again 3211 */ 3212 if (btrfs_need_log_full_commit(trans)) { 3213 blk_finish_plug(&plug); 3214 btrfs_wait_tree_log_extents(log, mark); 3215 
mutex_unlock(&log_root_tree->log_mutex); 3216 ret = -EAGAIN; 3217 goto out_wake_log_root; 3218 } 3219 3220 ret = btrfs_write_marked_extents(fs_info, 3221 &log_root_tree->dirty_log_pages, 3222 EXTENT_DIRTY | EXTENT_NEW); 3223 blk_finish_plug(&plug); 3224 if (ret) { 3225 btrfs_set_log_full_commit(trans); 3226 btrfs_abort_transaction(trans, ret); 3227 mutex_unlock(&log_root_tree->log_mutex); 3228 goto out_wake_log_root; 3229 } 3230 ret = btrfs_wait_tree_log_extents(log, mark); 3231 if (!ret) 3232 ret = btrfs_wait_tree_log_extents(log_root_tree, 3233 EXTENT_NEW | EXTENT_DIRTY); 3234 if (ret) { 3235 btrfs_set_log_full_commit(trans); 3236 mutex_unlock(&log_root_tree->log_mutex); 3237 goto out_wake_log_root; 3238 } 3239 3240 btrfs_set_super_log_root(fs_info->super_for_commit, 3241 log_root_tree->node->start); 3242 btrfs_set_super_log_root_level(fs_info->super_for_commit, 3243 btrfs_header_level(log_root_tree->node)); 3244 3245 log_root_tree->log_transid++; 3246 mutex_unlock(&log_root_tree->log_mutex); 3247 3248 /* 3249 * Nobody else is going to jump in and write the ctree 3250 * super here because the log_commit atomic below is protecting 3251 * us. We must be called with a transaction handle pinning 3252 * the running transaction open, so a full commit can't hop 3253 * in and cause problems either. 3254 */ 3255 ret = write_all_supers(fs_info, 1); 3256 if (ret) { 3257 btrfs_set_log_full_commit(trans); 3258 btrfs_abort_transaction(trans, ret); 3259 goto out_wake_log_root; 3260 } 3261 3262 mutex_lock(&root->log_mutex); 3263 if (root->last_log_commit < log_transid) 3264 root->last_log_commit = log_transid; 3265 mutex_unlock(&root->log_mutex); 3266 3267 out_wake_log_root: 3268 mutex_lock(&log_root_tree->log_mutex); 3269 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret); 3270 3271 log_root_tree->log_transid_committed++; 3272 atomic_set(&log_root_tree->log_commit[index2], 0); 3273 mutex_unlock(&log_root_tree->log_mutex); 3274 3275 /* 3276 * The barrier before waitqueue_active (in cond_wake_up) is needed so 3277 * all the updates above are seen by the woken threads. It might not be 3278 * necessary, but proving that seems to be hard. 3279 */ 3280 cond_wake_up(&log_root_tree->log_commit_wait[index2]); 3281 out: 3282 mutex_lock(&root->log_mutex); 3283 btrfs_remove_all_log_ctxs(root, index1, ret); 3284 root->log_transid_committed++; 3285 atomic_set(&root->log_commit[index1], 0); 3286 mutex_unlock(&root->log_mutex); 3287 3288 /* 3289 * The barrier before waitqueue_active (in cond_wake_up) is needed so 3290 * all the updates above are seen by the woken threads. It might not be 3291 * necessary, but proving that seems to be hard. 3292 */ 3293 cond_wake_up(&root->log_commit_wait[index1]); 3294 return ret; 3295 } 3296 3297 static void free_log_tree(struct btrfs_trans_handle *trans, 3298 struct btrfs_root *log) 3299 { 3300 int ret; 3301 struct walk_control wc = { 3302 .free = 1, 3303 .process_func = process_one_buffer 3304 }; 3305 3306 ret = walk_log_tree(trans, log, &wc); 3307 if (ret) { 3308 if (trans) 3309 btrfs_abort_transaction(trans, ret); 3310 else 3311 btrfs_handle_fs_error(log->fs_info, ret, NULL); 3312 } 3313 3314 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1, 3315 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT); 3316 free_extent_buffer(log->node); 3317 kfree(log); 3318 } 3319 3320 /* 3321 * free all the extents used by the tree log. 
This should be called
3322  * at commit time of the full transaction
3323  */
3324 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3325 {
3326     if (root->log_root) {
3327         free_log_tree(trans, root->log_root);
3328         root->log_root = NULL;
3329     }
3330     return 0;
3331 }
3332
3333 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3334                  struct btrfs_fs_info *fs_info)
3335 {
3336     if (fs_info->log_root_tree) {
3337         free_log_tree(trans, fs_info->log_root_tree);
3338         fs_info->log_root_tree = NULL;
3339     }
3340     return 0;
3341 }
3342
3343 /*
3344  * Check if an inode was logged in the current transaction. We can't always rely
3345  * on an inode's logged_trans value, because it's an in-memory only field and
3346  * therefore not persisted. This means that its value is lost if the inode gets
3347  * evicted and loaded again from disk (in which case it has a value of 0, and
3348  * certainly it is smaller than any possible transaction ID). When that happens
3349  * the full_sync flag is set in the inode's runtime flags, so in that case we
3350  * assume eviction happened and ignore the logged_trans value, assuming the
3351  * worst case: that the inode was logged before in the current transaction.
3352  */
3353 static bool inode_logged(struct btrfs_trans_handle *trans,
3354              struct btrfs_inode *inode)
3355 {
3356     if (inode->logged_trans == trans->transid)
3357         return true;
3358
3359     if (inode->last_trans == trans->transid &&
3360         test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
3361         !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
3362         return true;
3363
3364     return false;
3365 }
3366
3367 /*
3368  * If both a file and directory are logged, and unlinks or renames are
3369  * mixed in, we have a few interesting corners:
3370  *
3371  * create file X in dir Y
3372  * link file X to X.link in dir Y
3373  * fsync file X
3374  * unlink file X but leave X.link
3375  * fsync dir Y
3376  *
3377  * After a crash we would expect only X.link to exist. But file X
3378  * didn't get fsync'd again so the log has back refs for X and X.link.
3379  *
3380  * We solve this by removing directory entries and inode backrefs from the
3381  * log when a file that was logged in the current transaction is
3382  * unlinked. Any later fsync will include the updated log entries, and
3383  * we'll be able to reconstruct the proper directory items from backrefs.
3384  *
3385  * This optimization allows us to avoid relogging the entire inode
3386  * or the entire directory.
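 *
 * In the example above, the unlink of X immediately removes X's dir
 * entries and inode back reference from the log, so the following
 * "fsync dir Y" writes a log from which replay recreates only X.link,
 * without forcing a full transaction commit.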
3387 */ 3388 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, 3389 struct btrfs_root *root, 3390 const char *name, int name_len, 3391 struct btrfs_inode *dir, u64 index) 3392 { 3393 struct btrfs_root *log; 3394 struct btrfs_dir_item *di; 3395 struct btrfs_path *path; 3396 int ret; 3397 int err = 0; 3398 int bytes_del = 0; 3399 u64 dir_ino = btrfs_ino(dir); 3400 3401 if (!inode_logged(trans, dir)) 3402 return 0; 3403 3404 ret = join_running_log_trans(root); 3405 if (ret) 3406 return 0; 3407 3408 mutex_lock(&dir->log_mutex); 3409 3410 log = root->log_root; 3411 path = btrfs_alloc_path(); 3412 if (!path) { 3413 err = -ENOMEM; 3414 goto out_unlock; 3415 } 3416 3417 di = btrfs_lookup_dir_item(trans, log, path, dir_ino, 3418 name, name_len, -1); 3419 if (IS_ERR(di)) { 3420 err = PTR_ERR(di); 3421 goto fail; 3422 } 3423 if (di) { 3424 ret = btrfs_delete_one_dir_name(trans, log, path, di); 3425 bytes_del += name_len; 3426 if (ret) { 3427 err = ret; 3428 goto fail; 3429 } 3430 } 3431 btrfs_release_path(path); 3432 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, 3433 index, name, name_len, -1); 3434 if (IS_ERR(di)) { 3435 err = PTR_ERR(di); 3436 goto fail; 3437 } 3438 if (di) { 3439 ret = btrfs_delete_one_dir_name(trans, log, path, di); 3440 bytes_del += name_len; 3441 if (ret) { 3442 err = ret; 3443 goto fail; 3444 } 3445 } 3446 3447 /* update the directory size in the log to reflect the names 3448 * we have removed 3449 */ 3450 if (bytes_del) { 3451 struct btrfs_key key; 3452 3453 key.objectid = dir_ino; 3454 key.offset = 0; 3455 key.type = BTRFS_INODE_ITEM_KEY; 3456 btrfs_release_path(path); 3457 3458 ret = btrfs_search_slot(trans, log, &key, path, 0, 1); 3459 if (ret < 0) { 3460 err = ret; 3461 goto fail; 3462 } 3463 if (ret == 0) { 3464 struct btrfs_inode_item *item; 3465 u64 i_size; 3466 3467 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3468 struct btrfs_inode_item); 3469 i_size = btrfs_inode_size(path->nodes[0], item); 3470 if (i_size > bytes_del) 3471 i_size -= bytes_del; 3472 else 3473 i_size = 0; 3474 btrfs_set_inode_size(path->nodes[0], item, i_size); 3475 btrfs_mark_buffer_dirty(path->nodes[0]); 3476 } else 3477 ret = 0; 3478 btrfs_release_path(path); 3479 } 3480 fail: 3481 btrfs_free_path(path); 3482 out_unlock: 3483 mutex_unlock(&dir->log_mutex); 3484 if (ret == -ENOSPC) { 3485 btrfs_set_log_full_commit(trans); 3486 ret = 0; 3487 } else if (ret < 0) 3488 btrfs_abort_transaction(trans, ret); 3489 3490 btrfs_end_log_trans(root); 3491 3492 return err; 3493 } 3494 3495 /* see comments for btrfs_del_dir_entries_in_log */ 3496 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, 3497 struct btrfs_root *root, 3498 const char *name, int name_len, 3499 struct btrfs_inode *inode, u64 dirid) 3500 { 3501 struct btrfs_root *log; 3502 u64 index; 3503 int ret; 3504 3505 if (!inode_logged(trans, inode)) 3506 return 0; 3507 3508 ret = join_running_log_trans(root); 3509 if (ret) 3510 return 0; 3511 log = root->log_root; 3512 mutex_lock(&inode->log_mutex); 3513 3514 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), 3515 dirid, &index); 3516 mutex_unlock(&inode->log_mutex); 3517 if (ret == -ENOSPC) { 3518 btrfs_set_log_full_commit(trans); 3519 ret = 0; 3520 } else if (ret < 0 && ret != -ENOENT) 3521 btrfs_abort_transaction(trans, ret); 3522 btrfs_end_log_trans(root); 3523 3524 return ret; 3525 } 3526 3527 /* 3528 * creates a range item in the log for 'dirid'. 
first_offset and 3529 * last_offset tell us which parts of the key space the log should 3530 * be considered authoritative for. 3531 */ 3532 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, 3533 struct btrfs_root *log, 3534 struct btrfs_path *path, 3535 int key_type, u64 dirid, 3536 u64 first_offset, u64 last_offset) 3537 { 3538 int ret; 3539 struct btrfs_key key; 3540 struct btrfs_dir_log_item *item; 3541 3542 key.objectid = dirid; 3543 key.offset = first_offset; 3544 if (key_type == BTRFS_DIR_ITEM_KEY) 3545 key.type = BTRFS_DIR_LOG_ITEM_KEY; 3546 else 3547 key.type = BTRFS_DIR_LOG_INDEX_KEY; 3548 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); 3549 if (ret) 3550 return ret; 3551 3552 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3553 struct btrfs_dir_log_item); 3554 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); 3555 btrfs_mark_buffer_dirty(path->nodes[0]); 3556 btrfs_release_path(path); 3557 return 0; 3558 } 3559 3560 /* 3561 * log all the items included in the current transaction for a given 3562 * directory. This also creates the range items in the log tree required 3563 * to replay anything deleted before the fsync 3564 */ 3565 static noinline int log_dir_items(struct btrfs_trans_handle *trans, 3566 struct btrfs_root *root, struct btrfs_inode *inode, 3567 struct btrfs_path *path, 3568 struct btrfs_path *dst_path, int key_type, 3569 struct btrfs_log_ctx *ctx, 3570 u64 min_offset, u64 *last_offset_ret) 3571 { 3572 struct btrfs_key min_key; 3573 struct btrfs_root *log = root->log_root; 3574 struct extent_buffer *src; 3575 int err = 0; 3576 int ret; 3577 int i; 3578 int nritems; 3579 u64 first_offset = min_offset; 3580 u64 last_offset = (u64)-1; 3581 u64 ino = btrfs_ino(inode); 3582 3583 log = root->log_root; 3584 3585 min_key.objectid = ino; 3586 min_key.type = key_type; 3587 min_key.offset = min_offset; 3588 3589 ret = btrfs_search_forward(root, &min_key, path, trans->transid); 3590 3591 /* 3592 * we didn't find anything from this transaction, see if there 3593 * is anything at all 3594 */ 3595 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { 3596 min_key.objectid = ino; 3597 min_key.type = key_type; 3598 min_key.offset = (u64)-1; 3599 btrfs_release_path(path); 3600 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3601 if (ret < 0) { 3602 btrfs_release_path(path); 3603 return ret; 3604 } 3605 ret = btrfs_previous_item(root, path, ino, key_type); 3606 3607 /* if ret == 0 there are items for this type, 3608 * create a range to tell us the last key of this type. 3609 * otherwise, there are no items in this directory after 3610 * *min_offset, and we create a range to indicate that. 3611 */ 3612 if (ret == 0) { 3613 struct btrfs_key tmp; 3614 btrfs_item_key_to_cpu(path->nodes[0], &tmp, 3615 path->slots[0]); 3616 if (key_type == tmp.type) 3617 first_offset = max(min_offset, tmp.offset) + 1; 3618 } 3619 goto done; 3620 } 3621 3622 /* go backward to find any previous key */ 3623 ret = btrfs_previous_item(root, path, ino, key_type); 3624 if (ret == 0) { 3625 struct btrfs_key tmp; 3626 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3627 if (key_type == tmp.type) { 3628 first_offset = tmp.offset; 3629 ret = overwrite_item(trans, log, dst_path, 3630 path->nodes[0], path->slots[0], 3631 &tmp); 3632 if (ret) { 3633 err = ret; 3634 goto done; 3635 } 3636 } 3637 } 3638 btrfs_release_path(path); 3639 3640 /* 3641 * Find the first key from this transaction again. 
See the note for
3642  * log_new_dir_dentries: if we're logging a directory recursively we
3643  * won't be holding its i_mutex, which means we can modify the directory
3644  * while we're logging it. If we remove an entry between our first
3645  * search and this search we'll not find the key again and can just
3646  * bail.
3647  */
3648     ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3649     if (ret != 0)
3650         goto done;
3651
3652     /*
3653      * we have a block from this transaction, log every item in it
3654      * from our directory
3655      */
3656     while (1) {
3657         struct btrfs_key tmp;
3658         src = path->nodes[0];
3659         nritems = btrfs_header_nritems(src);
3660         for (i = path->slots[0]; i < nritems; i++) {
3661             struct btrfs_dir_item *di;
3662
3663             btrfs_item_key_to_cpu(src, &min_key, i);
3664
3665             if (min_key.objectid != ino || min_key.type != key_type)
3666                 goto done;
3667             ret = overwrite_item(trans, log, dst_path, src, i,
3668                          &min_key);
3669             if (ret) {
3670                 err = ret;
3671                 goto done;
3672             }
3673
3674             /*
3675              * We must make sure that when we log a directory entry,
3676              * the corresponding inode, after log replay, has a
3677              * matching link count. For example:
3678              *
3679              * touch foo
3680              * mkdir mydir
3681              * sync
3682              * ln foo mydir/bar
3683              * xfs_io -c "fsync" mydir
3684              * <crash>
3685              * <mount fs and log replay>
3686              *
3687              * Would result in an fsync log that, when replayed, leaves
3688              * our file inode with a link count of 1 even though two
3689              * directory entries point to the same inode.
3690              * After removing one of the names, it would not be
3691              * possible to remove the other name, which always
3692              * resulted in stale file handle errors, and it would not
3693              * be possible to rmdir the parent directory, since
3694              * its i_size could never decrement to the value
3695              * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3696              */
3697             di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3698             btrfs_dir_item_key_to_cpu(src, di, &tmp);
3699             if (ctx &&
3700                 (btrfs_dir_transid(src, di) == trans->transid ||
3701                  btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3702                 tmp.type != BTRFS_ROOT_ITEM_KEY)
3703                 ctx->log_new_dentries = true;
3704         }
3705         path->slots[0] = nritems;
3706
3707         /*
3708          * look ahead to the next item and see if it is also
3709          * from this directory and from this transaction
3710          */
3711         ret = btrfs_next_leaf(root, path);
3712         if (ret) {
3713             if (ret == 1)
3714                 last_offset = (u64)-1;
3715             else
3716                 err = ret;
3717             goto done;
3718         }
3719         btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3720         if (tmp.objectid != ino || tmp.type != key_type) {
3721             last_offset = (u64)-1;
3722             goto done;
3723         }
3724         if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3725             ret = overwrite_item(trans, log, dst_path,
3726                          path->nodes[0], path->slots[0],
3727                          &tmp);
3728             if (ret)
3729                 err = ret;
3730             else
3731                 last_offset = tmp.offset;
3732             goto done;
3733         }
3734     }
3735 done:
3736     btrfs_release_path(path);
3737     btrfs_release_path(dst_path);
3738
3739     if (err == 0) {
3740         *last_offset_ret = last_offset;
3741         /*
3742          * insert the log range keys to indicate where the log
3743          * is valid
3744          */
3745         ret = insert_dir_log_key(trans, log, path, key_type,
3746                      ino, first_offset, last_offset);
3747         if (ret)
3748             err = ret;
3749     }
3750     return err;
3751 }
3752
3753 /*
3754  * logging directories is very similar to logging inodes. We find all the items
3755  * from the current transaction and write them to the log.
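 * Both BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY items are copied, and
 * log_directory_changes() below makes one pass over the directory for
 * each key type.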
3756 * 3757 * The recovery code scans the directory in the subvolume, and if it finds a 3758 * key in the range logged that is not present in the log tree, then it means 3759 * that dir entry was unlinked during the transaction. 3760 * 3761 * In order for that scan to work, we must include one key smaller than 3762 * the smallest logged by this transaction and one key larger than the largest 3763 * key logged by this transaction. 3764 */ 3765 static noinline int log_directory_changes(struct btrfs_trans_handle *trans, 3766 struct btrfs_root *root, struct btrfs_inode *inode, 3767 struct btrfs_path *path, 3768 struct btrfs_path *dst_path, 3769 struct btrfs_log_ctx *ctx) 3770 { 3771 u64 min_key; 3772 u64 max_key; 3773 int ret; 3774 int key_type = BTRFS_DIR_ITEM_KEY; 3775 3776 again: 3777 min_key = 0; 3778 max_key = 0; 3779 while (1) { 3780 ret = log_dir_items(trans, root, inode, path, dst_path, key_type, 3781 ctx, min_key, &max_key); 3782 if (ret) 3783 return ret; 3784 if (max_key == (u64)-1) 3785 break; 3786 min_key = max_key + 1; 3787 } 3788 3789 if (key_type == BTRFS_DIR_ITEM_KEY) { 3790 key_type = BTRFS_DIR_INDEX_KEY; 3791 goto again; 3792 } 3793 return 0; 3794 } 3795 3796 /* 3797 * a helper function to drop items from the log before we relog an 3798 * inode. max_key_type indicates the highest item type to remove. 3799 * This cannot be run for file data extents because it does not 3800 * free the extents they point to. 3801 */ 3802 static int drop_objectid_items(struct btrfs_trans_handle *trans, 3803 struct btrfs_root *log, 3804 struct btrfs_path *path, 3805 u64 objectid, int max_key_type) 3806 { 3807 int ret; 3808 struct btrfs_key key; 3809 struct btrfs_key found_key; 3810 int start_slot; 3811 3812 key.objectid = objectid; 3813 key.type = max_key_type; 3814 key.offset = (u64)-1; 3815 3816 while (1) { 3817 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 3818 BUG_ON(ret == 0); /* Logic error */ 3819 if (ret < 0) 3820 break; 3821 3822 if (path->slots[0] == 0) 3823 break; 3824 3825 path->slots[0]--; 3826 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 3827 path->slots[0]); 3828 3829 if (found_key.objectid != objectid) 3830 break; 3831 3832 found_key.offset = 0; 3833 found_key.type = 0; 3834 ret = btrfs_bin_search(path->nodes[0], &found_key, 0, 3835 &start_slot); 3836 if (ret < 0) 3837 break; 3838 3839 ret = btrfs_del_items(trans, log, path, start_slot, 3840 path->slots[0] - start_slot + 1); 3841 /* 3842 * If start slot isn't 0 then we don't need to re-search, we've 3843 * found the last guy with the objectid in this tree. 
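 * (Items with the same objectid are contiguous in the tree, so if the
 * run of deleted items did not begin at slot 0, no earlier leaf can
 * hold further items with this objectid.)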
3844          */
3845         if (ret || start_slot != 0)
3846             break;
3847         btrfs_release_path(path);
3848     }
3849     btrfs_release_path(path);
3850     if (ret > 0)
3851         ret = 0;
3852     return ret;
3853 }
3854
3855 static void fill_inode_item(struct btrfs_trans_handle *trans,
3856                 struct extent_buffer *leaf,
3857                 struct btrfs_inode_item *item,
3858                 struct inode *inode, int log_inode_only,
3859                 u64 logged_isize)
3860 {
3861     struct btrfs_map_token token;
3862
3863     btrfs_init_map_token(&token, leaf);
3864
3865     if (log_inode_only) {
3866         /* set the generation to zero so the recovery code
3867          * can tell the difference between logging
3868          * just to say 'this inode exists' and logging
3869          * to say 'update this inode with these values'
3870          */
3871         btrfs_set_token_inode_generation(leaf, item, 0, &token);
3872         btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3873     } else {
3874         btrfs_set_token_inode_generation(leaf, item,
3875                          BTRFS_I(inode)->generation,
3876                          &token);
3877         btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3878     }
3879
3880     btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3881     btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3882     btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3883     btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3884
3885     btrfs_set_token_timespec_sec(leaf, &item->atime,
3886                      inode->i_atime.tv_sec, &token);
3887     btrfs_set_token_timespec_nsec(leaf, &item->atime,
3888                       inode->i_atime.tv_nsec, &token);
3889
3890     btrfs_set_token_timespec_sec(leaf, &item->mtime,
3891                      inode->i_mtime.tv_sec, &token);
3892     btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3893                       inode->i_mtime.tv_nsec, &token);
3894
3895     btrfs_set_token_timespec_sec(leaf, &item->ctime,
3896                      inode->i_ctime.tv_sec, &token);
3897     btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3898                       inode->i_ctime.tv_nsec, &token);
3899
3900     btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3901                      &token);
3902
3903     btrfs_set_token_inode_sequence(leaf, item,
3904                        inode_peek_iversion(inode), &token);
3905     btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3906     btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3907     btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3908     btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3909 }
3910
3911 static int log_inode_item(struct btrfs_trans_handle *trans,
3912               struct btrfs_root *log, struct btrfs_path *path,
3913               struct btrfs_inode *inode)
3914 {
3915     struct btrfs_inode_item *inode_item;
3916     int ret;
3917
3918     ret = btrfs_insert_empty_item(trans, log, path,
3919                       &inode->location, sizeof(*inode_item));
3920     if (ret && ret != -EEXIST)
3921         return ret;
3922     inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3923                     struct btrfs_inode_item);
3924     fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
3925             0, 0);
3926     btrfs_release_path(path);
3927     return 0;
3928 }
3929
3930 static noinline int copy_items(struct btrfs_trans_handle *trans,
3931                    struct btrfs_inode *inode,
3932                    struct btrfs_path *dst_path,
3933                    struct btrfs_path *src_path, u64 *last_extent,
3934                    int start_slot, int nr, int inode_only,
3935                    u64 logged_isize)
3936 {
3937     struct btrfs_fs_info *fs_info = trans->fs_info;
3938     unsigned long src_offset;
3939     unsigned long dst_offset;
3940     struct btrfs_root *log = inode->root->log_root;
3941     struct btrfs_file_extent_item *extent;
3942     struct btrfs_inode_item *inode_item;
3943     struct extent_buffer *src = src_path->nodes[0];
3944
struct btrfs_key first_key, last_key, key; 3945 int ret; 3946 struct btrfs_key *ins_keys; 3947 u32 *ins_sizes; 3948 char *ins_data; 3949 int i; 3950 struct list_head ordered_sums; 3951 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM; 3952 bool has_extents = false; 3953 bool need_find_last_extent = true; 3954 bool done = false; 3955 3956 INIT_LIST_HEAD(&ordered_sums); 3957 3958 ins_data = kmalloc(nr * sizeof(struct btrfs_key) + 3959 nr * sizeof(u32), GFP_NOFS); 3960 if (!ins_data) 3961 return -ENOMEM; 3962 3963 first_key.objectid = (u64)-1; 3964 3965 ins_sizes = (u32 *)ins_data; 3966 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); 3967 3968 for (i = 0; i < nr; i++) { 3969 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot); 3970 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot); 3971 } 3972 ret = btrfs_insert_empty_items(trans, log, dst_path, 3973 ins_keys, ins_sizes, nr); 3974 if (ret) { 3975 kfree(ins_data); 3976 return ret; 3977 } 3978 3979 for (i = 0; i < nr; i++, dst_path->slots[0]++) { 3980 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], 3981 dst_path->slots[0]); 3982 3983 src_offset = btrfs_item_ptr_offset(src, start_slot + i); 3984 3985 if (i == nr - 1) 3986 last_key = ins_keys[i]; 3987 3988 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { 3989 inode_item = btrfs_item_ptr(dst_path->nodes[0], 3990 dst_path->slots[0], 3991 struct btrfs_inode_item); 3992 fill_inode_item(trans, dst_path->nodes[0], inode_item, 3993 &inode->vfs_inode, 3994 inode_only == LOG_INODE_EXISTS, 3995 logged_isize); 3996 } else { 3997 copy_extent_buffer(dst_path->nodes[0], src, dst_offset, 3998 src_offset, ins_sizes[i]); 3999 } 4000 4001 /* 4002 * We set need_find_last_extent here in case we know we were 4003 * processing other items and then walk into the first extent in 4004 * the inode. If we don't hit an extent then nothing changes, 4005 * we'll do the last search the next time around. 
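 *
 * (In other words, need_find_last_extent stays true only while every
 * item copied so far has been an extent item, i.e. when this batch may
 * have started in the middle of the inode's extent range.)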
4006 */ 4007 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) { 4008 has_extents = true; 4009 if (first_key.objectid == (u64)-1) 4010 first_key = ins_keys[i]; 4011 } else { 4012 need_find_last_extent = false; 4013 } 4014 4015 /* take a reference on file data extents so that truncates 4016 * or deletes of this inode don't have to relog the inode 4017 * again 4018 */ 4019 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY && 4020 !skip_csum) { 4021 int found_type; 4022 extent = btrfs_item_ptr(src, start_slot + i, 4023 struct btrfs_file_extent_item); 4024 4025 if (btrfs_file_extent_generation(src, extent) < trans->transid) 4026 continue; 4027 4028 found_type = btrfs_file_extent_type(src, extent); 4029 if (found_type == BTRFS_FILE_EXTENT_REG) { 4030 u64 ds, dl, cs, cl; 4031 ds = btrfs_file_extent_disk_bytenr(src, 4032 extent); 4033 /* ds == 0 is a hole */ 4034 if (ds == 0) 4035 continue; 4036 4037 dl = btrfs_file_extent_disk_num_bytes(src, 4038 extent); 4039 cs = btrfs_file_extent_offset(src, extent); 4040 cl = btrfs_file_extent_num_bytes(src, 4041 extent); 4042 if (btrfs_file_extent_compression(src, 4043 extent)) { 4044 cs = 0; 4045 cl = dl; 4046 } 4047 4048 ret = btrfs_lookup_csums_range( 4049 fs_info->csum_root, 4050 ds + cs, ds + cs + cl - 1, 4051 &ordered_sums, 0); 4052 if (ret) { 4053 btrfs_release_path(dst_path); 4054 kfree(ins_data); 4055 return ret; 4056 } 4057 } 4058 } 4059 } 4060 4061 btrfs_mark_buffer_dirty(dst_path->nodes[0]); 4062 btrfs_release_path(dst_path); 4063 kfree(ins_data); 4064 4065 /* 4066 * we have to do this after the loop above to avoid changing the 4067 * log tree while trying to change the log tree. 4068 */ 4069 ret = 0; 4070 while (!list_empty(&ordered_sums)) { 4071 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 4072 struct btrfs_ordered_sum, 4073 list); 4074 if (!ret) 4075 ret = btrfs_csum_file_blocks(trans, log, sums); 4076 list_del(&sums->list); 4077 kfree(sums); 4078 } 4079 4080 if (!has_extents) 4081 return ret; 4082 4083 if (need_find_last_extent && *last_extent == first_key.offset) { 4084 /* 4085 * We don't have any leaves between our current one and the one 4086 * we processed before that can have file extent items for our 4087 * inode (and have a generation number smaller than our current 4088 * transaction id). 4089 */ 4090 need_find_last_extent = false; 4091 } 4092 4093 /* 4094 * Because we use btrfs_search_forward we could skip leaves that were 4095 * not modified and then assume *last_extent is valid when it really 4096 * isn't. So back up to the previous leaf and read the end of the last 4097 * extent before we go and fill in holes.
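 *
 * For example, btrfs_search_forward() may have skipped a leaf that was
 * not COWed in this transaction; if that leaf's last extent ends past
 * *last_extent, filling holes from the stale *last_extent would insert
 * a hole over a range that still has extents.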
4098 */ 4099 if (need_find_last_extent) { 4100 u64 len; 4101 4102 ret = btrfs_prev_leaf(inode->root, src_path); 4103 if (ret < 0) 4104 return ret; 4105 if (ret) 4106 goto fill_holes; 4107 if (src_path->slots[0]) 4108 src_path->slots[0]--; 4109 src = src_path->nodes[0]; 4110 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]); 4111 if (key.objectid != btrfs_ino(inode) || 4112 key.type != BTRFS_EXTENT_DATA_KEY) 4113 goto fill_holes; 4114 extent = btrfs_item_ptr(src, src_path->slots[0], 4115 struct btrfs_file_extent_item); 4116 if (btrfs_file_extent_type(src, extent) == 4117 BTRFS_FILE_EXTENT_INLINE) { 4118 len = btrfs_file_extent_ram_bytes(src, extent); 4119 *last_extent = ALIGN(key.offset + len, 4120 fs_info->sectorsize); 4121 } else { 4122 len = btrfs_file_extent_num_bytes(src, extent); 4123 *last_extent = key.offset + len; 4124 } 4125 } 4126 fill_holes: 4127 /* So we did prev_leaf, now we need to move to the next leaf, but a few 4128 * things could have happened 4129 * 4130 * 1) A merge could have happened, so we could currently be on a leaf 4131 * that holds what we were copying in the first place. 4132 * 2) A split could have happened, and now not all of the items we want 4133 * are on the same leaf. 4134 * 4135 * So we need to adjust how we search for holes, we need to drop the 4136 * path and re-search for the first extent key we found, and then walk 4137 * forward until we hit the last one we copied. 4138 */ 4139 if (need_find_last_extent) { 4140 /* btrfs_prev_leaf could return 1 without releasing the path */ 4141 btrfs_release_path(src_path); 4142 ret = btrfs_search_slot(NULL, inode->root, &first_key, 4143 src_path, 0, 0); 4144 if (ret < 0) 4145 return ret; 4146 ASSERT(ret == 0); 4147 src = src_path->nodes[0]; 4148 i = src_path->slots[0]; 4149 } else { 4150 i = start_slot; 4151 } 4152 4153 /* 4154 * Ok so here we need to go through and fill in any holes we may have 4155 * to make sure that holes are punched for those areas in case they had 4156 * extents previously. 4157 */ 4158 while (!done) { 4159 u64 offset, len; 4160 u64 extent_end; 4161 4162 if (i >= btrfs_header_nritems(src_path->nodes[0])) { 4163 ret = btrfs_next_leaf(inode->root, src_path); 4164 if (ret < 0) 4165 return ret; 4166 ASSERT(ret == 0); 4167 src = src_path->nodes[0]; 4168 i = 0; 4169 need_find_last_extent = true; 4170 } 4171 4172 btrfs_item_key_to_cpu(src, &key, i); 4173 if (!btrfs_comp_cpu_keys(&key, &last_key)) 4174 done = true; 4175 if (key.objectid != btrfs_ino(inode) || 4176 key.type != BTRFS_EXTENT_DATA_KEY) { 4177 i++; 4178 continue; 4179 } 4180 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item); 4181 if (btrfs_file_extent_type(src, extent) == 4182 BTRFS_FILE_EXTENT_INLINE) { 4183 len = btrfs_file_extent_ram_bytes(src, extent); 4184 extent_end = ALIGN(key.offset + len, 4185 fs_info->sectorsize); 4186 } else { 4187 len = btrfs_file_extent_num_bytes(src, extent); 4188 extent_end = key.offset + len; 4189 } 4190 i++; 4191 4192 if (*last_extent == key.offset) { 4193 *last_extent = extent_end; 4194 continue; 4195 } 4196 offset = *last_extent; 4197 len = key.offset - *last_extent; 4198 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode), 4199 offset, 0, 0, len, 0, len, 0, 0, 0); 4200 if (ret) 4201 break; 4202 *last_extent = extent_end; 4203 } 4204 4205 /* 4206 * Check if there is a hole between the last extent found in our leaf 4207 * and the first extent in the next leaf. If there is one, we need to 4208 * log an explicit hole so that at replay time we can punch the hole. 
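 *
 * For example, if the last extent copied from our leaf ends at offset
 * 1M and the first extent item in the next leaf starts at 1M + 64K,
 * we must insert a 64K hole item at offset 1M.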
4209 */ 4210 if (ret == 0 && 4211 key.objectid == btrfs_ino(inode) && 4212 key.type == BTRFS_EXTENT_DATA_KEY && 4213 i == btrfs_header_nritems(src_path->nodes[0])) { 4214 ret = btrfs_next_leaf(inode->root, src_path); 4215 need_find_last_extent = true; 4216 if (ret > 0) { 4217 ret = 0; 4218 } else if (ret == 0) { 4219 btrfs_item_key_to_cpu(src_path->nodes[0], &key, 4220 src_path->slots[0]); 4221 if (key.objectid == btrfs_ino(inode) && 4222 key.type == BTRFS_EXTENT_DATA_KEY && 4223 *last_extent < key.offset) { 4224 const u64 len = key.offset - *last_extent; 4225 4226 ret = btrfs_insert_file_extent(trans, log, 4227 btrfs_ino(inode), 4228 *last_extent, 0, 4229 0, len, 0, len, 4230 0, 0, 0); 4231 *last_extent += len; 4232 } 4233 } 4234 } 4235 /* 4236 * Need to let the callers know we dropped the path so they should 4237 * re-search. 4238 */ 4239 if (!ret && need_find_last_extent) 4240 ret = 1; 4241 return ret; 4242 } 4243 4244 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b) 4245 { 4246 struct extent_map *em1, *em2; 4247 4248 em1 = list_entry(a, struct extent_map, list); 4249 em2 = list_entry(b, struct extent_map, list); 4250 4251 if (em1->start < em2->start) 4252 return -1; 4253 else if (em1->start > em2->start) 4254 return 1; 4255 return 0; 4256 } 4257 4258 static int log_extent_csums(struct btrfs_trans_handle *trans, 4259 struct btrfs_inode *inode, 4260 struct btrfs_root *log_root, 4261 const struct extent_map *em) 4262 { 4263 u64 csum_offset; 4264 u64 csum_len; 4265 LIST_HEAD(ordered_sums); 4266 int ret = 0; 4267 4268 if (inode->flags & BTRFS_INODE_NODATASUM || 4269 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 4270 em->block_start == EXTENT_MAP_HOLE) 4271 return 0; 4272 4273 /* If we're compressed we have to save the entire range of csums. */ 4274 if (em->compress_type) { 4275 csum_offset = 0; 4276 csum_len = max(em->block_len, em->orig_block_len); 4277 } else { 4278 csum_offset = em->mod_start - em->start; 4279 csum_len = em->mod_len; 4280 } 4281 4282 /* block start is already adjusted for the file extent offset. 
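 *
 * So for a non-compressed extent the csum range below covers exactly
 * the modified part of the on-disk extent: [block_start + csum_offset,
 * block_start + csum_offset + csum_len - 1].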
*/ 4283 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root, 4284 em->block_start + csum_offset, 4285 em->block_start + csum_offset + 4286 csum_len - 1, &ordered_sums, 0); 4287 if (ret) 4288 return ret; 4289 4290 while (!list_empty(&ordered_sums)) { 4291 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 4292 struct btrfs_ordered_sum, 4293 list); 4294 if (!ret) 4295 ret = btrfs_csum_file_blocks(trans, log_root, sums); 4296 list_del(&sums->list); 4297 kfree(sums); 4298 } 4299 4300 return ret; 4301 } 4302 4303 static int log_one_extent(struct btrfs_trans_handle *trans, 4304 struct btrfs_inode *inode, struct btrfs_root *root, 4305 const struct extent_map *em, 4306 struct btrfs_path *path, 4307 struct btrfs_log_ctx *ctx) 4308 { 4309 struct btrfs_root *log = root->log_root; 4310 struct btrfs_file_extent_item *fi; 4311 struct extent_buffer *leaf; 4312 struct btrfs_map_token token; 4313 struct btrfs_key key; 4314 u64 extent_offset = em->start - em->orig_start; 4315 u64 block_len; 4316 int ret; 4317 int extent_inserted = 0; 4318 4319 ret = log_extent_csums(trans, inode, log, em); 4320 if (ret) 4321 return ret; 4322 4323 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start, 4324 em->start + em->len, NULL, 0, 1, 4325 sizeof(*fi), &extent_inserted); 4326 if (ret) 4327 return ret; 4328 4329 if (!extent_inserted) { 4330 key.objectid = btrfs_ino(inode); 4331 key.type = BTRFS_EXTENT_DATA_KEY; 4332 key.offset = em->start; 4333 4334 ret = btrfs_insert_empty_item(trans, log, path, &key, 4335 sizeof(*fi)); 4336 if (ret) 4337 return ret; 4338 } 4339 leaf = path->nodes[0]; 4340 btrfs_init_map_token(&token, leaf); 4341 fi = btrfs_item_ptr(leaf, path->slots[0], 4342 struct btrfs_file_extent_item); 4343 4344 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid, 4345 &token); 4346 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 4347 btrfs_set_token_file_extent_type(leaf, fi, 4348 BTRFS_FILE_EXTENT_PREALLOC, 4349 &token); 4350 else 4351 btrfs_set_token_file_extent_type(leaf, fi, 4352 BTRFS_FILE_EXTENT_REG, 4353 &token); 4354 4355 block_len = max(em->block_len, em->orig_block_len); 4356 if (em->compress_type != BTRFS_COMPRESS_NONE) { 4357 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4358 em->block_start, 4359 &token); 4360 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4361 &token); 4362 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { 4363 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4364 em->block_start - 4365 extent_offset, &token); 4366 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4367 &token); 4368 } else { 4369 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token); 4370 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0, 4371 &token); 4372 } 4373 4374 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token); 4375 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token); 4376 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token); 4377 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type, 4378 &token); 4379 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token); 4380 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token); 4381 btrfs_mark_buffer_dirty(leaf); 4382 4383 btrfs_release_path(path); 4384 4385 return ret; 4386 } 4387 4388 /* 4389 * Log all prealloc extents beyond the inode's i_size to make sure we do not 4390 * lose them after doing a fast fsync and replaying the log. 
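 * Such extents typically come from fallocate(2) with the KEEP_SIZE
 * flag, which allocates space beyond the end of the file.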
We scan the 4391 * subvolume's root instead of iterating the inode's extent map tree because 4392 * otherwise we can log incorrect extent items based on extent map conversion. 4393 * That can happen due to the fact that extent maps are merged when they 4394 * are not in the extent map tree's list of modified extents. 4395 */ 4396 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, 4397 struct btrfs_inode *inode, 4398 struct btrfs_path *path) 4399 { 4400 struct btrfs_root *root = inode->root; 4401 struct btrfs_key key; 4402 const u64 i_size = i_size_read(&inode->vfs_inode); 4403 const u64 ino = btrfs_ino(inode); 4404 struct btrfs_path *dst_path = NULL; 4405 u64 last_extent = (u64)-1; 4406 int ins_nr = 0; 4407 int start_slot; 4408 int ret; 4409 4410 if (!(inode->flags & BTRFS_INODE_PREALLOC)) 4411 return 0; 4412 4413 key.objectid = ino; 4414 key.type = BTRFS_EXTENT_DATA_KEY; 4415 key.offset = i_size; 4416 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4417 if (ret < 0) 4418 goto out; 4419 4420 while (true) { 4421 struct extent_buffer *leaf = path->nodes[0]; 4422 int slot = path->slots[0]; 4423 4424 if (slot >= btrfs_header_nritems(leaf)) { 4425 if (ins_nr > 0) { 4426 ret = copy_items(trans, inode, dst_path, path, 4427 &last_extent, start_slot, 4428 ins_nr, 1, 0); 4429 if (ret < 0) 4430 goto out; 4431 ins_nr = 0; 4432 } 4433 ret = btrfs_next_leaf(root, path); 4434 if (ret < 0) 4435 goto out; 4436 if (ret > 0) { 4437 ret = 0; 4438 break; 4439 } 4440 continue; 4441 } 4442 4443 btrfs_item_key_to_cpu(leaf, &key, slot); 4444 if (key.objectid > ino) 4445 break; 4446 if (WARN_ON_ONCE(key.objectid < ino) || 4447 key.type < BTRFS_EXTENT_DATA_KEY || 4448 key.offset < i_size) { 4449 path->slots[0]++; 4450 continue; 4451 } 4452 if (last_extent == (u64)-1) { 4453 last_extent = key.offset; 4454 /* 4455 * Avoid logging extent items logged in past fsync calls 4456 * and leading to duplicate keys in the log tree. 4457 */ 4458 do { 4459 ret = btrfs_truncate_inode_items(trans, 4460 root->log_root, 4461 &inode->vfs_inode, 4462 i_size, 4463 BTRFS_EXTENT_DATA_KEY); 4464 } while (ret == -EAGAIN); 4465 if (ret) 4466 goto out; 4467 } 4468 if (ins_nr == 0) 4469 start_slot = slot; 4470 ins_nr++; 4471 path->slots[0]++; 4472 if (!dst_path) { 4473 dst_path = btrfs_alloc_path(); 4474 if (!dst_path) { 4475 ret = -ENOMEM; 4476 goto out; 4477 } 4478 } 4479 } 4480 if (ins_nr > 0) { 4481 ret = copy_items(trans, inode, dst_path, path, &last_extent, 4482 start_slot, ins_nr, 1, 0); 4483 if (ret > 0) 4484 ret = 0; 4485 } 4486 out: 4487 btrfs_release_path(path); 4488 btrfs_free_path(dst_path); 4489 return ret; 4490 } 4491 4492 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, 4493 struct btrfs_root *root, 4494 struct btrfs_inode *inode, 4495 struct btrfs_path *path, 4496 struct btrfs_log_ctx *ctx, 4497 const u64 start, 4498 const u64 end) 4499 { 4500 struct extent_map *em, *n; 4501 struct list_head extents; 4502 struct extent_map_tree *tree = &inode->extent_tree; 4503 u64 test_gen; 4504 int ret = 0; 4505 int num = 0; 4506 4507 INIT_LIST_HEAD(&extents); 4508 4509 write_lock(&tree->lock); 4510 test_gen = root->fs_info->last_trans_committed; 4511 4512 list_for_each_entry_safe(em, n, &tree->modified_extents, list) { 4513 /* 4514 * Skip extents outside our logging range. 
It's important to do 4515 * it for correctness because if we don't ignore them, we may 4516 * log them before their ordered extent completes, and therefore 4517 * we could log them without logging their respective checksums 4518 * (the checksum items are added to the csum tree at the very 4519 * end of btrfs_finish_ordered_io()). Also leave such extents 4520 * outside of our range in the list, since we may have another 4521 * ranged fsync in the near future that needs them. If an extent 4522 * outside our range corresponds to a hole, log it to avoid 4523 * leaving gaps between extents (fsck will complain when we are 4524 * not using the NO_HOLES feature). 4525 */ 4526 if ((em->start > end || em->start + em->len <= start) && 4527 em->block_start != EXTENT_MAP_HOLE) 4528 continue; 4529 4530 list_del_init(&em->list); 4531 /* 4532 * Just an arbitrary number, this can be really CPU intensive 4533 * once we start getting a lot of extents, and really once we 4534 * have a bunch of extents we just want to commit since it will 4535 * be faster. 4536 */ 4537 if (++num > 32768) { 4538 list_del_init(&tree->modified_extents); 4539 ret = -EFBIG; 4540 goto process; 4541 } 4542 4543 if (em->generation <= test_gen) 4544 continue; 4545 4546 /* We log prealloc extents beyond eof later. */ 4547 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && 4548 em->start >= i_size_read(&inode->vfs_inode)) 4549 continue; 4550 4551 /* Need a ref to keep it from getting evicted from cache */ 4552 refcount_inc(&em->refs); 4553 set_bit(EXTENT_FLAG_LOGGING, &em->flags); 4554 list_add_tail(&em->list, &extents); 4555 num++; 4556 } 4557 4558 list_sort(NULL, &extents, extent_cmp); 4559 process: 4560 while (!list_empty(&extents)) { 4561 em = list_entry(extents.next, struct extent_map, list); 4562 4563 list_del_init(&em->list); 4564 4565 /* 4566 * If we had an error we just need to delete everybody from our 4567 * private list. 4568 */ 4569 if (ret) { 4570 clear_em_logging(tree, em); 4571 free_extent_map(em); 4572 continue; 4573 } 4574 4575 write_unlock(&tree->lock); 4576 4577 ret = log_one_extent(trans, inode, root, em, path, ctx); 4578 write_lock(&tree->lock); 4579 clear_em_logging(tree, em); 4580 free_extent_map(em); 4581 } 4582 WARN_ON(!list_empty(&extents)); 4583 write_unlock(&tree->lock); 4584 4585 btrfs_release_path(path); 4586 if (!ret) 4587 ret = btrfs_log_prealloc_extents(trans, inode, path); 4588 4589 return ret; 4590 } 4591 4592 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode, 4593 struct btrfs_path *path, u64 *size_ret) 4594 { 4595 struct btrfs_key key; 4596 int ret; 4597 4598 key.objectid = btrfs_ino(inode); 4599 key.type = BTRFS_INODE_ITEM_KEY; 4600 key.offset = 0; 4601 4602 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0); 4603 if (ret < 0) { 4604 return ret; 4605 } else if (ret > 0) { 4606 *size_ret = 0; 4607 } else { 4608 struct btrfs_inode_item *item; 4609 4610 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 4611 struct btrfs_inode_item); 4612 *size_ret = btrfs_inode_size(path->nodes[0], item); 4613 /* 4614 * If the in-memory inode's i_size is smaller than the inode 4615 * size stored in the btree, return the inode's i_size, so 4616 * that we get a correct inode size after replaying the log 4617 * when before a power failure we had a shrinking truncate 4618 * followed by addition of a new name (rename / new hard link).
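 * For example: an inode with a 1Mb i_size is logged, then truncated
 * to 512Kb and renamed in the same transaction; when the rename logs
 * the inode again we must use 512Kb, not the stale 1Mb still recorded
 * in the log tree.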
4619 * Otherwise return the inode size from the btree, to avoid 4620 * data loss when replaying a log due to previously doing a 4621 * write that expands the inode's size and logging a new name 4622 * immediately after. 4623 */ 4624 if (*size_ret > inode->vfs_inode.i_size) 4625 *size_ret = inode->vfs_inode.i_size; 4626 } 4627 4628 btrfs_release_path(path); 4629 return 0; 4630 } 4631 4632 /* 4633 * At the moment we always log all xattrs. This is to figure out at log replay 4634 * time which xattrs must have their deletion replayed. If a xattr is missing 4635 * in the log tree and exists in the fs/subvol tree, we delete it. This is 4636 * because if a xattr is deleted, the inode is fsynced and a power failure 4637 * happens, causing the log to be replayed the next time the fs is mounted, 4638 * we want the xattr to not exist anymore (same behaviour as other filesystems 4639 * with a journal, ext3/4, xfs, f2fs, etc). 4640 */ 4641 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, 4642 struct btrfs_root *root, 4643 struct btrfs_inode *inode, 4644 struct btrfs_path *path, 4645 struct btrfs_path *dst_path) 4646 { 4647 int ret; 4648 struct btrfs_key key; 4649 const u64 ino = btrfs_ino(inode); 4650 int ins_nr = 0; 4651 int start_slot = 0; 4652 4653 key.objectid = ino; 4654 key.type = BTRFS_XATTR_ITEM_KEY; 4655 key.offset = 0; 4656 4657 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4658 if (ret < 0) 4659 return ret; 4660 4661 while (true) { 4662 int slot = path->slots[0]; 4663 struct extent_buffer *leaf = path->nodes[0]; 4664 int nritems = btrfs_header_nritems(leaf); 4665 4666 if (slot >= nritems) { 4667 if (ins_nr > 0) { 4668 u64 last_extent = 0; 4669 4670 ret = copy_items(trans, inode, dst_path, path, 4671 &last_extent, start_slot, 4672 ins_nr, 1, 0); 4673 /* can't be 1, extent items aren't processed */ 4674 ASSERT(ret <= 0); 4675 if (ret < 0) 4676 return ret; 4677 ins_nr = 0; 4678 } 4679 ret = btrfs_next_leaf(root, path); 4680 if (ret < 0) 4681 return ret; 4682 else if (ret > 0) 4683 break; 4684 continue; 4685 } 4686 4687 btrfs_item_key_to_cpu(leaf, &key, slot); 4688 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) 4689 break; 4690 4691 if (ins_nr == 0) 4692 start_slot = slot; 4693 ins_nr++; 4694 path->slots[0]++; 4695 cond_resched(); 4696 } 4697 if (ins_nr > 0) { 4698 u64 last_extent = 0; 4699 4700 ret = copy_items(trans, inode, dst_path, path, 4701 &last_extent, start_slot, 4702 ins_nr, 1, 0); 4703 /* can't be 1, extent items aren't processed */ 4704 ASSERT(ret <= 0); 4705 if (ret < 0) 4706 return ret; 4707 } 4708 4709 return 0; 4710 } 4711 4712 /* 4713 * If the no holes feature is enabled we need to make sure any hole between the 4714 * last extent and the i_size of our inode is explicitly marked in the log. This 4715 * is to make sure that doing something like: 4716 * 4717 * 1) create file with 128Kb of data 4718 * 2) truncate file to 64Kb 4719 * 3) truncate file to 256Kb 4720 * 4) fsync file 4721 * 5) <crash/power failure> 4722 * 6) mount fs and trigger log replay 4723 * 4724 * Will give us a file with a size of 256Kb, the first 64Kb of data match what 4725 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the 4726 * file correspond to a hole. The presence of explicit holes in a log tree is 4727 * what guarantees that log replay will remove/adjust file extent items in the 4728 * fs/subvol tree. 4729 * 4730 * Here we do not need to care about holes between extents, that is already done 4731 * by copy_items(). 
We also only need to do this in the full sync path, where we 4732 * lookup for extents from the fs/subvol tree only. In the fast path case, we 4733 * lookup the list of modified extent maps and if any represents a hole, we 4734 * insert a corresponding extent representing a hole in the log tree. 4735 */ 4736 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, 4737 struct btrfs_root *root, 4738 struct btrfs_inode *inode, 4739 struct btrfs_path *path) 4740 { 4741 struct btrfs_fs_info *fs_info = root->fs_info; 4742 int ret; 4743 struct btrfs_key key; 4744 u64 hole_start; 4745 u64 hole_size; 4746 struct extent_buffer *leaf; 4747 struct btrfs_root *log = root->log_root; 4748 const u64 ino = btrfs_ino(inode); 4749 const u64 i_size = i_size_read(&inode->vfs_inode); 4750 4751 if (!btrfs_fs_incompat(fs_info, NO_HOLES)) 4752 return 0; 4753 4754 key.objectid = ino; 4755 key.type = BTRFS_EXTENT_DATA_KEY; 4756 key.offset = (u64)-1; 4757 4758 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4759 ASSERT(ret != 0); 4760 if (ret < 0) 4761 return ret; 4762 4763 ASSERT(path->slots[0] > 0); 4764 path->slots[0]--; 4765 leaf = path->nodes[0]; 4766 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4767 4768 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) { 4769 /* inode does not have any extents */ 4770 hole_start = 0; 4771 hole_size = i_size; 4772 } else { 4773 struct btrfs_file_extent_item *extent; 4774 u64 len; 4775 4776 /* 4777 * If there's an extent beyond i_size, an explicit hole was 4778 * already inserted by copy_items(). 4779 */ 4780 if (key.offset >= i_size) 4781 return 0; 4782 4783 extent = btrfs_item_ptr(leaf, path->slots[0], 4784 struct btrfs_file_extent_item); 4785 4786 if (btrfs_file_extent_type(leaf, extent) == 4787 BTRFS_FILE_EXTENT_INLINE) 4788 return 0; 4789 4790 len = btrfs_file_extent_num_bytes(leaf, extent); 4791 /* Last extent goes beyond i_size, no need to log a hole. */ 4792 if (key.offset + len > i_size) 4793 return 0; 4794 hole_start = key.offset + len; 4795 hole_size = i_size - hole_start; 4796 } 4797 btrfs_release_path(path); 4798 4799 /* Last extent ends at i_size. */ 4800 if (hole_size == 0) 4801 return 0; 4802 4803 hole_size = ALIGN(hole_size, fs_info->sectorsize); 4804 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0, 4805 hole_size, 0, hole_size, 0, 0, 0); 4806 return ret; 4807 } 4808 4809 /* 4810 * When we are logging a new inode X, check if it doesn't have a reference that 4811 * matches the reference from some other inode Y created in a past transaction 4812 * and that was renamed in the current transaction. If we don't do this, then at 4813 * log replay time we can lose inode Y (and all its files if it's a directory): 4814 * 4815 * mkdir /mnt/x 4816 * echo "hello world" > /mnt/x/foobar 4817 * sync 4818 * mv /mnt/x /mnt/y 4819 * mkdir /mnt/x # or touch /mnt/x 4820 * xfs_io -c fsync /mnt/x 4821 * <power fail> 4822 * mount fs, trigger log replay 4823 * 4824 * After the log replay procedure, we would lose the first directory and all its 4825 * files (file foobar). 
4826 * For the case where inode Y is not a directory we simply end up losing it: 4827 * 4828 * echo "123" > /mnt/foo 4829 * sync 4830 * mv /mnt/foo /mnt/bar 4831 * echo "abc" > /mnt/foo 4832 * xfs_io -c fsync /mnt/foo 4833 * <power fail> 4834 * 4835 * We also need this for cases where a snapshot entry is replaced by some other 4836 * entry (file or directory) otherwise we end up with an unreplayable log due to 4837 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as 4838 * if it were a regular entry: 4839 * 4840 * mkdir /mnt/x 4841 * btrfs subvolume snapshot /mnt /mnt/x/snap 4842 * btrfs subvolume delete /mnt/x/snap 4843 * rmdir /mnt/x 4844 * mkdir /mnt/x 4845 * fsync /mnt/x or fsync some new file inside it 4846 * <power fail> 4847 * 4848 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in 4849 * the same transaction. 4850 */ 4851 static int btrfs_check_ref_name_override(struct extent_buffer *eb, 4852 const int slot, 4853 const struct btrfs_key *key, 4854 struct btrfs_inode *inode, 4855 u64 *other_ino, u64 *other_parent) 4856 { 4857 int ret; 4858 struct btrfs_path *search_path; 4859 char *name = NULL; 4860 u32 name_len = 0; 4861 u32 item_size = btrfs_item_size_nr(eb, slot); 4862 u32 cur_offset = 0; 4863 unsigned long ptr = btrfs_item_ptr_offset(eb, slot); 4864 4865 search_path = btrfs_alloc_path(); 4866 if (!search_path) 4867 return -ENOMEM; 4868 search_path->search_commit_root = 1; 4869 search_path->skip_locking = 1; 4870 4871 while (cur_offset < item_size) { 4872 u64 parent; 4873 u32 this_name_len; 4874 u32 this_len; 4875 unsigned long name_ptr; 4876 struct btrfs_dir_item *di; 4877 4878 if (key->type == BTRFS_INODE_REF_KEY) { 4879 struct btrfs_inode_ref *iref; 4880 4881 iref = (struct btrfs_inode_ref *)(ptr + cur_offset); 4882 parent = key->offset; 4883 this_name_len = btrfs_inode_ref_name_len(eb, iref); 4884 name_ptr = (unsigned long)(iref + 1); 4885 this_len = sizeof(*iref) + this_name_len; 4886 } else { 4887 struct btrfs_inode_extref *extref; 4888 4889 extref = (struct btrfs_inode_extref *)(ptr + 4890 cur_offset); 4891 parent = btrfs_inode_extref_parent(eb, extref); 4892 this_name_len = btrfs_inode_extref_name_len(eb, extref); 4893 name_ptr = (unsigned long)&extref->name; 4894 this_len = sizeof(*extref) + this_name_len; 4895 } 4896 4897 if (this_name_len > name_len) { 4898 char *new_name; 4899 4900 new_name = krealloc(name, this_name_len, GFP_NOFS); 4901 if (!new_name) { 4902 ret = -ENOMEM; 4903 goto out; 4904 } 4905 name_len = this_name_len; 4906 name = new_name; 4907 } 4908 4909 read_extent_buffer(eb, name, name_ptr, this_name_len); 4910 di = btrfs_lookup_dir_item(NULL, inode->root, search_path, 4911 parent, name, this_name_len, 0); 4912 if (di && !IS_ERR(di)) { 4913 struct btrfs_key di_key; 4914 4915 btrfs_dir_item_key_to_cpu(search_path->nodes[0], 4916 di, &di_key); 4917 if (di_key.type == BTRFS_INODE_ITEM_KEY) { 4918 if (di_key.objectid != key->objectid) { 4919 ret = 1; 4920 *other_ino = di_key.objectid; 4921 *other_parent = parent; 4922 } else { 4923 ret = 0; 4924 } 4925 } else { 4926 ret = -EAGAIN; 4927 } 4928 goto out; 4929 } else if (IS_ERR(di)) { 4930 ret = PTR_ERR(di); 4931 goto out; 4932 } 4933 btrfs_release_path(search_path); 4934 4935 cur_offset += this_len; 4936 } 4937 ret = 0; 4938 out: 4939 btrfs_free_path(search_path); 4940 kfree(name); 4941 return ret; 4942 } 4943 4944 struct btrfs_ino_list { 4945 u64 ino; 4946 u64 parent; 4947 struct list_head list; 4948 }; 4949 4950 static int log_conflicting_inodes(struct 
btrfs_trans_handle *trans, 4951 struct btrfs_root *root, 4952 struct btrfs_path *path, 4953 struct btrfs_log_ctx *ctx, 4954 u64 ino, u64 parent) 4955 { 4956 struct btrfs_ino_list *ino_elem; 4957 LIST_HEAD(inode_list); 4958 int ret = 0; 4959 4960 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); 4961 if (!ino_elem) 4962 return -ENOMEM; 4963 ino_elem->ino = ino; 4964 ino_elem->parent = parent; 4965 list_add_tail(&ino_elem->list, &inode_list); 4966 4967 while (!list_empty(&inode_list)) { 4968 struct btrfs_fs_info *fs_info = root->fs_info; 4969 struct btrfs_key key; 4970 struct inode *inode; 4971 4972 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list, 4973 list); 4974 ino = ino_elem->ino; 4975 parent = ino_elem->parent; 4976 list_del(&ino_elem->list); 4977 kfree(ino_elem); 4978 if (ret) 4979 continue; 4980 4981 btrfs_release_path(path); 4982 4983 key.objectid = ino; 4984 key.type = BTRFS_INODE_ITEM_KEY; 4985 key.offset = 0; 4986 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 4987 /* 4988 * If the other inode that had a conflicting dir entry was 4989 * deleted in the current transaction, we need to log its parent 4990 * directory. 4991 */ 4992 if (IS_ERR(inode)) { 4993 ret = PTR_ERR(inode); 4994 if (ret == -ENOENT) { 4995 key.objectid = parent; 4996 inode = btrfs_iget(fs_info->sb, &key, root, 4997 NULL); 4998 if (IS_ERR(inode)) { 4999 ret = PTR_ERR(inode); 5000 } else { 5001 ret = btrfs_log_inode(trans, root, 5002 BTRFS_I(inode), 5003 LOG_OTHER_INODE_ALL, 5004 0, LLONG_MAX, ctx); 5005 btrfs_add_delayed_iput(inode); 5006 } 5007 } 5008 continue; 5009 } 5010 /* 5011 * We are safe logging the other inode without acquiring its 5012 * lock as long as we log with the LOG_INODE_EXISTS mode. We 5013 * are safe against concurrent renames of the other inode as 5014 * well because during a rename we pin the log and update the 5015 * log with the new name before we unpin it. 5016 */ 5017 ret = btrfs_log_inode(trans, root, BTRFS_I(inode), 5018 LOG_OTHER_INODE, 0, LLONG_MAX, ctx); 5019 if (ret) { 5020 btrfs_add_delayed_iput(inode); 5021 continue; 5022 } 5023 5024 key.objectid = ino; 5025 key.type = BTRFS_INODE_REF_KEY; 5026 key.offset = 0; 5027 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5028 if (ret < 0) { 5029 btrfs_add_delayed_iput(inode); 5030 continue; 5031 } 5032 5033 while (true) { 5034 struct extent_buffer *leaf = path->nodes[0]; 5035 int slot = path->slots[0]; 5036 u64 other_ino = 0; 5037 u64 other_parent = 0; 5038 5039 if (slot >= btrfs_header_nritems(leaf)) { 5040 ret = btrfs_next_leaf(root, path); 5041 if (ret < 0) { 5042 break; 5043 } else if (ret > 0) { 5044 ret = 0; 5045 break; 5046 } 5047 continue; 5048 } 5049 5050 btrfs_item_key_to_cpu(leaf, &key, slot); 5051 if (key.objectid != ino || 5052 (key.type != BTRFS_INODE_REF_KEY && 5053 key.type != BTRFS_INODE_EXTREF_KEY)) { 5054 ret = 0; 5055 break; 5056 } 5057 5058 ret = btrfs_check_ref_name_override(leaf, slot, &key, 5059 BTRFS_I(inode), &other_ino, 5060 &other_parent); 5061 if (ret < 0) 5062 break; 5063 if (ret > 0) { 5064 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); 5065 if (!ino_elem) { 5066 ret = -ENOMEM; 5067 break; 5068 } 5069 ino_elem->ino = other_ino; 5070 ino_elem->parent = other_parent; 5071 list_add_tail(&ino_elem->list, &inode_list); 5072 ret = 0; 5073 } 5074 path->slots[0]++; 5075 } 5076 btrfs_add_delayed_iput(inode); 5077 } 5078 5079 return ret; 5080 } 5081 5082 /* log a single inode in the tree log. 
5083 * At least one parent directory for this inode must exist in the tree 5084 * or be logged already. 5085 * 5086 * Any items from this inode changed by the current transaction are copied 5087 * to the log tree. An extra reference is taken on any extents in this 5088 * file, allowing us to avoid a whole pile of corner cases around logging 5089 * blocks that have been removed from the tree. 5090 * 5091 * See LOG_INODE_ALL and related defines for a description of what inode_only 5092 * does. 5093 * 5094 * This handles both files and directories. 5095 */ 5096 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 5097 struct btrfs_root *root, struct btrfs_inode *inode, 5098 int inode_only, 5099 const loff_t start, 5100 const loff_t end, 5101 struct btrfs_log_ctx *ctx) 5102 { 5103 struct btrfs_fs_info *fs_info = root->fs_info; 5104 struct btrfs_path *path; 5105 struct btrfs_path *dst_path; 5106 struct btrfs_key min_key; 5107 struct btrfs_key max_key; 5108 struct btrfs_root *log = root->log_root; 5109 u64 last_extent = 0; 5110 int err = 0; 5111 int ret; 5112 int nritems; 5113 int ins_start_slot = 0; 5114 int ins_nr; 5115 bool fast_search = false; 5116 u64 ino = btrfs_ino(inode); 5117 struct extent_map_tree *em_tree = &inode->extent_tree; 5118 u64 logged_isize = 0; 5119 bool need_log_inode_item = true; 5120 bool xattrs_logged = false; 5121 bool recursive_logging = false; 5122 5123 path = btrfs_alloc_path(); 5124 if (!path) 5125 return -ENOMEM; 5126 dst_path = btrfs_alloc_path(); 5127 if (!dst_path) { 5128 btrfs_free_path(path); 5129 return -ENOMEM; 5130 } 5131 5132 min_key.objectid = ino; 5133 min_key.type = BTRFS_INODE_ITEM_KEY; 5134 min_key.offset = 0; 5135 5136 max_key.objectid = ino; 5137 5138 5139 /* today the code can only do partial logging of directories */ 5140 if (S_ISDIR(inode->vfs_inode.i_mode) || 5141 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 5142 &inode->runtime_flags) && 5143 inode_only >= LOG_INODE_EXISTS)) 5144 max_key.type = BTRFS_XATTR_ITEM_KEY; 5145 else 5146 max_key.type = (u8)-1; 5147 max_key.offset = (u64)-1; 5148 5149 /* 5150 * Only run delayed items if we are a dir or a new file. 5151 * Otherwise commit the delayed inode only, which is needed in 5152 * order for the log replay code to mark inodes for link count 5153 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items). 5154 */ 5155 if (S_ISDIR(inode->vfs_inode.i_mode) || 5156 inode->generation > fs_info->last_trans_committed) 5157 ret = btrfs_commit_inode_delayed_items(trans, inode); 5158 else 5159 ret = btrfs_commit_inode_delayed_inode(inode); 5160 5161 if (ret) { 5162 btrfs_free_path(path); 5163 btrfs_free_path(dst_path); 5164 return ret; 5165 } 5166 5167 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) { 5168 recursive_logging = true; 5169 if (inode_only == LOG_OTHER_INODE) 5170 inode_only = LOG_INODE_EXISTS; 5171 else 5172 inode_only = LOG_INODE_ALL; 5173 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING); 5174 } else { 5175 mutex_lock(&inode->log_mutex); 5176 } 5177 5178 /* 5179 * a brute force approach to making sure we get the most uptodate 5180 * copies of everything. 
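 *
 * Depending on the inode type and logging mode, what was previously
 * logged for this inode is dropped below (by key range or by
 * truncating its items in the log tree) and fresh copies are then made
 * from the fs/subvol tree.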
5181 */ 5182 if (S_ISDIR(inode->vfs_inode.i_mode)) { 5183 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; 5184 5185 if (inode_only == LOG_INODE_EXISTS) 5186 max_key_type = BTRFS_XATTR_ITEM_KEY; 5187 ret = drop_objectid_items(trans, log, path, ino, max_key_type); 5188 } else { 5189 if (inode_only == LOG_INODE_EXISTS) { 5190 /* 5191 * Make sure the new inode item we write to the log has 5192 * the same isize as the current one (if it exists). 5193 * This is necessary to prevent data loss after log 5194 * replay, and also to prevent doing a wrong expanding 5195 * truncate - for e.g. create file, write 4K into offset 5196 * 0, fsync, write 4K into offset 4096, add hard link, 5197 * fsync some other file (to sync log), power fail - if 5198 * we use the inode's current i_size, after log replay 5199 * we get an 8Kb file, with the last 4Kb extent as a hole 5200 * (zeroes), as if an expanding truncate happened, 5201 * instead of getting a file of 4Kb only. 5202 */ 5203 err = logged_inode_size(log, inode, path, &logged_isize); 5204 if (err) 5205 goto out_unlock; 5206 } 5207 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 5208 &inode->runtime_flags)) { 5209 if (inode_only == LOG_INODE_EXISTS) { 5210 max_key.type = BTRFS_XATTR_ITEM_KEY; 5211 ret = drop_objectid_items(trans, log, path, ino, 5212 max_key.type); 5213 } else { 5214 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 5215 &inode->runtime_flags); 5216 clear_bit(BTRFS_INODE_COPY_EVERYTHING, 5217 &inode->runtime_flags); 5218 while (1) { 5219 ret = btrfs_truncate_inode_items(trans, 5220 log, &inode->vfs_inode, 0, 0); 5221 if (ret != -EAGAIN) 5222 break; 5223 } 5224 } 5225 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, 5226 &inode->runtime_flags) || 5227 inode_only == LOG_INODE_EXISTS) { 5228 if (inode_only == LOG_INODE_ALL) 5229 fast_search = true; 5230 max_key.type = BTRFS_XATTR_ITEM_KEY; 5231 ret = drop_objectid_items(trans, log, path, ino, 5232 max_key.type); 5233 } else { 5234 if (inode_only == LOG_INODE_ALL) 5235 fast_search = true; 5236 goto log_extents; 5237 } 5238 5239 } 5240 if (ret) { 5241 err = ret; 5242 goto out_unlock; 5243 } 5244 5245 while (1) { 5246 ins_nr = 0; 5247 ret = btrfs_search_forward(root, &min_key, 5248 path, trans->transid); 5249 if (ret < 0) { 5250 err = ret; 5251 goto out_unlock; 5252 } 5253 if (ret != 0) 5254 break; 5255 again: 5256 /* note, ins_nr might be > 0 here, cleanup outside the loop */ 5257 if (min_key.objectid != ino) 5258 break; 5259 if (min_key.type > max_key.type) 5260 break; 5261 5262 if (min_key.type == BTRFS_INODE_ITEM_KEY) 5263 need_log_inode_item = false; 5264 5265 if ((min_key.type == BTRFS_INODE_REF_KEY || 5266 min_key.type == BTRFS_INODE_EXTREF_KEY) && 5267 inode->generation == trans->transid && 5268 !recursive_logging) { 5269 u64 other_ino = 0; 5270 u64 other_parent = 0; 5271 5272 ret = btrfs_check_ref_name_override(path->nodes[0], 5273 path->slots[0], &min_key, inode, 5274 &other_ino, &other_parent); 5275 if (ret < 0) { 5276 err = ret; 5277 goto out_unlock; 5278 } else if (ret > 0 && ctx && 5279 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) { 5280 if (ins_nr > 0) { 5281 ins_nr++; 5282 } else { 5283 ins_nr = 1; 5284 ins_start_slot = path->slots[0]; 5285 } 5286 ret = copy_items(trans, inode, dst_path, path, 5287 &last_extent, ins_start_slot, 5288 ins_nr, inode_only, 5289 logged_isize); 5290 if (ret < 0) { 5291 err = ret; 5292 goto out_unlock; 5293 } 5294 ins_nr = 0; 5295 5296 err = log_conflicting_inodes(trans, root, path, 5297 ctx, other_ino, other_parent); 5298 if (err) 5299 goto out_unlock; 5300
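/*
 * log_conflicting_inodes() used our search path for
 * its own lookups, so release it and resume from the
 * next key.
 */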
btrfs_release_path(path); 5301 goto next_key; 5302 } 5303 } 5304 5305 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ 5306 if (min_key.type == BTRFS_XATTR_ITEM_KEY) { 5307 if (ins_nr == 0) 5308 goto next_slot; 5309 ret = copy_items(trans, inode, dst_path, path, 5310 &last_extent, ins_start_slot, 5311 ins_nr, inode_only, logged_isize); 5312 if (ret < 0) { 5313 err = ret; 5314 goto out_unlock; 5315 } 5316 ins_nr = 0; 5317 if (ret) { 5318 btrfs_release_path(path); 5319 continue; 5320 } 5321 goto next_slot; 5322 } 5323 5324 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { 5325 ins_nr++; 5326 goto next_slot; 5327 } else if (!ins_nr) { 5328 ins_start_slot = path->slots[0]; 5329 ins_nr = 1; 5330 goto next_slot; 5331 } 5332 5333 ret = copy_items(trans, inode, dst_path, path, &last_extent, 5334 ins_start_slot, ins_nr, inode_only, 5335 logged_isize); 5336 if (ret < 0) { 5337 err = ret; 5338 goto out_unlock; 5339 } 5340 if (ret) { 5341 ins_nr = 0; 5342 btrfs_release_path(path); 5343 continue; 5344 } 5345 ins_nr = 1; 5346 ins_start_slot = path->slots[0]; 5347 next_slot: 5348 5349 nritems = btrfs_header_nritems(path->nodes[0]); 5350 path->slots[0]++; 5351 if (path->slots[0] < nritems) { 5352 btrfs_item_key_to_cpu(path->nodes[0], &min_key, 5353 path->slots[0]); 5354 goto again; 5355 } 5356 if (ins_nr) { 5357 ret = copy_items(trans, inode, dst_path, path, 5358 &last_extent, ins_start_slot, 5359 ins_nr, inode_only, logged_isize); 5360 if (ret < 0) { 5361 err = ret; 5362 goto out_unlock; 5363 } 5364 ret = 0; 5365 ins_nr = 0; 5366 } 5367 btrfs_release_path(path); 5368 next_key: 5369 if (min_key.offset < (u64)-1) { 5370 min_key.offset++; 5371 } else if (min_key.type < max_key.type) { 5372 min_key.type++; 5373 min_key.offset = 0; 5374 } else { 5375 break; 5376 } 5377 } 5378 if (ins_nr) { 5379 ret = copy_items(trans, inode, dst_path, path, &last_extent, 5380 ins_start_slot, ins_nr, inode_only, 5381 logged_isize); 5382 if (ret < 0) { 5383 err = ret; 5384 goto out_unlock; 5385 } 5386 ret = 0; 5387 ins_nr = 0; 5388 } 5389 5390 btrfs_release_path(path); 5391 btrfs_release_path(dst_path); 5392 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); 5393 if (err) 5394 goto out_unlock; 5395 xattrs_logged = true; 5396 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { 5397 btrfs_release_path(path); 5398 btrfs_release_path(dst_path); 5399 err = btrfs_log_trailing_hole(trans, root, inode, path); 5400 if (err) 5401 goto out_unlock; 5402 } 5403 log_extents: 5404 btrfs_release_path(path); 5405 btrfs_release_path(dst_path); 5406 if (need_log_inode_item) { 5407 err = log_inode_item(trans, log, dst_path, inode); 5408 if (!err && !xattrs_logged) { 5409 err = btrfs_log_all_xattrs(trans, root, inode, path, 5410 dst_path); 5411 btrfs_release_path(path); 5412 } 5413 if (err) 5414 goto out_unlock; 5415 } 5416 if (fast_search) { 5417 ret = btrfs_log_changed_extents(trans, root, inode, dst_path, 5418 ctx, start, end); 5419 if (ret) { 5420 err = ret; 5421 goto out_unlock; 5422 } 5423 } else if (inode_only == LOG_INODE_ALL) { 5424 struct extent_map *em, *n; 5425 5426 write_lock(&em_tree->lock); 5427 /* 5428 * We can't just remove every em if we're called for a ranged 5429 * fsync - that is, one that doesn't cover the whole possible 5430 * file range (0 to LLONG_MAX). This is because we can have 5431 * em's that fall outside the range we're logging and therefore 5432 * their ordered operations haven't completed yet 5433 * (btrfs_finish_ordered_io() not invoked yet). 
This means we 5434 * didn't get their respective file extent item in the fs/subvol 5435 * tree yet, and need to let the next fast fsync (one which 5436 * consults the list of modified extent maps) find the em so 5437 * that it logs a matching file extent item and waits for the 5438 * respective ordered operation to complete (if it's still 5439 * running). 5440 * 5441 * Removing every em outside the range we're logging would make 5442 * the next fast fsync not log their matching file extent items, 5443 * therefore making us lose data after a log replay. 5444 */ 5445 list_for_each_entry_safe(em, n, &em_tree->modified_extents, 5446 list) { 5447 const u64 mod_end = em->mod_start + em->mod_len - 1; 5448 5449 if (em->mod_start >= start && mod_end <= end) 5450 list_del_init(&em->list); 5451 } 5452 write_unlock(&em_tree->lock); 5453 } 5454 5455 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) { 5456 ret = log_directory_changes(trans, root, inode, path, dst_path, 5457 ctx); 5458 if (ret) { 5459 err = ret; 5460 goto out_unlock; 5461 } 5462 } 5463 5464 /* 5465 * Don't update last_log_commit if we logged that an inode exists after 5466 * it was loaded to memory (full_sync bit set). 5467 * This is to prevent data loss when we do a write to the inode, then 5468 * the inode gets evicted after all delalloc was flushed, then we log 5469 * it exists (due to a rename for example) and then fsync it. This last 5470 * fsync would do nothing (not logging the extents previously written). 5471 */ 5472 spin_lock(&inode->lock); 5473 inode->logged_trans = trans->transid; 5474 if (inode_only != LOG_INODE_EXISTS || 5475 !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags)) 5476 inode->last_log_commit = inode->last_sub_trans; 5477 spin_unlock(&inode->lock); 5478 out_unlock: 5479 mutex_unlock(&inode->log_mutex); 5480 5481 btrfs_free_path(path); 5482 btrfs_free_path(dst_path); 5483 return err; 5484 } 5485 5486 /* 5487 * Check if we must fallback to a transaction commit when logging an inode. 5488 * This must be called after logging the inode and is used only in the context 5489 * when fsyncing an inode requires the need to log some other inode - in which 5490 * case we can't lock the i_mutex of each other inode we need to log as that 5491 * can lead to deadlocks with concurrent fsync against other inodes (as we can 5492 * log inodes up or down in the hierarchy) or rename operations for example. So 5493 * we take the log_mutex of the inode after we have logged it and then check for 5494 * its last_unlink_trans value - this is safe because any task setting 5495 * last_unlink_trans must take the log_mutex and it must do this before it does 5496 * the actual unlink operation, so if we do this check before a concurrent task 5497 * sets last_unlink_trans it means we've logged a consistent version/state of 5498 * all the inode items, otherwise we are not sure and must do a transaction 5499 * commit (the concurrent task might have only updated last_unlink_trans before 5500 * we logged the inode or it might have also done the unlink). 5501 */ 5502 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, 5503 struct btrfs_inode *inode) 5504 { 5505 struct btrfs_fs_info *fs_info = inode->root->fs_info; 5506 bool ret = false; 5507 5508 mutex_lock(&inode->log_mutex); 5509 if (inode->last_unlink_trans > fs_info->last_trans_committed) { 5510 /* 5511 * Make sure any commits to the log are forced to be full 5512 * commits. 
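 * An unlink or rename happened in this transaction and we cannot be
 * sure its effect is fully captured in what we logged, so replaying
 * only the log could leave a stale name behind.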
5513 */ 5514 btrfs_set_log_full_commit(trans); 5515 ret = true; 5516 } 5517 mutex_unlock(&inode->log_mutex); 5518 5519 return ret; 5520 } 5521 5522 /* 5523 * follow the dentry parent pointers up the chain and see if any 5524 * of the directories in it require a full commit before they can 5525 * be logged. Returns zero if nothing special needs to be done or 1 if 5526 * a full commit is required. 5527 */ 5528 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, 5529 struct btrfs_inode *inode, 5530 struct dentry *parent, 5531 struct super_block *sb, 5532 u64 last_committed) 5533 { 5534 int ret = 0; 5535 struct dentry *old_parent = NULL; 5536 5537 /* 5538 * for regular files, if its inode is already on disk, we don't 5539 * have to worry about the parents at all. This is because 5540 * we can use the last_unlink_trans field to record renames 5541 * and other fun in this file. 5542 */ 5543 if (S_ISREG(inode->vfs_inode.i_mode) && 5544 inode->generation <= last_committed && 5545 inode->last_unlink_trans <= last_committed) 5546 goto out; 5547 5548 if (!S_ISDIR(inode->vfs_inode.i_mode)) { 5549 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5550 goto out; 5551 inode = BTRFS_I(d_inode(parent)); 5552 } 5553 5554 while (1) { 5555 if (btrfs_must_commit_transaction(trans, inode)) { 5556 ret = 1; 5557 break; 5558 } 5559 5560 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5561 break; 5562 5563 if (IS_ROOT(parent)) { 5564 inode = BTRFS_I(d_inode(parent)); 5565 if (btrfs_must_commit_transaction(trans, inode)) 5566 ret = 1; 5567 break; 5568 } 5569 5570 parent = dget_parent(parent); 5571 dput(old_parent); 5572 old_parent = parent; 5573 inode = BTRFS_I(d_inode(parent)); 5574 5575 } 5576 dput(old_parent); 5577 out: 5578 return ret; 5579 } 5580 5581 struct btrfs_dir_list { 5582 u64 ino; 5583 struct list_head list; 5584 }; 5585 5586 /* 5587 * Log the inodes of the new dentries of a directory. See log_dir_items() for 5588 * details about why it is needed. 5589 * This is a recursive operation - if an existing dentry corresponds to a 5590 * directory, that directory's new entries are logged too (same behaviour as 5591 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes 5592 * the dentries point to we do not lock their i_mutex, otherwise lockdep 5593 * complains about the following circular lock dependency / possible deadlock: 5594 * 5595 * CPU0 CPU1 5596 * ---- ---- 5597 * lock(&type->i_mutex_dir_key#3/2); 5598 * lock(sb_internal#2); 5599 * lock(&type->i_mutex_dir_key#3/2); 5600 * lock(&sb->s_type->i_mutex_key#14); 5601 * 5602 * Where sb_internal is the lock (a counter that works as a lock) acquired by 5603 * sb_start_intwrite() in btrfs_start_transaction(). 5604 * Not locking i_mutex of the inodes is still safe because: 5605 * 5606 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible 5607 * that while logging the inode new references (names) are added or removed 5608 * from the inode, leaving the logged inode item with a link count that does 5609 * not match the number of logged inode reference items. This is fine because 5610 * at log replay time we compute the real number of links and correct the 5611 * link count in the inode item (see replay_one_buffer() and 5612 * link_to_fixup_dir()); 5613 * 5614 * 2) For directories we log with a mode of LOG_INODE_ALL.
It's possible that 5615 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and 5616 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item 5617 * has a size that doesn't match the sum of the lengths of all the logged 5618 * names. This does not result in a problem because if a dir_item key is 5619 * logged but its matching dir_index key is not logged, at log replay time we 5620 * don't use it to replay the respective name (see replay_one_name()). On the 5621 * other hand if only the dir_index key ends up being logged, the respective 5622 * name is added to the fs/subvol tree with both the dir_item and dir_index 5623 * keys created (see replay_one_name()). 5624 * The directory's inode item with a wrong i_size is not a problem as well, 5625 * since we don't use it at log replay time to set the i_size in the inode 5626 * item of the fs/subvol tree (see overwrite_item()). 5627 */ 5628 static int log_new_dir_dentries(struct btrfs_trans_handle *trans, 5629 struct btrfs_root *root, 5630 struct btrfs_inode *start_inode, 5631 struct btrfs_log_ctx *ctx) 5632 { 5633 struct btrfs_fs_info *fs_info = root->fs_info; 5634 struct btrfs_root *log = root->log_root; 5635 struct btrfs_path *path; 5636 LIST_HEAD(dir_list); 5637 struct btrfs_dir_list *dir_elem; 5638 int ret = 0; 5639 5640 path = btrfs_alloc_path(); 5641 if (!path) 5642 return -ENOMEM; 5643 5644 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS); 5645 if (!dir_elem) { 5646 btrfs_free_path(path); 5647 return -ENOMEM; 5648 } 5649 dir_elem->ino = btrfs_ino(start_inode); 5650 list_add_tail(&dir_elem->list, &dir_list); 5651 5652 while (!list_empty(&dir_list)) { 5653 struct extent_buffer *leaf; 5654 struct btrfs_key min_key; 5655 int nritems; 5656 int i; 5657 5658 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list, 5659 list); 5660 if (ret) 5661 goto next_dir_inode; 5662 5663 min_key.objectid = dir_elem->ino; 5664 min_key.type = BTRFS_DIR_ITEM_KEY; 5665 min_key.offset = 0; 5666 again: 5667 btrfs_release_path(path); 5668 ret = btrfs_search_forward(log, &min_key, path, trans->transid); 5669 if (ret < 0) { 5670 goto next_dir_inode; 5671 } else if (ret > 0) { 5672 ret = 0; 5673 goto next_dir_inode; 5674 } 5675 5676 process_leaf: 5677 leaf = path->nodes[0]; 5678 nritems = btrfs_header_nritems(leaf); 5679 for (i = path->slots[0]; i < nritems; i++) { 5680 struct btrfs_dir_item *di; 5681 struct btrfs_key di_key; 5682 struct inode *di_inode; 5683 struct btrfs_dir_list *new_dir_elem; 5684 int log_mode = LOG_INODE_EXISTS; 5685 int type; 5686 5687 btrfs_item_key_to_cpu(leaf, &min_key, i); 5688 if (min_key.objectid != dir_elem->ino || 5689 min_key.type != BTRFS_DIR_ITEM_KEY) 5690 goto next_dir_inode; 5691 5692 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item); 5693 type = btrfs_dir_type(leaf, di); 5694 if (btrfs_dir_transid(leaf, di) < trans->transid && 5695 type != BTRFS_FT_DIR) 5696 continue; 5697 btrfs_dir_item_key_to_cpu(leaf, di, &di_key); 5698 if (di_key.type == BTRFS_ROOT_ITEM_KEY) 5699 continue; 5700 5701 btrfs_release_path(path); 5702 di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL); 5703 if (IS_ERR(di_inode)) { 5704 ret = PTR_ERR(di_inode); 5705 goto next_dir_inode; 5706 } 5707 5708 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) { 5709 btrfs_add_delayed_iput(di_inode); 5710 break; 5711 } 5712 5713 ctx->log_new_dentries = false; 5714 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK) 5715 log_mode = LOG_INODE_ALL; 5716 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode), 5717 
log_mode, 0, LLONG_MAX, ctx); 5718 if (!ret && 5719 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode))) 5720 ret = 1; 5721 btrfs_add_delayed_iput(di_inode); 5722 if (ret) 5723 goto next_dir_inode; 5724 if (ctx->log_new_dentries) { 5725 new_dir_elem = kmalloc(sizeof(*new_dir_elem), 5726 GFP_NOFS); 5727 if (!new_dir_elem) { 5728 ret = -ENOMEM; 5729 goto next_dir_inode; 5730 } 5731 new_dir_elem->ino = di_key.objectid; 5732 list_add_tail(&new_dir_elem->list, &dir_list); 5733 } 5734 break; 5735 } 5736 if (i == nritems) { 5737 ret = btrfs_next_leaf(log, path); 5738 if (ret < 0) { 5739 goto next_dir_inode; 5740 } else if (ret > 0) { 5741 ret = 0; 5742 goto next_dir_inode; 5743 } 5744 goto process_leaf; 5745 } 5746 if (min_key.offset < (u64)-1) { 5747 min_key.offset++; 5748 goto again; 5749 } 5750 next_dir_inode: 5751 list_del(&dir_elem->list); 5752 kfree(dir_elem); 5753 } 5754 5755 btrfs_free_path(path); 5756 return ret; 5757 } 5758 5759 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, 5760 struct btrfs_inode *inode, 5761 struct btrfs_log_ctx *ctx) 5762 { 5763 struct btrfs_fs_info *fs_info = trans->fs_info; 5764 int ret; 5765 struct btrfs_path *path; 5766 struct btrfs_key key; 5767 struct btrfs_root *root = inode->root; 5768 const u64 ino = btrfs_ino(inode); 5769 5770 path = btrfs_alloc_path(); 5771 if (!path) 5772 return -ENOMEM; 5773 path->skip_locking = 1; 5774 path->search_commit_root = 1; 5775 5776 key.objectid = ino; 5777 key.type = BTRFS_INODE_REF_KEY; 5778 key.offset = 0; 5779 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5780 if (ret < 0) 5781 goto out; 5782 5783 while (true) { 5784 struct extent_buffer *leaf = path->nodes[0]; 5785 int slot = path->slots[0]; 5786 u32 cur_offset = 0; 5787 u32 item_size; 5788 unsigned long ptr; 5789 5790 if (slot >= btrfs_header_nritems(leaf)) { 5791 ret = btrfs_next_leaf(root, path); 5792 if (ret < 0) 5793 goto out; 5794 else if (ret > 0) 5795 break; 5796 continue; 5797 } 5798 5799 btrfs_item_key_to_cpu(leaf, &key, slot); 5800 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */ 5801 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY) 5802 break; 5803 5804 item_size = btrfs_item_size_nr(leaf, slot); 5805 ptr = btrfs_item_ptr_offset(leaf, slot); 5806 while (cur_offset < item_size) { 5807 struct btrfs_key inode_key; 5808 struct inode *dir_inode; 5809 5810 inode_key.type = BTRFS_INODE_ITEM_KEY; 5811 inode_key.offset = 0; 5812 5813 if (key.type == BTRFS_INODE_EXTREF_KEY) { 5814 struct btrfs_inode_extref *extref; 5815 5816 extref = (struct btrfs_inode_extref *) 5817 (ptr + cur_offset); 5818 inode_key.objectid = btrfs_inode_extref_parent( 5819 leaf, extref); 5820 cur_offset += sizeof(*extref); 5821 cur_offset += btrfs_inode_extref_name_len(leaf, 5822 extref); 5823 } else { 5824 inode_key.objectid = key.offset; 5825 cur_offset = item_size; 5826 } 5827 5828 dir_inode = btrfs_iget(fs_info->sb, &inode_key, 5829 root, NULL); 5830 /* 5831 * If the parent inode was deleted, return an error to 5832 * fallback to a transaction commit. This is to prevent 5833 * getting an inode that was moved from one parent A to 5834 * a parent B, got its former parent A deleted and then 5835 * it got fsync'ed, from existing at both parents after 5836 * a log replay (and the old parent still existing). 
5837 * Example: 5838 * 5839 * mkdir /mnt/A 5840 * mkdir /mnt/B 5841 * touch /mnt/B/bar 5842 * sync 5843 * mv /mnt/B/bar /mnt/A/bar 5844 * mv -T /mnt/A /mnt/B 5845 * fsync /mnt/B/bar 5846 * <power fail> 5847 * 5848 * If we ignore the old parent B which got deleted, 5849 * after a log replay we would have file bar linked 5850 * at both parents and the old parent B would still 5851 * exist. 5852 */ 5853 if (IS_ERR(dir_inode)) { 5854 ret = PTR_ERR(dir_inode); 5855 goto out; 5856 } 5857 5858 if (ctx) 5859 ctx->log_new_dentries = false; 5860 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode), 5861 LOG_INODE_ALL, 0, LLONG_MAX, ctx); 5862 if (!ret && 5863 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode))) 5864 ret = 1; 5865 if (!ret && ctx && ctx->log_new_dentries) 5866 ret = log_new_dir_dentries(trans, root, 5867 BTRFS_I(dir_inode), ctx); 5868 btrfs_add_delayed_iput(dir_inode); 5869 if (ret) 5870 goto out; 5871 } 5872 path->slots[0]++; 5873 } 5874 ret = 0; 5875 out: 5876 btrfs_free_path(path); 5877 return ret; 5878 } 5879 5880 static int log_new_ancestors(struct btrfs_trans_handle *trans, 5881 struct btrfs_root *root, 5882 struct btrfs_path *path, 5883 struct btrfs_log_ctx *ctx) 5884 { 5885 struct btrfs_key found_key; 5886 5887 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); 5888 5889 while (true) { 5890 struct btrfs_fs_info *fs_info = root->fs_info; 5891 const u64 last_committed = fs_info->last_trans_committed; 5892 struct extent_buffer *leaf = path->nodes[0]; 5893 int slot = path->slots[0]; 5894 struct btrfs_key search_key; 5895 struct inode *inode; 5896 int ret = 0; 5897 5898 btrfs_release_path(path); 5899 5900 search_key.objectid = found_key.offset; 5901 search_key.type = BTRFS_INODE_ITEM_KEY; 5902 search_key.offset = 0; 5903 inode = btrfs_iget(fs_info->sb, &search_key, root, NULL); 5904 if (IS_ERR(inode)) 5905 return PTR_ERR(inode); 5906 5907 if (BTRFS_I(inode)->generation > last_committed) 5908 ret = btrfs_log_inode(trans, root, BTRFS_I(inode), 5909 LOG_INODE_EXISTS, 5910 0, LLONG_MAX, ctx); 5911 btrfs_add_delayed_iput(inode); 5912 if (ret) 5913 return ret; 5914 5915 if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID) 5916 break; 5917 5918 search_key.type = BTRFS_INODE_REF_KEY; 5919 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 5920 if (ret < 0) 5921 return ret; 5922 5923 leaf = path->nodes[0]; 5924 slot = path->slots[0]; 5925 if (slot >= btrfs_header_nritems(leaf)) { 5926 ret = btrfs_next_leaf(root, path); 5927 if (ret < 0) 5928 return ret; 5929 else if (ret > 0) 5930 return -ENOENT; 5931 leaf = path->nodes[0]; 5932 slot = path->slots[0]; 5933 } 5934 5935 btrfs_item_key_to_cpu(leaf, &found_key, slot); 5936 if (found_key.objectid != search_key.objectid || 5937 found_key.type != BTRFS_INODE_REF_KEY) 5938 return -ENOENT; 5939 } 5940 return 0; 5941 } 5942 5943 static int log_new_ancestors_fast(struct btrfs_trans_handle *trans, 5944 struct btrfs_inode *inode, 5945 struct dentry *parent, 5946 struct btrfs_log_ctx *ctx) 5947 { 5948 struct btrfs_root *root = inode->root; 5949 struct btrfs_fs_info *fs_info = root->fs_info; 5950 struct dentry *old_parent = NULL; 5951 struct super_block *sb = inode->vfs_inode.i_sb; 5952 int ret = 0; 5953 5954 while (true) { 5955 if (!parent || d_really_is_negative(parent) || 5956 sb != parent->d_sb) 5957 break; 5958 5959 inode = BTRFS_I(d_inode(parent)); 5960 if (root != inode->root) 5961 break; 5962 5963 if (inode->generation > fs_info->last_trans_committed) { 5964 ret = btrfs_log_inode(trans, root, inode, 5965 
static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
				  struct btrfs_inode *inode,
				  struct dentry *parent,
				  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct dentry *old_parent = NULL;
	struct super_block *sb = inode->vfs_inode.i_sb;
	int ret = 0;

	while (true) {
		if (!parent || d_really_is_negative(parent) ||
		    sb != parent->d_sb)
			break;

		inode = BTRFS_I(d_inode(parent));
		if (root != inode->root)
			break;

		if (inode->generation > fs_info->last_trans_committed) {
			ret = btrfs_log_inode(trans, root, inode,
					LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
			if (ret)
				break;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	dput(old_parent);

	return ret;
}

static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
				 struct btrfs_inode *inode,
				 struct dentry *parent,
				 struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *root = inode->root;
	const u64 ino = btrfs_ino(inode);
	struct btrfs_path *path;
	struct btrfs_key search_key;
	int ret;

	/*
	 * When the inode has a single hard link, go through a fast path that
	 * does not need to iterate the fs/subvolume tree.
	 */
	if (inode->vfs_inode.i_nlink < 2)
		return log_new_ancestors_fast(trans, inode, parent, ctx);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = ino;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = 0;
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0)
		path->slots[0]++;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_key found_key;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != ino ||
		    found_key.type > BTRFS_INODE_EXTREF_KEY)
			break;

		/*
		 * Don't deal with extended references because they are rare
		 * cases and too complex to deal with (we would need to keep
		 * track of which subitem we are processing for each item in
		 * this loop, etc). So just return some error to fall back to
		 * a transaction commit.
		 */
		if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = -EMLINK;
			goto out;
		}

		/*
		 * Logging ancestors needs to do more searches on the fs/subvol
		 * tree, so it releases the path as needed to avoid deadlocks.
		 * Keep track of the last inode ref key and resume from that key
		 * after logging all new ancestors for the current hard link.
		 */
		memcpy(&search_key, &found_key, sizeof(search_key));

		ret = log_new_ancestors(trans, root, path, ctx);
		if (ret)
			goto out;
		btrfs_release_path(path);
		goto again;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

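/*
 * Illustrative sketch (not from the original source) of the
 * release-and-resume pattern used by log_all_new_ancestors() above;
 * do_expensive_subtree_work() is a hypothetical placeholder:
 *
 *	struct btrfs_key resume;
 *
 *	memcpy(&resume, &found_key, sizeof(resume));
 *	btrfs_release_path(path);	// drop extent buffer refs/locks
 *	do_expensive_subtree_work();	// may search the same tree again
 *	ret = btrfs_search_slot(NULL, root, &resume, path, 0, 0);
 *	if (ret == 0)			// exact key still present,
 *		path->slots[0]++;	// continue with the next ref
 *
 * Holding a path while recursively searching the same fs/subvolume tree
 * (as logging each ancestor does) could deadlock, hence the release and
 * the re-search from the saved key.
 */
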
/*
 * Helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log. Only a minimal logging of
 * the inode item and its backrefs is done for any parent directory that
 * is newer than the last committed transaction.
 */
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_inode *inode,
				  struct dentry *parent,
				  const loff_t start,
				  const loff_t end,
				  int inode_only,
				  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct super_block *sb;
	int ret = 0;
	u64 last_committed = fs_info->last_trans_committed;
	bool log_dentries = false;

	sb = inode->vfs_inode.i_sb;

	if (btrfs_test_opt(fs_info, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	/*
	 * If the previous transaction commit did not complete, we have to
	 * do a full commit ourselves.
	 */
	if (fs_info->last_trans_log_full_commit >
	    fs_info->last_trans_committed) {
		ret = 1;
		goto end_no_trans;
	}

	if (btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
					 last_committed);
	if (ret)
		goto end_no_trans;

	/*
	 * Skip already logged inodes or inodes corresponding to tmpfiles
	 * (since logging them is pointless, a link count of 0 means they
	 * will never be accessible).
	 */
	if (btrfs_inode_in_log(inode, trans->transid) ||
	    inode->vfs_inode.i_nlink == 0) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
	if (ret)
		goto end_trans;

	/*
	 * For a regular file, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation <= last_committed &&
	    inode->last_unlink_trans <= last_committed) {
		ret = 0;
		goto end_trans;
	}

	if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
		log_dentries = true;

	/*
	 * On unlink we must make sure all our current and old parent directory
	 * inodes are fully logged. This is to prevent leaving dangling
	 * directory index entries in directories that were our parents but are
	 * not anymore. Not doing this results in the old parent directory
	 * being impossible to delete after log replay (rmdir will always fail
	 * with error -ENOTEMPTY).
	 *
	 * Example 1:
	 *
	 * mkdir testdir
	 * touch testdir/foo
	 * ln testdir/foo testdir/bar
	 * sync
	 * unlink testdir/bar
	 * xfs_io -c fsync testdir/foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * If we don't log the parent directory (testdir), after log replay the
	 * directory still has an entry pointing to the file inode using the
	 * bar name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist
	 * and the file inode has a link count of 1.
	 *
	 * Example 2:
	 *
	 * mkdir testdir
	 * touch foo
	 * ln foo testdir/foo2
	 * ln foo testdir/foo3
	 * sync
	 * unlink testdir/foo3
	 * xfs_io -c fsync foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * Similar to the first example, after log replay the parent directory
	 * testdir still has an entry pointing to the file inode using the foo3
	 * name, but the file inode does not have a matching BTRFS_INODE_REF_KEY
	 * item and has a link count of 2.
	 */
	if (inode->last_unlink_trans > last_committed) {
		ret = btrfs_log_all_parents(trans, inode, ctx);
		if (ret)
			goto end_trans;
	}

	ret = log_all_new_ancestors(trans, inode, parent, ctx);
	if (ret)
		goto end_trans;

	if (log_dentries)
		ret = log_new_dir_dentries(trans, root, inode, ctx);
	else
		ret = 0;
end_trans:
	if (ret < 0) {
		btrfs_set_log_full_commit(trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}

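/*
 * Descriptive note (a summary of the outcomes above, not exhaustive
 * documentation): btrfs_log_inode_parent() returns
 *
 *	0			the inode and any required parents and
 *				ancestors are in the log, the caller may
 *				sync the log
 *	1			logging was not attempted or was aborted,
 *				the caller must fall back to a full
 *				transaction commit (negative errors are
 *				folded into 1 after setting the full
 *				commit flag)
 *	BTRFS_NO_LOG_SYNC	nothing needed to be logged (the inode was
 *				already logged in this transaction, or is
 *				a tmpfile)
 */
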
6170 * 6171 * Example 2: 6172 * 6173 * mkdir testdir 6174 * touch foo 6175 * ln foo testdir/foo2 6176 * ln foo testdir/foo3 6177 * sync 6178 * unlink testdir/foo3 6179 * xfs_io -c fsync foo 6180 * <power failure> 6181 * mount fs, triggers log replay 6182 * 6183 * Similar as the first example, after log replay the parent directory 6184 * testdir still has an entry pointing to the inode file with name foo3 6185 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item 6186 * and has a link count of 2. 6187 */ 6188 if (inode->last_unlink_trans > last_committed) { 6189 ret = btrfs_log_all_parents(trans, inode, ctx); 6190 if (ret) 6191 goto end_trans; 6192 } 6193 6194 ret = log_all_new_ancestors(trans, inode, parent, ctx); 6195 if (ret) 6196 goto end_trans; 6197 6198 if (log_dentries) 6199 ret = log_new_dir_dentries(trans, root, inode, ctx); 6200 else 6201 ret = 0; 6202 end_trans: 6203 if (ret < 0) { 6204 btrfs_set_log_full_commit(trans); 6205 ret = 1; 6206 } 6207 6208 if (ret) 6209 btrfs_remove_log_ctx(root, ctx); 6210 btrfs_end_log_trans(root); 6211 end_no_trans: 6212 return ret; 6213 } 6214 6215 /* 6216 * it is not safe to log dentry if the chunk root has added new 6217 * chunks. This returns 0 if the dentry was logged, and 1 otherwise. 6218 * If this returns 1, you must commit the transaction to safely get your 6219 * data on disk. 6220 */ 6221 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, 6222 struct dentry *dentry, 6223 const loff_t start, 6224 const loff_t end, 6225 struct btrfs_log_ctx *ctx) 6226 { 6227 struct dentry *parent = dget_parent(dentry); 6228 int ret; 6229 6230 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent, 6231 start, end, LOG_INODE_ALL, ctx); 6232 dput(parent); 6233 6234 return ret; 6235 } 6236 6237 /* 6238 * should be called during mount to recover any replay any log trees 6239 * from the FS 6240 */ 6241 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) 6242 { 6243 int ret; 6244 struct btrfs_path *path; 6245 struct btrfs_trans_handle *trans; 6246 struct btrfs_key key; 6247 struct btrfs_key found_key; 6248 struct btrfs_key tmp_key; 6249 struct btrfs_root *log; 6250 struct btrfs_fs_info *fs_info = log_root_tree->fs_info; 6251 struct walk_control wc = { 6252 .process_func = process_one_buffer, 6253 .stage = LOG_WALK_PIN_ONLY, 6254 }; 6255 6256 path = btrfs_alloc_path(); 6257 if (!path) 6258 return -ENOMEM; 6259 6260 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags); 6261 6262 trans = btrfs_start_transaction(fs_info->tree_root, 0); 6263 if (IS_ERR(trans)) { 6264 ret = PTR_ERR(trans); 6265 goto error; 6266 } 6267 6268 wc.trans = trans; 6269 wc.pin = 1; 6270 6271 ret = walk_log_tree(trans, log_root_tree, &wc); 6272 if (ret) { 6273 btrfs_handle_fs_error(fs_info, ret, 6274 "Failed to pin buffers while recovering log root tree."); 6275 goto error; 6276 } 6277 6278 again: 6279 key.objectid = BTRFS_TREE_LOG_OBJECTID; 6280 key.offset = (u64)-1; 6281 key.type = BTRFS_ROOT_ITEM_KEY; 6282 6283 while (1) { 6284 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); 6285 6286 if (ret < 0) { 6287 btrfs_handle_fs_error(fs_info, ret, 6288 "Couldn't find tree log root."); 6289 goto error; 6290 } 6291 if (ret > 0) { 6292 if (path->slots[0] == 0) 6293 break; 6294 path->slots[0]--; 6295 } 6296 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 6297 path->slots[0]); 6298 btrfs_release_path(path); 6299 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID) 6300 break; 6301 6302 log = btrfs_read_fs_root(log_root_tree, &found_key); 6303 
/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = LOG_WALK_PIN_ONLY,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
			"Failed to pin buffers while recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_handle_fs_error(fs_info, ret,
				    "Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			kfree(log);
			btrfs_handle_fs_error(fs_info, ret,
				"Couldn't read target root for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			struct btrfs_root *root = wc.replay_dest;

			btrfs_release_path(path);

			/*
			 * We have just replayed everything, and the highest
			 * objectid of the fs roots probably has changed in
			 * case some inode_items got replayed.
			 *
			 * root->objectid_mutex is not acquired as log replay
			 * can only happen during mount.
			 */
			ret = btrfs_find_highest_objectid(root,
						  &root->highest_objectid);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (ret)
			goto error;

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	kfree(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans);
	btrfs_free_path(path);
	return ret;
}

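/*
 * Descriptive note: the "step" comments above predate the directory
 * index stage, so the walk over the log roots actually happens four
 * times, driven by the again: label:
 *
 *	pass 1: LOG_WALK_PIN_ONLY		pin all extents used by logs
 *	pass 2: LOG_WALK_REPLAY_INODES		create every logged inode
 *	pass 3: LOG_WALK_REPLAY_DIR_INDEX	replay directory index items
 *	pass 4: LOG_WALK_REPLAY_ALL		replay everything else
 *
 * followed by a transaction commit that also unpins the blocks.
 */
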
/*
 * There are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 *
 * Must be called before the unlink operations (updates to the subvolume
 * tree, inodes, etc) are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_inode *dir, struct btrfs_inode *inode,
			     int for_rename)
{
	/*
	 * When we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file. When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	mutex_lock(&inode->log_mutex);
	inode->last_unlink_trans = trans->transid;
	mutex_unlock(&inode->log_mutex);

	/*
	 * If this directory was already logged, any new
	 * names for this file/dir will get recorded.
	 */
	if (dir->logged_trans == trans->transid)
		return;

	/*
	 * If the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names.
	 */
	if (inode->logged_trans == trans->transid)
		return;

	/*
	 * When renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly. So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}

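/*
 * Illustrative call order (a simplified sketch, not the verbatim
 * callers, which live in the unlink and rename paths of inode.c):
 *
 *	trans = btrfs_start_transaction(root, ...);
 *	btrfs_record_unlink_dir(trans, dir, inode, for_rename);
 *	... only now do the directory item/index deletion and the
 *	    inode updates ...
 *	btrfs_end_transaction(trans);
 */
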
/*
 * Make sure that if someone attempts to fsync the parent directory of a
 * deleted snapshot, it ends up triggering a transaction commit. This is to
 * guarantee that after replaying the log tree of the parent directory's root
 * we will not see the snapshot anymore and at log replay time we will not
 * see any log tree corresponding to the deleted snapshot's root, which could
 * lead to replaying it after replaying the log tree of the parent directory
 * (which would replay the snapshot delete operation).
 *
 * Must be called before the actual snapshot destroy operation (updates to
 * the parent root and the tree of tree roots, etc) is done.
 */
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir)
{
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}

/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * @ctx must not be NULL when @sync_log is false, and should be NULL when
 * it's true (because it's not used).
 *
 * Return value depends on whether @sync_log is true or false.
 * When true:  returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to
 *             be committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
 *             otherwise.
 * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need
 *             to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the
 *             log, or BTRFS_NEED_TRANS_COMMIT if the transaction needs to
 *             be committed (without attempting to sync the log).
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *inode, struct btrfs_inode *old_dir,
		       struct dentry *parent,
		       bool sync_log, struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up for the file
	 */
	if (!S_ISDIR(inode->vfs_inode.i_mode))
		inode->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're renaming
	 * it from hasn't been logged, we don't need to log it
	 */
	if (inode->logged_trans <= fs_info->last_trans_committed &&
	    (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
		return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
			BTRFS_DONT_NEED_LOG_SYNC;

	if (sync_log) {
		struct btrfs_log_ctx ctx2;

		btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
		ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
					     LOG_INODE_EXISTS, &ctx2);
		if (ret == BTRFS_NO_LOG_SYNC)
			return BTRFS_DONT_NEED_TRANS_COMMIT;
		else if (ret)
			return BTRFS_NEED_TRANS_COMMIT;

		ret = btrfs_sync_log(trans, inode->root, &ctx2);
		if (ret)
			return BTRFS_NEED_TRANS_COMMIT;
		return BTRFS_DONT_NEED_TRANS_COMMIT;
	}

	ASSERT(ctx);
	ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
				     LOG_INODE_EXISTS, ctx);
	if (ret == BTRFS_NO_LOG_SYNC)
		return BTRFS_DONT_NEED_LOG_SYNC;
	else if (ret)
		return BTRFS_NEED_TRANS_COMMIT;

	return BTRFS_NEED_LOG_SYNC;
}
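
/*
 * Illustrative sketch of a rename-style caller of btrfs_log_new_name()
 * (simplified; variable names are placeholders, not the verbatim code):
 *
 *	if (log_pinned) {
 *		ret = btrfs_log_new_name(trans, BTRFS_I(inode),
 *					 BTRFS_I(old_dir), parent,
 *					 false, &ctx);
 *		if (ret == BTRFS_NEED_LOG_SYNC)
 *			sync_log = true;
 *		else if (ret == BTRFS_NEED_TRANS_COMMIT)
 *			commit_transaction = true;
 *	}
 */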