1 /* 2 * Copyright (C) 2008 Oracle. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public 6 * License v2 as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public 14 * License along with this program; if not, write to the 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 * Boston, MA 021110-1307, USA. 17 */ 18 19 #include <linux/sched.h> 20 #include <linux/slab.h> 21 #include "ctree.h" 22 #include "transaction.h" 23 #include "disk-io.h" 24 #include "locking.h" 25 #include "print-tree.h" 26 #include "compat.h" 27 #include "tree-log.h" 28 29 /* magic values for the inode_only field in btrfs_log_inode: 30 * 31 * LOG_INODE_ALL means to log everything 32 * LOG_INODE_EXISTS means to log just enough to recreate the inode 33 * during log replay 34 */ 35 #define LOG_INODE_ALL 0 36 #define LOG_INODE_EXISTS 1 37 38 /* 39 * directory trouble cases 40 * 41 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync 42 * log, we must force a full commit before doing an fsync of the directory 43 * where the unlink was done. 44 * ---> record transid of last unlink/rename per directory 45 * 46 * mkdir foo/some_dir 47 * normal commit 48 * rename foo/some_dir foo2/some_dir 49 * mkdir foo/some_dir 50 * fsync foo/some_dir/some_file 51 * 52 * The fsync above will unlink the original some_dir without recording 53 * it in its new location (foo2). After a crash, some_dir will be gone 54 * unless the fsync of some_file forces a full commit 55 * 56 * 2) we must log any new names for any file or dir that is in the fsync 57 * log. ---> check inode while renaming/linking. 58 * 59 * 2a) we must log any new names for any file or dir during rename 60 * when the directory they are being removed from was logged. 61 * ---> check inode and old parent dir during rename 62 * 63 * 2a is actually the more important variant. With the extra logging 64 * a crash might unlink the old name without recreating the new one 65 * 66 * 3) after a crash, we must go through any directories with a link count 67 * of zero and redo the rm -rf 68 * 69 * mkdir f1/foo 70 * normal commit 71 * rm -rf f1/foo 72 * fsync(f1) 73 * 74 * The directory f1 was fully removed from the FS, but fsync was never 75 * called on f1, only its parent dir. After a crash the rm -rf must 76 * be replayed. This must be able to recurse down the entire 77 * directory tree. The inode link count fixup code takes care of the 78 * ugly details. 79 */ 80 81 /* 82 * stages for the tree walking. The first 83 * stage (0) is to only pin down the blocks we find 84 * the second stage (1) is to make sure that all the inodes 85 * we find in the log are created in the subvolume. 86 * 87 * The last stage is to deal with directories and links and extents 88 * and all the other fun semantics 89 */ 90 #define LOG_WALK_PIN_ONLY 0 91 #define LOG_WALK_REPLAY_INODES 1 92 #define LOG_WALK_REPLAY_ALL 2 93 94 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 95 struct btrfs_root *root, struct inode *inode, 96 int inode_only); 97 static int link_to_fixup_dir(struct btrfs_trans_handle *trans, 98 struct btrfs_root *root, 99 struct btrfs_path *path, u64 objectid); 100 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 101 struct btrfs_root *root, 102 struct btrfs_root *log, 103 struct btrfs_path *path, 104 u64 dirid, int del_all); 105 106 /* 107 * tree logging is a special write ahead log used to make sure that 108 * fsyncs and O_SYNCs can happen without doing full tree commits. 109 * 110 * Full tree commits are expensive because they require commonly 111 * modified blocks to be recowed, creating many dirty pages in the 112 * extent tree an 4x-6x higher write load than ext3. 113 * 114 * Instead of doing a tree commit on every fsync, we use the 115 * key ranges and transaction ids to find items for a given file or directory 116 * that have changed in this transaction. Those items are copied into 117 * a special tree (one per subvolume root), that tree is written to disk 118 * and then the fsync is considered complete. 119 * 120 * After a crash, items are copied out of the log-tree back into the 121 * subvolume tree. Any file data extents found are recorded in the extent 122 * allocation tree, and the log-tree freed. 123 * 124 * The log tree is read three times, once to pin down all the extents it is 125 * using in ram and once, once to create all the inodes logged in the tree 126 * and once to do all the other items. 127 */ 128 129 /* 130 * start a sub transaction and setup the log tree 131 * this increments the log tree writer count to make the people 132 * syncing the tree wait for us to finish 133 */ 134 static int start_log_trans(struct btrfs_trans_handle *trans, 135 struct btrfs_root *root) 136 { 137 int ret; 138 int err = 0; 139 140 mutex_lock(&root->log_mutex); 141 if (root->log_root) { 142 if (!root->log_start_pid) { 143 root->log_start_pid = current->pid; 144 root->log_multiple_pids = false; 145 } else if (root->log_start_pid != current->pid) { 146 root->log_multiple_pids = true; 147 } 148 149 root->log_batch++; 150 atomic_inc(&root->log_writers); 151 mutex_unlock(&root->log_mutex); 152 return 0; 153 } 154 root->log_multiple_pids = false; 155 root->log_start_pid = current->pid; 156 mutex_lock(&root->fs_info->tree_log_mutex); 157 if (!root->fs_info->log_root_tree) { 158 ret = btrfs_init_log_root_tree(trans, root->fs_info); 159 if (ret) 160 err = ret; 161 } 162 if (err == 0 && !root->log_root) { 163 ret = btrfs_add_log_tree(trans, root); 164 if (ret) 165 err = ret; 166 } 167 mutex_unlock(&root->fs_info->tree_log_mutex); 168 root->log_batch++; 169 atomic_inc(&root->log_writers); 170 mutex_unlock(&root->log_mutex); 171 return err; 172 } 173 174 /* 175 * returns 0 if there was a log transaction running and we were able 176 * to join, or returns -ENOENT if there were not transactions 177 * in progress 178 */ 179 static int join_running_log_trans(struct btrfs_root *root) 180 { 181 int ret = -ENOENT; 182 183 smp_mb(); 184 if (!root->log_root) 185 return -ENOENT; 186 187 mutex_lock(&root->log_mutex); 188 if (root->log_root) { 189 ret = 0; 190 atomic_inc(&root->log_writers); 191 } 192 mutex_unlock(&root->log_mutex); 193 return ret; 194 } 195 196 /* 197 * This either makes the current running log transaction wait 198 * until you call btrfs_end_log_trans() or it makes any future 199 * log transactions wait until you call btrfs_end_log_trans() 200 */ 201 int btrfs_pin_log_trans(struct btrfs_root *root) 202 { 203 int ret = -ENOENT; 204 205 mutex_lock(&root->log_mutex); 206 atomic_inc(&root->log_writers); 207 mutex_unlock(&root->log_mutex); 208 return ret; 209 } 210 211 /* 212 * indicate we're done making changes to the log tree 213 * and wake up anyone waiting to do a sync 214 */ 215 int btrfs_end_log_trans(struct btrfs_root *root) 216 { 217 if (atomic_dec_and_test(&root->log_writers)) { 218 smp_mb(); 219 if (waitqueue_active(&root->log_writer_wait)) 220 wake_up(&root->log_writer_wait); 221 } 222 return 0; 223 } 224 225 226 /* 227 * the walk control struct is used to pass state down the chain when 228 * processing the log tree. The stage field tells us which part 229 * of the log tree processing we are currently doing. The others 230 * are state fields used for that specific part 231 */ 232 struct walk_control { 233 /* should we free the extent on disk when done? This is used 234 * at transaction commit time while freeing a log tree 235 */ 236 int free; 237 238 /* should we write out the extent buffer? This is used 239 * while flushing the log tree to disk during a sync 240 */ 241 int write; 242 243 /* should we wait for the extent buffer io to finish? Also used 244 * while flushing the log tree to disk for a sync 245 */ 246 int wait; 247 248 /* pin only walk, we record which extents on disk belong to the 249 * log trees 250 */ 251 int pin; 252 253 /* what stage of the replay code we're currently in */ 254 int stage; 255 256 /* the root we are currently replaying */ 257 struct btrfs_root *replay_dest; 258 259 /* the trans handle for the current replay */ 260 struct btrfs_trans_handle *trans; 261 262 /* the function that gets used to process blocks we find in the 263 * tree. Note the extent_buffer might not be up to date when it is 264 * passed in, and it must be checked or read if you need the data 265 * inside it 266 */ 267 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb, 268 struct walk_control *wc, u64 gen); 269 }; 270 271 /* 272 * process_func used to pin down extents, write them or wait on them 273 */ 274 static int process_one_buffer(struct btrfs_root *log, 275 struct extent_buffer *eb, 276 struct walk_control *wc, u64 gen) 277 { 278 if (wc->pin) 279 btrfs_pin_extent(log->fs_info->extent_root, 280 eb->start, eb->len, 0); 281 282 if (btrfs_buffer_uptodate(eb, gen)) { 283 if (wc->write) 284 btrfs_write_tree_block(eb); 285 if (wc->wait) 286 btrfs_wait_tree_block_writeback(eb); 287 } 288 return 0; 289 } 290 291 /* 292 * Item overwrite used by replay and tree logging. eb, slot and key all refer 293 * to the src data we are copying out. 294 * 295 * root is the tree we are copying into, and path is a scratch 296 * path for use in this function (it should be released on entry and 297 * will be released on exit). 298 * 299 * If the key is already in the destination tree the existing item is 300 * overwritten. If the existing item isn't big enough, it is extended. 301 * If it is too large, it is truncated. 302 * 303 * If the key isn't in the destination yet, a new item is inserted. 304 */ 305 static noinline int overwrite_item(struct btrfs_trans_handle *trans, 306 struct btrfs_root *root, 307 struct btrfs_path *path, 308 struct extent_buffer *eb, int slot, 309 struct btrfs_key *key) 310 { 311 int ret; 312 u32 item_size; 313 u64 saved_i_size = 0; 314 int save_old_i_size = 0; 315 unsigned long src_ptr; 316 unsigned long dst_ptr; 317 int overwrite_root = 0; 318 319 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) 320 overwrite_root = 1; 321 322 item_size = btrfs_item_size_nr(eb, slot); 323 src_ptr = btrfs_item_ptr_offset(eb, slot); 324 325 /* look for the key in the destination tree */ 326 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 327 if (ret == 0) { 328 char *src_copy; 329 char *dst_copy; 330 u32 dst_size = btrfs_item_size_nr(path->nodes[0], 331 path->slots[0]); 332 if (dst_size != item_size) 333 goto insert; 334 335 if (item_size == 0) { 336 btrfs_release_path(root, path); 337 return 0; 338 } 339 dst_copy = kmalloc(item_size, GFP_NOFS); 340 src_copy = kmalloc(item_size, GFP_NOFS); 341 342 read_extent_buffer(eb, src_copy, src_ptr, item_size); 343 344 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 345 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr, 346 item_size); 347 ret = memcmp(dst_copy, src_copy, item_size); 348 349 kfree(dst_copy); 350 kfree(src_copy); 351 /* 352 * they have the same contents, just return, this saves 353 * us from cowing blocks in the destination tree and doing 354 * extra writes that may not have been done by a previous 355 * sync 356 */ 357 if (ret == 0) { 358 btrfs_release_path(root, path); 359 return 0; 360 } 361 362 } 363 insert: 364 btrfs_release_path(root, path); 365 /* try to insert the key into the destination tree */ 366 ret = btrfs_insert_empty_item(trans, root, path, 367 key, item_size); 368 369 /* make sure any existing item is the correct size */ 370 if (ret == -EEXIST) { 371 u32 found_size; 372 found_size = btrfs_item_size_nr(path->nodes[0], 373 path->slots[0]); 374 if (found_size > item_size) { 375 btrfs_truncate_item(trans, root, path, item_size, 1); 376 } else if (found_size < item_size) { 377 ret = btrfs_extend_item(trans, root, path, 378 item_size - found_size); 379 BUG_ON(ret); 380 } 381 } else if (ret) { 382 return ret; 383 } 384 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], 385 path->slots[0]); 386 387 /* don't overwrite an existing inode if the generation number 388 * was logged as zero. This is done when the tree logging code 389 * is just logging an inode to make sure it exists after recovery. 390 * 391 * Also, don't overwrite i_size on directories during replay. 392 * log replay inserts and removes directory items based on the 393 * state of the tree found in the subvolume, and i_size is modified 394 * as it goes 395 */ 396 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) { 397 struct btrfs_inode_item *src_item; 398 struct btrfs_inode_item *dst_item; 399 400 src_item = (struct btrfs_inode_item *)src_ptr; 401 dst_item = (struct btrfs_inode_item *)dst_ptr; 402 403 if (btrfs_inode_generation(eb, src_item) == 0) 404 goto no_copy; 405 406 if (overwrite_root && 407 S_ISDIR(btrfs_inode_mode(eb, src_item)) && 408 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) { 409 save_old_i_size = 1; 410 saved_i_size = btrfs_inode_size(path->nodes[0], 411 dst_item); 412 } 413 } 414 415 copy_extent_buffer(path->nodes[0], eb, dst_ptr, 416 src_ptr, item_size); 417 418 if (save_old_i_size) { 419 struct btrfs_inode_item *dst_item; 420 dst_item = (struct btrfs_inode_item *)dst_ptr; 421 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size); 422 } 423 424 /* make sure the generation is filled in */ 425 if (key->type == BTRFS_INODE_ITEM_KEY) { 426 struct btrfs_inode_item *dst_item; 427 dst_item = (struct btrfs_inode_item *)dst_ptr; 428 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) { 429 btrfs_set_inode_generation(path->nodes[0], dst_item, 430 trans->transid); 431 } 432 } 433 no_copy: 434 btrfs_mark_buffer_dirty(path->nodes[0]); 435 btrfs_release_path(root, path); 436 return 0; 437 } 438 439 /* 440 * simple helper to read an inode off the disk from a given root 441 * This can only be called for subvolume roots and not for the log 442 */ 443 static noinline struct inode *read_one_inode(struct btrfs_root *root, 444 u64 objectid) 445 { 446 struct btrfs_key key; 447 struct inode *inode; 448 449 key.objectid = objectid; 450 key.type = BTRFS_INODE_ITEM_KEY; 451 key.offset = 0; 452 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); 453 if (IS_ERR(inode)) { 454 inode = NULL; 455 } else if (is_bad_inode(inode)) { 456 iput(inode); 457 inode = NULL; 458 } 459 return inode; 460 } 461 462 /* replays a single extent in 'eb' at 'slot' with 'key' into the 463 * subvolume 'root'. path is released on entry and should be released 464 * on exit. 465 * 466 * extents in the log tree have not been allocated out of the extent 467 * tree yet. So, this completes the allocation, taking a reference 468 * as required if the extent already exists or creating a new extent 469 * if it isn't in the extent allocation tree yet. 470 * 471 * The extent is inserted into the file, dropping any existing extents 472 * from the file that overlap the new one. 473 */ 474 static noinline int replay_one_extent(struct btrfs_trans_handle *trans, 475 struct btrfs_root *root, 476 struct btrfs_path *path, 477 struct extent_buffer *eb, int slot, 478 struct btrfs_key *key) 479 { 480 int found_type; 481 u64 mask = root->sectorsize - 1; 482 u64 extent_end; 483 u64 alloc_hint; 484 u64 start = key->offset; 485 u64 saved_nbytes; 486 struct btrfs_file_extent_item *item; 487 struct inode *inode = NULL; 488 unsigned long size; 489 int ret = 0; 490 491 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 492 found_type = btrfs_file_extent_type(eb, item); 493 494 if (found_type == BTRFS_FILE_EXTENT_REG || 495 found_type == BTRFS_FILE_EXTENT_PREALLOC) 496 extent_end = start + btrfs_file_extent_num_bytes(eb, item); 497 else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 498 size = btrfs_file_extent_inline_len(eb, item); 499 extent_end = (start + size + mask) & ~mask; 500 } else { 501 ret = 0; 502 goto out; 503 } 504 505 inode = read_one_inode(root, key->objectid); 506 if (!inode) { 507 ret = -EIO; 508 goto out; 509 } 510 511 /* 512 * first check to see if we already have this extent in the 513 * file. This must be done before the btrfs_drop_extents run 514 * so we don't try to drop this extent. 515 */ 516 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, 517 start, 0); 518 519 if (ret == 0 && 520 (found_type == BTRFS_FILE_EXTENT_REG || 521 found_type == BTRFS_FILE_EXTENT_PREALLOC)) { 522 struct btrfs_file_extent_item cmp1; 523 struct btrfs_file_extent_item cmp2; 524 struct btrfs_file_extent_item *existing; 525 struct extent_buffer *leaf; 526 527 leaf = path->nodes[0]; 528 existing = btrfs_item_ptr(leaf, path->slots[0], 529 struct btrfs_file_extent_item); 530 531 read_extent_buffer(eb, &cmp1, (unsigned long)item, 532 sizeof(cmp1)); 533 read_extent_buffer(leaf, &cmp2, (unsigned long)existing, 534 sizeof(cmp2)); 535 536 /* 537 * we already have a pointer to this exact extent, 538 * we don't have to do anything 539 */ 540 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) { 541 btrfs_release_path(root, path); 542 goto out; 543 } 544 } 545 btrfs_release_path(root, path); 546 547 saved_nbytes = inode_get_bytes(inode); 548 /* drop any overlapping extents */ 549 ret = btrfs_drop_extents(trans, inode, start, extent_end, 550 &alloc_hint, 1); 551 BUG_ON(ret); 552 553 if (found_type == BTRFS_FILE_EXTENT_REG || 554 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 555 u64 offset; 556 unsigned long dest_offset; 557 struct btrfs_key ins; 558 559 ret = btrfs_insert_empty_item(trans, root, path, key, 560 sizeof(*item)); 561 BUG_ON(ret); 562 dest_offset = btrfs_item_ptr_offset(path->nodes[0], 563 path->slots[0]); 564 copy_extent_buffer(path->nodes[0], eb, dest_offset, 565 (unsigned long)item, sizeof(*item)); 566 567 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item); 568 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item); 569 ins.type = BTRFS_EXTENT_ITEM_KEY; 570 offset = key->offset - btrfs_file_extent_offset(eb, item); 571 572 if (ins.objectid > 0) { 573 u64 csum_start; 574 u64 csum_end; 575 LIST_HEAD(ordered_sums); 576 /* 577 * is this extent already allocated in the extent 578 * allocation tree? If so, just add a reference 579 */ 580 ret = btrfs_lookup_extent(root, ins.objectid, 581 ins.offset); 582 if (ret == 0) { 583 ret = btrfs_inc_extent_ref(trans, root, 584 ins.objectid, ins.offset, 585 0, root->root_key.objectid, 586 key->objectid, offset); 587 } else { 588 /* 589 * insert the extent pointer in the extent 590 * allocation tree 591 */ 592 ret = btrfs_alloc_logged_file_extent(trans, 593 root, root->root_key.objectid, 594 key->objectid, offset, &ins); 595 BUG_ON(ret); 596 } 597 btrfs_release_path(root, path); 598 599 if (btrfs_file_extent_compression(eb, item)) { 600 csum_start = ins.objectid; 601 csum_end = csum_start + ins.offset; 602 } else { 603 csum_start = ins.objectid + 604 btrfs_file_extent_offset(eb, item); 605 csum_end = csum_start + 606 btrfs_file_extent_num_bytes(eb, item); 607 } 608 609 ret = btrfs_lookup_csums_range(root->log_root, 610 csum_start, csum_end - 1, 611 &ordered_sums); 612 BUG_ON(ret); 613 while (!list_empty(&ordered_sums)) { 614 struct btrfs_ordered_sum *sums; 615 sums = list_entry(ordered_sums.next, 616 struct btrfs_ordered_sum, 617 list); 618 ret = btrfs_csum_file_blocks(trans, 619 root->fs_info->csum_root, 620 sums); 621 BUG_ON(ret); 622 list_del(&sums->list); 623 kfree(sums); 624 } 625 } else { 626 btrfs_release_path(root, path); 627 } 628 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 629 /* inline extents are easy, we just overwrite them */ 630 ret = overwrite_item(trans, root, path, eb, slot, key); 631 BUG_ON(ret); 632 } 633 634 inode_set_bytes(inode, saved_nbytes); 635 btrfs_update_inode(trans, root, inode); 636 out: 637 if (inode) 638 iput(inode); 639 return ret; 640 } 641 642 /* 643 * when cleaning up conflicts between the directory names in the 644 * subvolume, directory names in the log and directory names in the 645 * inode back references, we may have to unlink inodes from directories. 646 * 647 * This is a helper function to do the unlink of a specific directory 648 * item 649 */ 650 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, 651 struct btrfs_root *root, 652 struct btrfs_path *path, 653 struct inode *dir, 654 struct btrfs_dir_item *di) 655 { 656 struct inode *inode; 657 char *name; 658 int name_len; 659 struct extent_buffer *leaf; 660 struct btrfs_key location; 661 int ret; 662 663 leaf = path->nodes[0]; 664 665 btrfs_dir_item_key_to_cpu(leaf, di, &location); 666 name_len = btrfs_dir_name_len(leaf, di); 667 name = kmalloc(name_len, GFP_NOFS); 668 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); 669 btrfs_release_path(root, path); 670 671 inode = read_one_inode(root, location.objectid); 672 BUG_ON(!inode); 673 674 ret = link_to_fixup_dir(trans, root, path, location.objectid); 675 BUG_ON(ret); 676 677 ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len); 678 BUG_ON(ret); 679 kfree(name); 680 681 iput(inode); 682 return ret; 683 } 684 685 /* 686 * helper function to see if a given name and sequence number found 687 * in an inode back reference are already in a directory and correctly 688 * point to this inode 689 */ 690 static noinline int inode_in_dir(struct btrfs_root *root, 691 struct btrfs_path *path, 692 u64 dirid, u64 objectid, u64 index, 693 const char *name, int name_len) 694 { 695 struct btrfs_dir_item *di; 696 struct btrfs_key location; 697 int match = 0; 698 699 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid, 700 index, name, name_len, 0); 701 if (di && !IS_ERR(di)) { 702 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 703 if (location.objectid != objectid) 704 goto out; 705 } else 706 goto out; 707 btrfs_release_path(root, path); 708 709 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); 710 if (di && !IS_ERR(di)) { 711 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 712 if (location.objectid != objectid) 713 goto out; 714 } else 715 goto out; 716 match = 1; 717 out: 718 btrfs_release_path(root, path); 719 return match; 720 } 721 722 /* 723 * helper function to check a log tree for a named back reference in 724 * an inode. This is used to decide if a back reference that is 725 * found in the subvolume conflicts with what we find in the log. 726 * 727 * inode backreferences may have multiple refs in a single item, 728 * during replay we process one reference at a time, and we don't 729 * want to delete valid links to a file from the subvolume if that 730 * link is also in the log. 731 */ 732 static noinline int backref_in_log(struct btrfs_root *log, 733 struct btrfs_key *key, 734 char *name, int namelen) 735 { 736 struct btrfs_path *path; 737 struct btrfs_inode_ref *ref; 738 unsigned long ptr; 739 unsigned long ptr_end; 740 unsigned long name_ptr; 741 int found_name_len; 742 int item_size; 743 int ret; 744 int match = 0; 745 746 path = btrfs_alloc_path(); 747 ret = btrfs_search_slot(NULL, log, key, path, 0, 0); 748 if (ret != 0) 749 goto out; 750 751 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); 752 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 753 ptr_end = ptr + item_size; 754 while (ptr < ptr_end) { 755 ref = (struct btrfs_inode_ref *)ptr; 756 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref); 757 if (found_name_len == namelen) { 758 name_ptr = (unsigned long)(ref + 1); 759 ret = memcmp_extent_buffer(path->nodes[0], name, 760 name_ptr, namelen); 761 if (ret == 0) { 762 match = 1; 763 goto out; 764 } 765 } 766 ptr = (unsigned long)(ref + 1) + found_name_len; 767 } 768 out: 769 btrfs_free_path(path); 770 return match; 771 } 772 773 774 /* 775 * replay one inode back reference item found in the log tree. 776 * eb, slot and key refer to the buffer and key found in the log tree. 777 * root is the destination we are replaying into, and path is for temp 778 * use by this function. (it should be released on return). 779 */ 780 static noinline int add_inode_ref(struct btrfs_trans_handle *trans, 781 struct btrfs_root *root, 782 struct btrfs_root *log, 783 struct btrfs_path *path, 784 struct extent_buffer *eb, int slot, 785 struct btrfs_key *key) 786 { 787 struct inode *dir; 788 int ret; 789 struct btrfs_key location; 790 struct btrfs_inode_ref *ref; 791 struct btrfs_dir_item *di; 792 struct inode *inode; 793 char *name; 794 int namelen; 795 unsigned long ref_ptr; 796 unsigned long ref_end; 797 798 location.objectid = key->objectid; 799 location.type = BTRFS_INODE_ITEM_KEY; 800 location.offset = 0; 801 802 /* 803 * it is possible that we didn't log all the parent directories 804 * for a given inode. If we don't find the dir, just don't 805 * copy the back ref in. The link count fixup code will take 806 * care of the rest 807 */ 808 dir = read_one_inode(root, key->offset); 809 if (!dir) 810 return -ENOENT; 811 812 inode = read_one_inode(root, key->objectid); 813 BUG_ON(!inode); 814 815 ref_ptr = btrfs_item_ptr_offset(eb, slot); 816 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); 817 818 again: 819 ref = (struct btrfs_inode_ref *)ref_ptr; 820 821 namelen = btrfs_inode_ref_name_len(eb, ref); 822 name = kmalloc(namelen, GFP_NOFS); 823 BUG_ON(!name); 824 825 read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen); 826 827 /* if we already have a perfect match, we're done */ 828 if (inode_in_dir(root, path, dir->i_ino, inode->i_ino, 829 btrfs_inode_ref_index(eb, ref), 830 name, namelen)) { 831 goto out; 832 } 833 834 /* 835 * look for a conflicting back reference in the metadata. 836 * if we find one we have to unlink that name of the file 837 * before we add our new link. Later on, we overwrite any 838 * existing back reference, and we don't want to create 839 * dangling pointers in the directory. 840 */ 841 conflict_again: 842 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 843 if (ret == 0) { 844 char *victim_name; 845 int victim_name_len; 846 struct btrfs_inode_ref *victim_ref; 847 unsigned long ptr; 848 unsigned long ptr_end; 849 struct extent_buffer *leaf = path->nodes[0]; 850 851 /* are we trying to overwrite a back ref for the root directory 852 * if so, just jump out, we're done 853 */ 854 if (key->objectid == key->offset) 855 goto out_nowrite; 856 857 /* check all the names in this back reference to see 858 * if they are in the log. if so, we allow them to stay 859 * otherwise they must be unlinked as a conflict 860 */ 861 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 862 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]); 863 while (ptr < ptr_end) { 864 victim_ref = (struct btrfs_inode_ref *)ptr; 865 victim_name_len = btrfs_inode_ref_name_len(leaf, 866 victim_ref); 867 victim_name = kmalloc(victim_name_len, GFP_NOFS); 868 BUG_ON(!victim_name); 869 870 read_extent_buffer(leaf, victim_name, 871 (unsigned long)(victim_ref + 1), 872 victim_name_len); 873 874 if (!backref_in_log(log, key, victim_name, 875 victim_name_len)) { 876 btrfs_inc_nlink(inode); 877 btrfs_release_path(root, path); 878 879 ret = btrfs_unlink_inode(trans, root, dir, 880 inode, victim_name, 881 victim_name_len); 882 kfree(victim_name); 883 btrfs_release_path(root, path); 884 goto conflict_again; 885 } 886 kfree(victim_name); 887 ptr = (unsigned long)(victim_ref + 1) + victim_name_len; 888 } 889 BUG_ON(ret); 890 } 891 btrfs_release_path(root, path); 892 893 /* look for a conflicting sequence number */ 894 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, 895 btrfs_inode_ref_index(eb, ref), 896 name, namelen, 0); 897 if (di && !IS_ERR(di)) { 898 ret = drop_one_dir_item(trans, root, path, dir, di); 899 BUG_ON(ret); 900 } 901 btrfs_release_path(root, path); 902 903 904 /* look for a conflicting name */ 905 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, 906 name, namelen, 0); 907 if (di && !IS_ERR(di)) { 908 ret = drop_one_dir_item(trans, root, path, dir, di); 909 BUG_ON(ret); 910 } 911 btrfs_release_path(root, path); 912 913 /* insert our name */ 914 ret = btrfs_add_link(trans, dir, inode, name, namelen, 0, 915 btrfs_inode_ref_index(eb, ref)); 916 BUG_ON(ret); 917 918 btrfs_update_inode(trans, root, inode); 919 920 out: 921 ref_ptr = (unsigned long)(ref + 1) + namelen; 922 kfree(name); 923 if (ref_ptr < ref_end) 924 goto again; 925 926 /* finally write the back reference in the inode */ 927 ret = overwrite_item(trans, root, path, eb, slot, key); 928 BUG_ON(ret); 929 930 out_nowrite: 931 btrfs_release_path(root, path); 932 iput(dir); 933 iput(inode); 934 return 0; 935 } 936 937 static int insert_orphan_item(struct btrfs_trans_handle *trans, 938 struct btrfs_root *root, u64 offset) 939 { 940 int ret; 941 ret = btrfs_find_orphan_item(root, offset); 942 if (ret > 0) 943 ret = btrfs_insert_orphan_item(trans, root, offset); 944 return ret; 945 } 946 947 948 /* 949 * There are a few corners where the link count of the file can't 950 * be properly maintained during replay. So, instead of adding 951 * lots of complexity to the log code, we just scan the backrefs 952 * for any file that has been through replay. 953 * 954 * The scan will update the link count on the inode to reflect the 955 * number of back refs found. If it goes down to zero, the iput 956 * will free the inode. 957 */ 958 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, 959 struct btrfs_root *root, 960 struct inode *inode) 961 { 962 struct btrfs_path *path; 963 int ret; 964 struct btrfs_key key; 965 u64 nlink = 0; 966 unsigned long ptr; 967 unsigned long ptr_end; 968 int name_len; 969 970 key.objectid = inode->i_ino; 971 key.type = BTRFS_INODE_REF_KEY; 972 key.offset = (u64)-1; 973 974 path = btrfs_alloc_path(); 975 976 while (1) { 977 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 978 if (ret < 0) 979 break; 980 if (ret > 0) { 981 if (path->slots[0] == 0) 982 break; 983 path->slots[0]--; 984 } 985 btrfs_item_key_to_cpu(path->nodes[0], &key, 986 path->slots[0]); 987 if (key.objectid != inode->i_ino || 988 key.type != BTRFS_INODE_REF_KEY) 989 break; 990 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 991 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0], 992 path->slots[0]); 993 while (ptr < ptr_end) { 994 struct btrfs_inode_ref *ref; 995 996 ref = (struct btrfs_inode_ref *)ptr; 997 name_len = btrfs_inode_ref_name_len(path->nodes[0], 998 ref); 999 ptr = (unsigned long)(ref + 1) + name_len; 1000 nlink++; 1001 } 1002 1003 if (key.offset == 0) 1004 break; 1005 key.offset--; 1006 btrfs_release_path(root, path); 1007 } 1008 btrfs_release_path(root, path); 1009 if (nlink != inode->i_nlink) { 1010 inode->i_nlink = nlink; 1011 btrfs_update_inode(trans, root, inode); 1012 } 1013 BTRFS_I(inode)->index_cnt = (u64)-1; 1014 1015 if (inode->i_nlink == 0) { 1016 if (S_ISDIR(inode->i_mode)) { 1017 ret = replay_dir_deletes(trans, root, NULL, path, 1018 inode->i_ino, 1); 1019 BUG_ON(ret); 1020 } 1021 ret = insert_orphan_item(trans, root, inode->i_ino); 1022 BUG_ON(ret); 1023 } 1024 btrfs_free_path(path); 1025 1026 return 0; 1027 } 1028 1029 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, 1030 struct btrfs_root *root, 1031 struct btrfs_path *path) 1032 { 1033 int ret; 1034 struct btrfs_key key; 1035 struct inode *inode; 1036 1037 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; 1038 key.type = BTRFS_ORPHAN_ITEM_KEY; 1039 key.offset = (u64)-1; 1040 while (1) { 1041 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1042 if (ret < 0) 1043 break; 1044 1045 if (ret == 1) { 1046 if (path->slots[0] == 0) 1047 break; 1048 path->slots[0]--; 1049 } 1050 1051 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1052 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID || 1053 key.type != BTRFS_ORPHAN_ITEM_KEY) 1054 break; 1055 1056 ret = btrfs_del_item(trans, root, path); 1057 BUG_ON(ret); 1058 1059 btrfs_release_path(root, path); 1060 inode = read_one_inode(root, key.offset); 1061 BUG_ON(!inode); 1062 1063 ret = fixup_inode_link_count(trans, root, inode); 1064 BUG_ON(ret); 1065 1066 iput(inode); 1067 1068 /* 1069 * fixup on a directory may create new entries, 1070 * make sure we always look for the highset possible 1071 * offset 1072 */ 1073 key.offset = (u64)-1; 1074 } 1075 btrfs_release_path(root, path); 1076 return 0; 1077 } 1078 1079 1080 /* 1081 * record a given inode in the fixup dir so we can check its link 1082 * count when replay is done. The link count is incremented here 1083 * so the inode won't go away until we check it 1084 */ 1085 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, 1086 struct btrfs_root *root, 1087 struct btrfs_path *path, 1088 u64 objectid) 1089 { 1090 struct btrfs_key key; 1091 int ret = 0; 1092 struct inode *inode; 1093 1094 inode = read_one_inode(root, objectid); 1095 BUG_ON(!inode); 1096 1097 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; 1098 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); 1099 key.offset = objectid; 1100 1101 ret = btrfs_insert_empty_item(trans, root, path, &key, 0); 1102 1103 btrfs_release_path(root, path); 1104 if (ret == 0) { 1105 btrfs_inc_nlink(inode); 1106 btrfs_update_inode(trans, root, inode); 1107 } else if (ret == -EEXIST) { 1108 ret = 0; 1109 } else { 1110 BUG(); 1111 } 1112 iput(inode); 1113 1114 return ret; 1115 } 1116 1117 /* 1118 * when replaying the log for a directory, we only insert names 1119 * for inodes that actually exist. This means an fsync on a directory 1120 * does not implicitly fsync all the new files in it 1121 */ 1122 static noinline int insert_one_name(struct btrfs_trans_handle *trans, 1123 struct btrfs_root *root, 1124 struct btrfs_path *path, 1125 u64 dirid, u64 index, 1126 char *name, int name_len, u8 type, 1127 struct btrfs_key *location) 1128 { 1129 struct inode *inode; 1130 struct inode *dir; 1131 int ret; 1132 1133 inode = read_one_inode(root, location->objectid); 1134 if (!inode) 1135 return -ENOENT; 1136 1137 dir = read_one_inode(root, dirid); 1138 if (!dir) { 1139 iput(inode); 1140 return -EIO; 1141 } 1142 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index); 1143 1144 /* FIXME, put inode into FIXUP list */ 1145 1146 iput(inode); 1147 iput(dir); 1148 return ret; 1149 } 1150 1151 /* 1152 * take a single entry in a log directory item and replay it into 1153 * the subvolume. 1154 * 1155 * if a conflicting item exists in the subdirectory already, 1156 * the inode it points to is unlinked and put into the link count 1157 * fix up tree. 1158 * 1159 * If a name from the log points to a file or directory that does 1160 * not exist in the FS, it is skipped. fsyncs on directories 1161 * do not force down inodes inside that directory, just changes to the 1162 * names or unlinks in a directory. 1163 */ 1164 static noinline int replay_one_name(struct btrfs_trans_handle *trans, 1165 struct btrfs_root *root, 1166 struct btrfs_path *path, 1167 struct extent_buffer *eb, 1168 struct btrfs_dir_item *di, 1169 struct btrfs_key *key) 1170 { 1171 char *name; 1172 int name_len; 1173 struct btrfs_dir_item *dst_di; 1174 struct btrfs_key found_key; 1175 struct btrfs_key log_key; 1176 struct inode *dir; 1177 u8 log_type; 1178 int exists; 1179 int ret; 1180 1181 dir = read_one_inode(root, key->objectid); 1182 BUG_ON(!dir); 1183 1184 name_len = btrfs_dir_name_len(eb, di); 1185 name = kmalloc(name_len, GFP_NOFS); 1186 log_type = btrfs_dir_type(eb, di); 1187 read_extent_buffer(eb, name, (unsigned long)(di + 1), 1188 name_len); 1189 1190 btrfs_dir_item_key_to_cpu(eb, di, &log_key); 1191 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0); 1192 if (exists == 0) 1193 exists = 1; 1194 else 1195 exists = 0; 1196 btrfs_release_path(root, path); 1197 1198 if (key->type == BTRFS_DIR_ITEM_KEY) { 1199 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, 1200 name, name_len, 1); 1201 } else if (key->type == BTRFS_DIR_INDEX_KEY) { 1202 dst_di = btrfs_lookup_dir_index_item(trans, root, path, 1203 key->objectid, 1204 key->offset, name, 1205 name_len, 1); 1206 } else { 1207 BUG(); 1208 } 1209 if (!dst_di || IS_ERR(dst_di)) { 1210 /* we need a sequence number to insert, so we only 1211 * do inserts for the BTRFS_DIR_INDEX_KEY types 1212 */ 1213 if (key->type != BTRFS_DIR_INDEX_KEY) 1214 goto out; 1215 goto insert; 1216 } 1217 1218 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key); 1219 /* the existing item matches the logged item */ 1220 if (found_key.objectid == log_key.objectid && 1221 found_key.type == log_key.type && 1222 found_key.offset == log_key.offset && 1223 btrfs_dir_type(path->nodes[0], dst_di) == log_type) { 1224 goto out; 1225 } 1226 1227 /* 1228 * don't drop the conflicting directory entry if the inode 1229 * for the new entry doesn't exist 1230 */ 1231 if (!exists) 1232 goto out; 1233 1234 ret = drop_one_dir_item(trans, root, path, dir, dst_di); 1235 BUG_ON(ret); 1236 1237 if (key->type == BTRFS_DIR_INDEX_KEY) 1238 goto insert; 1239 out: 1240 btrfs_release_path(root, path); 1241 kfree(name); 1242 iput(dir); 1243 return 0; 1244 1245 insert: 1246 btrfs_release_path(root, path); 1247 ret = insert_one_name(trans, root, path, key->objectid, key->offset, 1248 name, name_len, log_type, &log_key); 1249 1250 BUG_ON(ret && ret != -ENOENT); 1251 goto out; 1252 } 1253 1254 /* 1255 * find all the names in a directory item and reconcile them into 1256 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than 1257 * one name in a directory item, but the same code gets used for 1258 * both directory index types 1259 */ 1260 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans, 1261 struct btrfs_root *root, 1262 struct btrfs_path *path, 1263 struct extent_buffer *eb, int slot, 1264 struct btrfs_key *key) 1265 { 1266 int ret; 1267 u32 item_size = btrfs_item_size_nr(eb, slot); 1268 struct btrfs_dir_item *di; 1269 int name_len; 1270 unsigned long ptr; 1271 unsigned long ptr_end; 1272 1273 ptr = btrfs_item_ptr_offset(eb, slot); 1274 ptr_end = ptr + item_size; 1275 while (ptr < ptr_end) { 1276 di = (struct btrfs_dir_item *)ptr; 1277 name_len = btrfs_dir_name_len(eb, di); 1278 ret = replay_one_name(trans, root, path, eb, di, key); 1279 BUG_ON(ret); 1280 ptr = (unsigned long)(di + 1); 1281 ptr += name_len; 1282 } 1283 return 0; 1284 } 1285 1286 /* 1287 * directory replay has two parts. There are the standard directory 1288 * items in the log copied from the subvolume, and range items 1289 * created in the log while the subvolume was logged. 1290 * 1291 * The range items tell us which parts of the key space the log 1292 * is authoritative for. During replay, if a key in the subvolume 1293 * directory is in a logged range item, but not actually in the log 1294 * that means it was deleted from the directory before the fsync 1295 * and should be removed. 1296 */ 1297 static noinline int find_dir_range(struct btrfs_root *root, 1298 struct btrfs_path *path, 1299 u64 dirid, int key_type, 1300 u64 *start_ret, u64 *end_ret) 1301 { 1302 struct btrfs_key key; 1303 u64 found_end; 1304 struct btrfs_dir_log_item *item; 1305 int ret; 1306 int nritems; 1307 1308 if (*start_ret == (u64)-1) 1309 return 1; 1310 1311 key.objectid = dirid; 1312 key.type = key_type; 1313 key.offset = *start_ret; 1314 1315 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1316 if (ret < 0) 1317 goto out; 1318 if (ret > 0) { 1319 if (path->slots[0] == 0) 1320 goto out; 1321 path->slots[0]--; 1322 } 1323 if (ret != 0) 1324 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1325 1326 if (key.type != key_type || key.objectid != dirid) { 1327 ret = 1; 1328 goto next; 1329 } 1330 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 1331 struct btrfs_dir_log_item); 1332 found_end = btrfs_dir_log_end(path->nodes[0], item); 1333 1334 if (*start_ret >= key.offset && *start_ret <= found_end) { 1335 ret = 0; 1336 *start_ret = key.offset; 1337 *end_ret = found_end; 1338 goto out; 1339 } 1340 ret = 1; 1341 next: 1342 /* check the next slot in the tree to see if it is a valid item */ 1343 nritems = btrfs_header_nritems(path->nodes[0]); 1344 if (path->slots[0] >= nritems) { 1345 ret = btrfs_next_leaf(root, path); 1346 if (ret) 1347 goto out; 1348 } else { 1349 path->slots[0]++; 1350 } 1351 1352 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1353 1354 if (key.type != key_type || key.objectid != dirid) { 1355 ret = 1; 1356 goto out; 1357 } 1358 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 1359 struct btrfs_dir_log_item); 1360 found_end = btrfs_dir_log_end(path->nodes[0], item); 1361 *start_ret = key.offset; 1362 *end_ret = found_end; 1363 ret = 0; 1364 out: 1365 btrfs_release_path(root, path); 1366 return ret; 1367 } 1368 1369 /* 1370 * this looks for a given directory item in the log. If the directory 1371 * item is not in the log, the item is removed and the inode it points 1372 * to is unlinked 1373 */ 1374 static noinline int check_item_in_log(struct btrfs_trans_handle *trans, 1375 struct btrfs_root *root, 1376 struct btrfs_root *log, 1377 struct btrfs_path *path, 1378 struct btrfs_path *log_path, 1379 struct inode *dir, 1380 struct btrfs_key *dir_key) 1381 { 1382 int ret; 1383 struct extent_buffer *eb; 1384 int slot; 1385 u32 item_size; 1386 struct btrfs_dir_item *di; 1387 struct btrfs_dir_item *log_di; 1388 int name_len; 1389 unsigned long ptr; 1390 unsigned long ptr_end; 1391 char *name; 1392 struct inode *inode; 1393 struct btrfs_key location; 1394 1395 again: 1396 eb = path->nodes[0]; 1397 slot = path->slots[0]; 1398 item_size = btrfs_item_size_nr(eb, slot); 1399 ptr = btrfs_item_ptr_offset(eb, slot); 1400 ptr_end = ptr + item_size; 1401 while (ptr < ptr_end) { 1402 di = (struct btrfs_dir_item *)ptr; 1403 name_len = btrfs_dir_name_len(eb, di); 1404 name = kmalloc(name_len, GFP_NOFS); 1405 if (!name) { 1406 ret = -ENOMEM; 1407 goto out; 1408 } 1409 read_extent_buffer(eb, name, (unsigned long)(di + 1), 1410 name_len); 1411 log_di = NULL; 1412 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) { 1413 log_di = btrfs_lookup_dir_item(trans, log, log_path, 1414 dir_key->objectid, 1415 name, name_len, 0); 1416 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) { 1417 log_di = btrfs_lookup_dir_index_item(trans, log, 1418 log_path, 1419 dir_key->objectid, 1420 dir_key->offset, 1421 name, name_len, 0); 1422 } 1423 if (!log_di || IS_ERR(log_di)) { 1424 btrfs_dir_item_key_to_cpu(eb, di, &location); 1425 btrfs_release_path(root, path); 1426 btrfs_release_path(log, log_path); 1427 inode = read_one_inode(root, location.objectid); 1428 BUG_ON(!inode); 1429 1430 ret = link_to_fixup_dir(trans, root, 1431 path, location.objectid); 1432 BUG_ON(ret); 1433 btrfs_inc_nlink(inode); 1434 ret = btrfs_unlink_inode(trans, root, dir, inode, 1435 name, name_len); 1436 BUG_ON(ret); 1437 kfree(name); 1438 iput(inode); 1439 1440 /* there might still be more names under this key 1441 * check and repeat if required 1442 */ 1443 ret = btrfs_search_slot(NULL, root, dir_key, path, 1444 0, 0); 1445 if (ret == 0) 1446 goto again; 1447 ret = 0; 1448 goto out; 1449 } 1450 btrfs_release_path(log, log_path); 1451 kfree(name); 1452 1453 ptr = (unsigned long)(di + 1); 1454 ptr += name_len; 1455 } 1456 ret = 0; 1457 out: 1458 btrfs_release_path(root, path); 1459 btrfs_release_path(log, log_path); 1460 return ret; 1461 } 1462 1463 /* 1464 * deletion replay happens before we copy any new directory items 1465 * out of the log or out of backreferences from inodes. It 1466 * scans the log to find ranges of keys that log is authoritative for, 1467 * and then scans the directory to find items in those ranges that are 1468 * not present in the log. 1469 * 1470 * Anything we don't find in the log is unlinked and removed from the 1471 * directory. 1472 */ 1473 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 1474 struct btrfs_root *root, 1475 struct btrfs_root *log, 1476 struct btrfs_path *path, 1477 u64 dirid, int del_all) 1478 { 1479 u64 range_start; 1480 u64 range_end; 1481 int key_type = BTRFS_DIR_LOG_ITEM_KEY; 1482 int ret = 0; 1483 struct btrfs_key dir_key; 1484 struct btrfs_key found_key; 1485 struct btrfs_path *log_path; 1486 struct inode *dir; 1487 1488 dir_key.objectid = dirid; 1489 dir_key.type = BTRFS_DIR_ITEM_KEY; 1490 log_path = btrfs_alloc_path(); 1491 if (!log_path) 1492 return -ENOMEM; 1493 1494 dir = read_one_inode(root, dirid); 1495 /* it isn't an error if the inode isn't there, that can happen 1496 * because we replay the deletes before we copy in the inode item 1497 * from the log 1498 */ 1499 if (!dir) { 1500 btrfs_free_path(log_path); 1501 return 0; 1502 } 1503 again: 1504 range_start = 0; 1505 range_end = 0; 1506 while (1) { 1507 if (del_all) 1508 range_end = (u64)-1; 1509 else { 1510 ret = find_dir_range(log, path, dirid, key_type, 1511 &range_start, &range_end); 1512 if (ret != 0) 1513 break; 1514 } 1515 1516 dir_key.offset = range_start; 1517 while (1) { 1518 int nritems; 1519 ret = btrfs_search_slot(NULL, root, &dir_key, path, 1520 0, 0); 1521 if (ret < 0) 1522 goto out; 1523 1524 nritems = btrfs_header_nritems(path->nodes[0]); 1525 if (path->slots[0] >= nritems) { 1526 ret = btrfs_next_leaf(root, path); 1527 if (ret) 1528 break; 1529 } 1530 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1531 path->slots[0]); 1532 if (found_key.objectid != dirid || 1533 found_key.type != dir_key.type) 1534 goto next_type; 1535 1536 if (found_key.offset > range_end) 1537 break; 1538 1539 ret = check_item_in_log(trans, root, log, path, 1540 log_path, dir, 1541 &found_key); 1542 BUG_ON(ret); 1543 if (found_key.offset == (u64)-1) 1544 break; 1545 dir_key.offset = found_key.offset + 1; 1546 } 1547 btrfs_release_path(root, path); 1548 if (range_end == (u64)-1) 1549 break; 1550 range_start = range_end + 1; 1551 } 1552 1553 next_type: 1554 ret = 0; 1555 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { 1556 key_type = BTRFS_DIR_LOG_INDEX_KEY; 1557 dir_key.type = BTRFS_DIR_INDEX_KEY; 1558 btrfs_release_path(root, path); 1559 goto again; 1560 } 1561 out: 1562 btrfs_release_path(root, path); 1563 btrfs_free_path(log_path); 1564 iput(dir); 1565 return ret; 1566 } 1567 1568 /* 1569 * the process_func used to replay items from the log tree. This 1570 * gets called in two different stages. The first stage just looks 1571 * for inodes and makes sure they are all copied into the subvolume. 1572 * 1573 * The second stage copies all the other item types from the log into 1574 * the subvolume. The two stage approach is slower, but gets rid of 1575 * lots of complexity around inodes referencing other inodes that exist 1576 * only in the log (references come from either directory items or inode 1577 * back refs). 1578 */ 1579 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, 1580 struct walk_control *wc, u64 gen) 1581 { 1582 int nritems; 1583 struct btrfs_path *path; 1584 struct btrfs_root *root = wc->replay_dest; 1585 struct btrfs_key key; 1586 u32 item_size; 1587 int level; 1588 int i; 1589 int ret; 1590 1591 btrfs_read_buffer(eb, gen); 1592 1593 level = btrfs_header_level(eb); 1594 1595 if (level != 0) 1596 return 0; 1597 1598 path = btrfs_alloc_path(); 1599 BUG_ON(!path); 1600 1601 nritems = btrfs_header_nritems(eb); 1602 for (i = 0; i < nritems; i++) { 1603 btrfs_item_key_to_cpu(eb, &key, i); 1604 item_size = btrfs_item_size_nr(eb, i); 1605 1606 /* inode keys are done during the first stage */ 1607 if (key.type == BTRFS_INODE_ITEM_KEY && 1608 wc->stage == LOG_WALK_REPLAY_INODES) { 1609 struct btrfs_inode_item *inode_item; 1610 u32 mode; 1611 1612 inode_item = btrfs_item_ptr(eb, i, 1613 struct btrfs_inode_item); 1614 mode = btrfs_inode_mode(eb, inode_item); 1615 if (S_ISDIR(mode)) { 1616 ret = replay_dir_deletes(wc->trans, 1617 root, log, path, key.objectid, 0); 1618 BUG_ON(ret); 1619 } 1620 ret = overwrite_item(wc->trans, root, path, 1621 eb, i, &key); 1622 BUG_ON(ret); 1623 1624 /* for regular files, make sure corresponding 1625 * orhpan item exist. extents past the new EOF 1626 * will be truncated later by orphan cleanup. 1627 */ 1628 if (S_ISREG(mode)) { 1629 ret = insert_orphan_item(wc->trans, root, 1630 key.objectid); 1631 BUG_ON(ret); 1632 } 1633 1634 ret = link_to_fixup_dir(wc->trans, root, 1635 path, key.objectid); 1636 BUG_ON(ret); 1637 } 1638 if (wc->stage < LOG_WALK_REPLAY_ALL) 1639 continue; 1640 1641 /* these keys are simply copied */ 1642 if (key.type == BTRFS_XATTR_ITEM_KEY) { 1643 ret = overwrite_item(wc->trans, root, path, 1644 eb, i, &key); 1645 BUG_ON(ret); 1646 } else if (key.type == BTRFS_INODE_REF_KEY) { 1647 ret = add_inode_ref(wc->trans, root, log, path, 1648 eb, i, &key); 1649 BUG_ON(ret && ret != -ENOENT); 1650 } else if (key.type == BTRFS_EXTENT_DATA_KEY) { 1651 ret = replay_one_extent(wc->trans, root, path, 1652 eb, i, &key); 1653 BUG_ON(ret); 1654 } else if (key.type == BTRFS_DIR_ITEM_KEY || 1655 key.type == BTRFS_DIR_INDEX_KEY) { 1656 ret = replay_one_dir_item(wc->trans, root, path, 1657 eb, i, &key); 1658 BUG_ON(ret); 1659 } 1660 } 1661 btrfs_free_path(path); 1662 return 0; 1663 } 1664 1665 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, 1666 struct btrfs_root *root, 1667 struct btrfs_path *path, int *level, 1668 struct walk_control *wc) 1669 { 1670 u64 root_owner; 1671 u64 root_gen; 1672 u64 bytenr; 1673 u64 ptr_gen; 1674 struct extent_buffer *next; 1675 struct extent_buffer *cur; 1676 struct extent_buffer *parent; 1677 u32 blocksize; 1678 int ret = 0; 1679 1680 WARN_ON(*level < 0); 1681 WARN_ON(*level >= BTRFS_MAX_LEVEL); 1682 1683 while (*level > 0) { 1684 WARN_ON(*level < 0); 1685 WARN_ON(*level >= BTRFS_MAX_LEVEL); 1686 cur = path->nodes[*level]; 1687 1688 if (btrfs_header_level(cur) != *level) 1689 WARN_ON(1); 1690 1691 if (path->slots[*level] >= 1692 btrfs_header_nritems(cur)) 1693 break; 1694 1695 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); 1696 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 1697 blocksize = btrfs_level_size(root, *level - 1); 1698 1699 parent = path->nodes[*level]; 1700 root_owner = btrfs_header_owner(parent); 1701 root_gen = btrfs_header_generation(parent); 1702 1703 next = btrfs_find_create_tree_block(root, bytenr, blocksize); 1704 1705 if (*level == 1) { 1706 wc->process_func(root, next, wc, ptr_gen); 1707 1708 path->slots[*level]++; 1709 if (wc->free) { 1710 btrfs_read_buffer(next, ptr_gen); 1711 1712 btrfs_tree_lock(next); 1713 clean_tree_block(trans, root, next); 1714 btrfs_set_lock_blocking(next); 1715 btrfs_wait_tree_block_writeback(next); 1716 btrfs_tree_unlock(next); 1717 1718 WARN_ON(root_owner != 1719 BTRFS_TREE_LOG_OBJECTID); 1720 ret = btrfs_free_reserved_extent(root, 1721 bytenr, blocksize); 1722 BUG_ON(ret); 1723 } 1724 free_extent_buffer(next); 1725 continue; 1726 } 1727 btrfs_read_buffer(next, ptr_gen); 1728 1729 WARN_ON(*level <= 0); 1730 if (path->nodes[*level-1]) 1731 free_extent_buffer(path->nodes[*level-1]); 1732 path->nodes[*level-1] = next; 1733 *level = btrfs_header_level(next); 1734 path->slots[*level] = 0; 1735 cond_resched(); 1736 } 1737 WARN_ON(*level < 0); 1738 WARN_ON(*level >= BTRFS_MAX_LEVEL); 1739 1740 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); 1741 1742 cond_resched(); 1743 return 0; 1744 } 1745 1746 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, 1747 struct btrfs_root *root, 1748 struct btrfs_path *path, int *level, 1749 struct walk_control *wc) 1750 { 1751 u64 root_owner; 1752 u64 root_gen; 1753 int i; 1754 int slot; 1755 int ret; 1756 1757 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { 1758 slot = path->slots[i]; 1759 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { 1760 struct extent_buffer *node; 1761 node = path->nodes[i]; 1762 path->slots[i]++; 1763 *level = i; 1764 WARN_ON(*level == 0); 1765 return 0; 1766 } else { 1767 struct extent_buffer *parent; 1768 if (path->nodes[*level] == root->node) 1769 parent = path->nodes[*level]; 1770 else 1771 parent = path->nodes[*level + 1]; 1772 1773 root_owner = btrfs_header_owner(parent); 1774 root_gen = btrfs_header_generation(parent); 1775 wc->process_func(root, path->nodes[*level], wc, 1776 btrfs_header_generation(path->nodes[*level])); 1777 if (wc->free) { 1778 struct extent_buffer *next; 1779 1780 next = path->nodes[*level]; 1781 1782 btrfs_tree_lock(next); 1783 clean_tree_block(trans, root, next); 1784 btrfs_set_lock_blocking(next); 1785 btrfs_wait_tree_block_writeback(next); 1786 btrfs_tree_unlock(next); 1787 1788 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); 1789 ret = btrfs_free_reserved_extent(root, 1790 path->nodes[*level]->start, 1791 path->nodes[*level]->len); 1792 BUG_ON(ret); 1793 } 1794 free_extent_buffer(path->nodes[*level]); 1795 path->nodes[*level] = NULL; 1796 *level = i + 1; 1797 } 1798 } 1799 return 1; 1800 } 1801 1802 /* 1803 * drop the reference count on the tree rooted at 'snap'. This traverses 1804 * the tree freeing any blocks that have a ref count of zero after being 1805 * decremented. 1806 */ 1807 static int walk_log_tree(struct btrfs_trans_handle *trans, 1808 struct btrfs_root *log, struct walk_control *wc) 1809 { 1810 int ret = 0; 1811 int wret; 1812 int level; 1813 struct btrfs_path *path; 1814 int i; 1815 int orig_level; 1816 1817 path = btrfs_alloc_path(); 1818 BUG_ON(!path); 1819 1820 level = btrfs_header_level(log->node); 1821 orig_level = level; 1822 path->nodes[level] = log->node; 1823 extent_buffer_get(log->node); 1824 path->slots[level] = 0; 1825 1826 while (1) { 1827 wret = walk_down_log_tree(trans, log, path, &level, wc); 1828 if (wret > 0) 1829 break; 1830 if (wret < 0) 1831 ret = wret; 1832 1833 wret = walk_up_log_tree(trans, log, path, &level, wc); 1834 if (wret > 0) 1835 break; 1836 if (wret < 0) 1837 ret = wret; 1838 } 1839 1840 /* was the root node processed? if not, catch it here */ 1841 if (path->nodes[orig_level]) { 1842 wc->process_func(log, path->nodes[orig_level], wc, 1843 btrfs_header_generation(path->nodes[orig_level])); 1844 if (wc->free) { 1845 struct extent_buffer *next; 1846 1847 next = path->nodes[orig_level]; 1848 1849 btrfs_tree_lock(next); 1850 clean_tree_block(trans, log, next); 1851 btrfs_set_lock_blocking(next); 1852 btrfs_wait_tree_block_writeback(next); 1853 btrfs_tree_unlock(next); 1854 1855 WARN_ON(log->root_key.objectid != 1856 BTRFS_TREE_LOG_OBJECTID); 1857 ret = btrfs_free_reserved_extent(log, next->start, 1858 next->len); 1859 BUG_ON(ret); 1860 } 1861 } 1862 1863 for (i = 0; i <= orig_level; i++) { 1864 if (path->nodes[i]) { 1865 free_extent_buffer(path->nodes[i]); 1866 path->nodes[i] = NULL; 1867 } 1868 } 1869 btrfs_free_path(path); 1870 return ret; 1871 } 1872 1873 /* 1874 * helper function to update the item for a given subvolumes log root 1875 * in the tree of log roots 1876 */ 1877 static int update_log_root(struct btrfs_trans_handle *trans, 1878 struct btrfs_root *log) 1879 { 1880 int ret; 1881 1882 if (log->log_transid == 1) { 1883 /* insert root item on the first sync */ 1884 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree, 1885 &log->root_key, &log->root_item); 1886 } else { 1887 ret = btrfs_update_root(trans, log->fs_info->log_root_tree, 1888 &log->root_key, &log->root_item); 1889 } 1890 return ret; 1891 } 1892 1893 static int wait_log_commit(struct btrfs_trans_handle *trans, 1894 struct btrfs_root *root, unsigned long transid) 1895 { 1896 DEFINE_WAIT(wait); 1897 int index = transid % 2; 1898 1899 /* 1900 * we only allow two pending log transactions at a time, 1901 * so we know that if ours is more than 2 older than the 1902 * current transaction, we're done 1903 */ 1904 do { 1905 prepare_to_wait(&root->log_commit_wait[index], 1906 &wait, TASK_UNINTERRUPTIBLE); 1907 mutex_unlock(&root->log_mutex); 1908 1909 if (root->fs_info->last_trans_log_full_commit != 1910 trans->transid && root->log_transid < transid + 2 && 1911 atomic_read(&root->log_commit[index])) 1912 schedule(); 1913 1914 finish_wait(&root->log_commit_wait[index], &wait); 1915 mutex_lock(&root->log_mutex); 1916 } while (root->log_transid < transid + 2 && 1917 atomic_read(&root->log_commit[index])); 1918 return 0; 1919 } 1920 1921 static int wait_for_writer(struct btrfs_trans_handle *trans, 1922 struct btrfs_root *root) 1923 { 1924 DEFINE_WAIT(wait); 1925 while (atomic_read(&root->log_writers)) { 1926 prepare_to_wait(&root->log_writer_wait, 1927 &wait, TASK_UNINTERRUPTIBLE); 1928 mutex_unlock(&root->log_mutex); 1929 if (root->fs_info->last_trans_log_full_commit != 1930 trans->transid && atomic_read(&root->log_writers)) 1931 schedule(); 1932 mutex_lock(&root->log_mutex); 1933 finish_wait(&root->log_writer_wait, &wait); 1934 } 1935 return 0; 1936 } 1937 1938 /* 1939 * btrfs_sync_log does sends a given tree log down to the disk and 1940 * updates the super blocks to record it. When this call is done, 1941 * you know that any inodes previously logged are safely on disk only 1942 * if it returns 0. 1943 * 1944 * Any other return value means you need to call btrfs_commit_transaction. 1945 * Some of the edge cases for fsyncing directories that have had unlinks 1946 * or renames done in the past mean that sometimes the only safe 1947 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN, 1948 * that has happened. 1949 */ 1950 int btrfs_sync_log(struct btrfs_trans_handle *trans, 1951 struct btrfs_root *root) 1952 { 1953 int index1; 1954 int index2; 1955 int mark; 1956 int ret; 1957 struct btrfs_root *log = root->log_root; 1958 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree; 1959 unsigned long log_transid = 0; 1960 1961 mutex_lock(&root->log_mutex); 1962 index1 = root->log_transid % 2; 1963 if (atomic_read(&root->log_commit[index1])) { 1964 wait_log_commit(trans, root, root->log_transid); 1965 mutex_unlock(&root->log_mutex); 1966 return 0; 1967 } 1968 atomic_set(&root->log_commit[index1], 1); 1969 1970 /* wait for previous tree log sync to complete */ 1971 if (atomic_read(&root->log_commit[(index1 + 1) % 2])) 1972 wait_log_commit(trans, root, root->log_transid - 1); 1973 1974 while (1) { 1975 unsigned long batch = root->log_batch; 1976 if (root->log_multiple_pids) { 1977 mutex_unlock(&root->log_mutex); 1978 schedule_timeout_uninterruptible(1); 1979 mutex_lock(&root->log_mutex); 1980 } 1981 wait_for_writer(trans, root); 1982 if (batch == root->log_batch) 1983 break; 1984 } 1985 1986 /* bail out if we need to do a full commit */ 1987 if (root->fs_info->last_trans_log_full_commit == trans->transid) { 1988 ret = -EAGAIN; 1989 mutex_unlock(&root->log_mutex); 1990 goto out; 1991 } 1992 1993 log_transid = root->log_transid; 1994 if (log_transid % 2 == 0) 1995 mark = EXTENT_DIRTY; 1996 else 1997 mark = EXTENT_NEW; 1998 1999 /* we start IO on all the marked extents here, but we don't actually 2000 * wait for them until later. 2001 */ 2002 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark); 2003 BUG_ON(ret); 2004 2005 btrfs_set_root_node(&log->root_item, log->node); 2006 2007 root->log_batch = 0; 2008 root->log_transid++; 2009 log->log_transid = root->log_transid; 2010 root->log_start_pid = 0; 2011 smp_mb(); 2012 /* 2013 * IO has been started, blocks of the log tree have WRITTEN flag set 2014 * in their headers. new modifications of the log will be written to 2015 * new positions. so it's safe to allow log writers to go in. 2016 */ 2017 mutex_unlock(&root->log_mutex); 2018 2019 mutex_lock(&log_root_tree->log_mutex); 2020 log_root_tree->log_batch++; 2021 atomic_inc(&log_root_tree->log_writers); 2022 mutex_unlock(&log_root_tree->log_mutex); 2023 2024 ret = update_log_root(trans, log); 2025 2026 mutex_lock(&log_root_tree->log_mutex); 2027 if (atomic_dec_and_test(&log_root_tree->log_writers)) { 2028 smp_mb(); 2029 if (waitqueue_active(&log_root_tree->log_writer_wait)) 2030 wake_up(&log_root_tree->log_writer_wait); 2031 } 2032 2033 if (ret) { 2034 BUG_ON(ret != -ENOSPC); 2035 root->fs_info->last_trans_log_full_commit = trans->transid; 2036 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2037 mutex_unlock(&log_root_tree->log_mutex); 2038 ret = -EAGAIN; 2039 goto out; 2040 } 2041 2042 index2 = log_root_tree->log_transid % 2; 2043 if (atomic_read(&log_root_tree->log_commit[index2])) { 2044 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2045 wait_log_commit(trans, log_root_tree, 2046 log_root_tree->log_transid); 2047 mutex_unlock(&log_root_tree->log_mutex); 2048 goto out; 2049 } 2050 atomic_set(&log_root_tree->log_commit[index2], 1); 2051 2052 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) { 2053 wait_log_commit(trans, log_root_tree, 2054 log_root_tree->log_transid - 1); 2055 } 2056 2057 wait_for_writer(trans, log_root_tree); 2058 2059 /* 2060 * now that we've moved on to the tree of log tree roots, 2061 * check the full commit flag again 2062 */ 2063 if (root->fs_info->last_trans_log_full_commit == trans->transid) { 2064 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2065 mutex_unlock(&log_root_tree->log_mutex); 2066 ret = -EAGAIN; 2067 goto out_wake_log_root; 2068 } 2069 2070 ret = btrfs_write_and_wait_marked_extents(log_root_tree, 2071 &log_root_tree->dirty_log_pages, 2072 EXTENT_DIRTY | EXTENT_NEW); 2073 BUG_ON(ret); 2074 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2075 2076 btrfs_set_super_log_root(&root->fs_info->super_for_commit, 2077 log_root_tree->node->start); 2078 btrfs_set_super_log_root_level(&root->fs_info->super_for_commit, 2079 btrfs_header_level(log_root_tree->node)); 2080 2081 log_root_tree->log_batch = 0; 2082 log_root_tree->log_transid++; 2083 smp_mb(); 2084 2085 mutex_unlock(&log_root_tree->log_mutex); 2086 2087 /* 2088 * nobody else is going to jump in and write the the ctree 2089 * super here because the log_commit atomic below is protecting 2090 * us. We must be called with a transaction handle pinning 2091 * the running transaction open, so a full commit can't hop 2092 * in and cause problems either. 2093 */ 2094 write_ctree_super(trans, root->fs_info->tree_root, 1); 2095 ret = 0; 2096 2097 mutex_lock(&root->log_mutex); 2098 if (root->last_log_commit < log_transid) 2099 root->last_log_commit = log_transid; 2100 mutex_unlock(&root->log_mutex); 2101 2102 out_wake_log_root: 2103 atomic_set(&log_root_tree->log_commit[index2], 0); 2104 smp_mb(); 2105 if (waitqueue_active(&log_root_tree->log_commit_wait[index2])) 2106 wake_up(&log_root_tree->log_commit_wait[index2]); 2107 out: 2108 atomic_set(&root->log_commit[index1], 0); 2109 smp_mb(); 2110 if (waitqueue_active(&root->log_commit_wait[index1])) 2111 wake_up(&root->log_commit_wait[index1]); 2112 return 0; 2113 } 2114 2115 static void free_log_tree(struct btrfs_trans_handle *trans, 2116 struct btrfs_root *log) 2117 { 2118 int ret; 2119 u64 start; 2120 u64 end; 2121 struct walk_control wc = { 2122 .free = 1, 2123 .process_func = process_one_buffer 2124 }; 2125 2126 ret = walk_log_tree(trans, log, &wc); 2127 BUG_ON(ret); 2128 2129 while (1) { 2130 ret = find_first_extent_bit(&log->dirty_log_pages, 2131 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW); 2132 if (ret) 2133 break; 2134 2135 clear_extent_bits(&log->dirty_log_pages, start, end, 2136 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS); 2137 } 2138 2139 free_extent_buffer(log->node); 2140 kfree(log); 2141 } 2142 2143 /* 2144 * free all the extents used by the tree log. This should be called 2145 * at commit time of the full transaction 2146 */ 2147 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) 2148 { 2149 if (root->log_root) { 2150 free_log_tree(trans, root->log_root); 2151 root->log_root = NULL; 2152 } 2153 return 0; 2154 } 2155 2156 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, 2157 struct btrfs_fs_info *fs_info) 2158 { 2159 if (fs_info->log_root_tree) { 2160 free_log_tree(trans, fs_info->log_root_tree); 2161 fs_info->log_root_tree = NULL; 2162 } 2163 return 0; 2164 } 2165 2166 /* 2167 * If both a file and directory are logged, and unlinks or renames are 2168 * mixed in, we have a few interesting corners: 2169 * 2170 * create file X in dir Y 2171 * link file X to X.link in dir Y 2172 * fsync file X 2173 * unlink file X but leave X.link 2174 * fsync dir Y 2175 * 2176 * After a crash we would expect only X.link to exist. But file X 2177 * didn't get fsync'd again so the log has back refs for X and X.link. 2178 * 2179 * We solve this by removing directory entries and inode backrefs from the 2180 * log when a file that was logged in the current transaction is 2181 * unlinked. Any later fsync will include the updated log entries, and 2182 * we'll be able to reconstruct the proper directory items from backrefs. 2183 * 2184 * This optimizations allows us to avoid relogging the entire inode 2185 * or the entire directory. 2186 */ 2187 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, 2188 struct btrfs_root *root, 2189 const char *name, int name_len, 2190 struct inode *dir, u64 index) 2191 { 2192 struct btrfs_root *log; 2193 struct btrfs_dir_item *di; 2194 struct btrfs_path *path; 2195 int ret; 2196 int err = 0; 2197 int bytes_del = 0; 2198 2199 if (BTRFS_I(dir)->logged_trans < trans->transid) 2200 return 0; 2201 2202 ret = join_running_log_trans(root); 2203 if (ret) 2204 return 0; 2205 2206 mutex_lock(&BTRFS_I(dir)->log_mutex); 2207 2208 log = root->log_root; 2209 path = btrfs_alloc_path(); 2210 di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, 2211 name, name_len, -1); 2212 if (IS_ERR(di)) { 2213 err = PTR_ERR(di); 2214 goto fail; 2215 } 2216 if (di) { 2217 ret = btrfs_delete_one_dir_name(trans, log, path, di); 2218 bytes_del += name_len; 2219 BUG_ON(ret); 2220 } 2221 btrfs_release_path(log, path); 2222 di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino, 2223 index, name, name_len, -1); 2224 if (IS_ERR(di)) { 2225 err = PTR_ERR(di); 2226 goto fail; 2227 } 2228 if (di) { 2229 ret = btrfs_delete_one_dir_name(trans, log, path, di); 2230 bytes_del += name_len; 2231 BUG_ON(ret); 2232 } 2233 2234 /* update the directory size in the log to reflect the names 2235 * we have removed 2236 */ 2237 if (bytes_del) { 2238 struct btrfs_key key; 2239 2240 key.objectid = dir->i_ino; 2241 key.offset = 0; 2242 key.type = BTRFS_INODE_ITEM_KEY; 2243 btrfs_release_path(log, path); 2244 2245 ret = btrfs_search_slot(trans, log, &key, path, 0, 1); 2246 if (ret < 0) { 2247 err = ret; 2248 goto fail; 2249 } 2250 if (ret == 0) { 2251 struct btrfs_inode_item *item; 2252 u64 i_size; 2253 2254 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2255 struct btrfs_inode_item); 2256 i_size = btrfs_inode_size(path->nodes[0], item); 2257 if (i_size > bytes_del) 2258 i_size -= bytes_del; 2259 else 2260 i_size = 0; 2261 btrfs_set_inode_size(path->nodes[0], item, i_size); 2262 btrfs_mark_buffer_dirty(path->nodes[0]); 2263 } else 2264 ret = 0; 2265 btrfs_release_path(log, path); 2266 } 2267 fail: 2268 btrfs_free_path(path); 2269 mutex_unlock(&BTRFS_I(dir)->log_mutex); 2270 if (ret == -ENOSPC) { 2271 root->fs_info->last_trans_log_full_commit = trans->transid; 2272 ret = 0; 2273 } 2274 btrfs_end_log_trans(root); 2275 2276 return 0; 2277 } 2278 2279 /* see comments for btrfs_del_dir_entries_in_log */ 2280 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, 2281 struct btrfs_root *root, 2282 const char *name, int name_len, 2283 struct inode *inode, u64 dirid) 2284 { 2285 struct btrfs_root *log; 2286 u64 index; 2287 int ret; 2288 2289 if (BTRFS_I(inode)->logged_trans < trans->transid) 2290 return 0; 2291 2292 ret = join_running_log_trans(root); 2293 if (ret) 2294 return 0; 2295 log = root->log_root; 2296 mutex_lock(&BTRFS_I(inode)->log_mutex); 2297 2298 ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino, 2299 dirid, &index); 2300 mutex_unlock(&BTRFS_I(inode)->log_mutex); 2301 if (ret == -ENOSPC) { 2302 root->fs_info->last_trans_log_full_commit = trans->transid; 2303 ret = 0; 2304 } 2305 btrfs_end_log_trans(root); 2306 2307 return ret; 2308 } 2309 2310 /* 2311 * creates a range item in the log for 'dirid'. first_offset and 2312 * last_offset tell us which parts of the key space the log should 2313 * be considered authoritative for. 2314 */ 2315 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, 2316 struct btrfs_root *log, 2317 struct btrfs_path *path, 2318 int key_type, u64 dirid, 2319 u64 first_offset, u64 last_offset) 2320 { 2321 int ret; 2322 struct btrfs_key key; 2323 struct btrfs_dir_log_item *item; 2324 2325 key.objectid = dirid; 2326 key.offset = first_offset; 2327 if (key_type == BTRFS_DIR_ITEM_KEY) 2328 key.type = BTRFS_DIR_LOG_ITEM_KEY; 2329 else 2330 key.type = BTRFS_DIR_LOG_INDEX_KEY; 2331 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); 2332 if (ret) 2333 return ret; 2334 2335 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2336 struct btrfs_dir_log_item); 2337 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); 2338 btrfs_mark_buffer_dirty(path->nodes[0]); 2339 btrfs_release_path(log, path); 2340 return 0; 2341 } 2342 2343 /* 2344 * log all the items included in the current transaction for a given 2345 * directory. This also creates the range items in the log tree required 2346 * to replay anything deleted before the fsync 2347 */ 2348 static noinline int log_dir_items(struct btrfs_trans_handle *trans, 2349 struct btrfs_root *root, struct inode *inode, 2350 struct btrfs_path *path, 2351 struct btrfs_path *dst_path, int key_type, 2352 u64 min_offset, u64 *last_offset_ret) 2353 { 2354 struct btrfs_key min_key; 2355 struct btrfs_key max_key; 2356 struct btrfs_root *log = root->log_root; 2357 struct extent_buffer *src; 2358 int err = 0; 2359 int ret; 2360 int i; 2361 int nritems; 2362 u64 first_offset = min_offset; 2363 u64 last_offset = (u64)-1; 2364 2365 log = root->log_root; 2366 max_key.objectid = inode->i_ino; 2367 max_key.offset = (u64)-1; 2368 max_key.type = key_type; 2369 2370 min_key.objectid = inode->i_ino; 2371 min_key.type = key_type; 2372 min_key.offset = min_offset; 2373 2374 path->keep_locks = 1; 2375 2376 ret = btrfs_search_forward(root, &min_key, &max_key, 2377 path, 0, trans->transid); 2378 2379 /* 2380 * we didn't find anything from this transaction, see if there 2381 * is anything at all 2382 */ 2383 if (ret != 0 || min_key.objectid != inode->i_ino || 2384 min_key.type != key_type) { 2385 min_key.objectid = inode->i_ino; 2386 min_key.type = key_type; 2387 min_key.offset = (u64)-1; 2388 btrfs_release_path(root, path); 2389 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 2390 if (ret < 0) { 2391 btrfs_release_path(root, path); 2392 return ret; 2393 } 2394 ret = btrfs_previous_item(root, path, inode->i_ino, key_type); 2395 2396 /* if ret == 0 there are items for this type, 2397 * create a range to tell us the last key of this type. 2398 * otherwise, there are no items in this directory after 2399 * *min_offset, and we create a range to indicate that. 2400 */ 2401 if (ret == 0) { 2402 struct btrfs_key tmp; 2403 btrfs_item_key_to_cpu(path->nodes[0], &tmp, 2404 path->slots[0]); 2405 if (key_type == tmp.type) 2406 first_offset = max(min_offset, tmp.offset) + 1; 2407 } 2408 goto done; 2409 } 2410 2411 /* go backward to find any previous key */ 2412 ret = btrfs_previous_item(root, path, inode->i_ino, key_type); 2413 if (ret == 0) { 2414 struct btrfs_key tmp; 2415 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 2416 if (key_type == tmp.type) { 2417 first_offset = tmp.offset; 2418 ret = overwrite_item(trans, log, dst_path, 2419 path->nodes[0], path->slots[0], 2420 &tmp); 2421 if (ret) { 2422 err = ret; 2423 goto done; 2424 } 2425 } 2426 } 2427 btrfs_release_path(root, path); 2428 2429 /* find the first key from this transaction again */ 2430 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 2431 if (ret != 0) { 2432 WARN_ON(1); 2433 goto done; 2434 } 2435 2436 /* 2437 * we have a block from this transaction, log every item in it 2438 * from our directory 2439 */ 2440 while (1) { 2441 struct btrfs_key tmp; 2442 src = path->nodes[0]; 2443 nritems = btrfs_header_nritems(src); 2444 for (i = path->slots[0]; i < nritems; i++) { 2445 btrfs_item_key_to_cpu(src, &min_key, i); 2446 2447 if (min_key.objectid != inode->i_ino || 2448 min_key.type != key_type) 2449 goto done; 2450 ret = overwrite_item(trans, log, dst_path, src, i, 2451 &min_key); 2452 if (ret) { 2453 err = ret; 2454 goto done; 2455 } 2456 } 2457 path->slots[0] = nritems; 2458 2459 /* 2460 * look ahead to the next item and see if it is also 2461 * from this directory and from this transaction 2462 */ 2463 ret = btrfs_next_leaf(root, path); 2464 if (ret == 1) { 2465 last_offset = (u64)-1; 2466 goto done; 2467 } 2468 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 2469 if (tmp.objectid != inode->i_ino || tmp.type != key_type) { 2470 last_offset = (u64)-1; 2471 goto done; 2472 } 2473 if (btrfs_header_generation(path->nodes[0]) != trans->transid) { 2474 ret = overwrite_item(trans, log, dst_path, 2475 path->nodes[0], path->slots[0], 2476 &tmp); 2477 if (ret) 2478 err = ret; 2479 else 2480 last_offset = tmp.offset; 2481 goto done; 2482 } 2483 } 2484 done: 2485 btrfs_release_path(root, path); 2486 btrfs_release_path(log, dst_path); 2487 2488 if (err == 0) { 2489 *last_offset_ret = last_offset; 2490 /* 2491 * insert the log range keys to indicate where the log 2492 * is valid 2493 */ 2494 ret = insert_dir_log_key(trans, log, path, key_type, 2495 inode->i_ino, first_offset, 2496 last_offset); 2497 if (ret) 2498 err = ret; 2499 } 2500 return err; 2501 } 2502 2503 /* 2504 * logging directories is very similar to logging inodes, We find all the items 2505 * from the current transaction and write them to the log. 2506 * 2507 * The recovery code scans the directory in the subvolume, and if it finds a 2508 * key in the range logged that is not present in the log tree, then it means 2509 * that dir entry was unlinked during the transaction. 2510 * 2511 * In order for that scan to work, we must include one key smaller than 2512 * the smallest logged by this transaction and one key larger than the largest 2513 * key logged by this transaction. 2514 */ 2515 static noinline int log_directory_changes(struct btrfs_trans_handle *trans, 2516 struct btrfs_root *root, struct inode *inode, 2517 struct btrfs_path *path, 2518 struct btrfs_path *dst_path) 2519 { 2520 u64 min_key; 2521 u64 max_key; 2522 int ret; 2523 int key_type = BTRFS_DIR_ITEM_KEY; 2524 2525 again: 2526 min_key = 0; 2527 max_key = 0; 2528 while (1) { 2529 ret = log_dir_items(trans, root, inode, path, 2530 dst_path, key_type, min_key, 2531 &max_key); 2532 if (ret) 2533 return ret; 2534 if (max_key == (u64)-1) 2535 break; 2536 min_key = max_key + 1; 2537 } 2538 2539 if (key_type == BTRFS_DIR_ITEM_KEY) { 2540 key_type = BTRFS_DIR_INDEX_KEY; 2541 goto again; 2542 } 2543 return 0; 2544 } 2545 2546 /* 2547 * a helper function to drop items from the log before we relog an 2548 * inode. max_key_type indicates the highest item type to remove. 2549 * This cannot be run for file data extents because it does not 2550 * free the extents they point to. 2551 */ 2552 static int drop_objectid_items(struct btrfs_trans_handle *trans, 2553 struct btrfs_root *log, 2554 struct btrfs_path *path, 2555 u64 objectid, int max_key_type) 2556 { 2557 int ret; 2558 struct btrfs_key key; 2559 struct btrfs_key found_key; 2560 2561 key.objectid = objectid; 2562 key.type = max_key_type; 2563 key.offset = (u64)-1; 2564 2565 while (1) { 2566 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 2567 BUG_ON(ret == 0); 2568 if (ret < 0) 2569 break; 2570 2571 if (path->slots[0] == 0) 2572 break; 2573 2574 path->slots[0]--; 2575 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2576 path->slots[0]); 2577 2578 if (found_key.objectid != objectid) 2579 break; 2580 2581 ret = btrfs_del_item(trans, log, path); 2582 BUG_ON(ret); 2583 btrfs_release_path(log, path); 2584 } 2585 btrfs_release_path(log, path); 2586 return ret; 2587 } 2588 2589 static noinline int copy_items(struct btrfs_trans_handle *trans, 2590 struct btrfs_root *log, 2591 struct btrfs_path *dst_path, 2592 struct extent_buffer *src, 2593 int start_slot, int nr, int inode_only) 2594 { 2595 unsigned long src_offset; 2596 unsigned long dst_offset; 2597 struct btrfs_file_extent_item *extent; 2598 struct btrfs_inode_item *inode_item; 2599 int ret; 2600 struct btrfs_key *ins_keys; 2601 u32 *ins_sizes; 2602 char *ins_data; 2603 int i; 2604 struct list_head ordered_sums; 2605 2606 INIT_LIST_HEAD(&ordered_sums); 2607 2608 ins_data = kmalloc(nr * sizeof(struct btrfs_key) + 2609 nr * sizeof(u32), GFP_NOFS); 2610 ins_sizes = (u32 *)ins_data; 2611 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); 2612 2613 for (i = 0; i < nr; i++) { 2614 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot); 2615 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot); 2616 } 2617 ret = btrfs_insert_empty_items(trans, log, dst_path, 2618 ins_keys, ins_sizes, nr); 2619 if (ret) { 2620 kfree(ins_data); 2621 return ret; 2622 } 2623 2624 for (i = 0; i < nr; i++, dst_path->slots[0]++) { 2625 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], 2626 dst_path->slots[0]); 2627 2628 src_offset = btrfs_item_ptr_offset(src, start_slot + i); 2629 2630 copy_extent_buffer(dst_path->nodes[0], src, dst_offset, 2631 src_offset, ins_sizes[i]); 2632 2633 if (inode_only == LOG_INODE_EXISTS && 2634 ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { 2635 inode_item = btrfs_item_ptr(dst_path->nodes[0], 2636 dst_path->slots[0], 2637 struct btrfs_inode_item); 2638 btrfs_set_inode_size(dst_path->nodes[0], inode_item, 0); 2639 2640 /* set the generation to zero so the recover code 2641 * can tell the difference between an logging 2642 * just to say 'this inode exists' and a logging 2643 * to say 'update this inode with these values' 2644 */ 2645 btrfs_set_inode_generation(dst_path->nodes[0], 2646 inode_item, 0); 2647 } 2648 /* take a reference on file data extents so that truncates 2649 * or deletes of this inode don't have to relog the inode 2650 * again 2651 */ 2652 if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY) { 2653 int found_type; 2654 extent = btrfs_item_ptr(src, start_slot + i, 2655 struct btrfs_file_extent_item); 2656 2657 found_type = btrfs_file_extent_type(src, extent); 2658 if (found_type == BTRFS_FILE_EXTENT_REG || 2659 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 2660 u64 ds, dl, cs, cl; 2661 ds = btrfs_file_extent_disk_bytenr(src, 2662 extent); 2663 /* ds == 0 is a hole */ 2664 if (ds == 0) 2665 continue; 2666 2667 dl = btrfs_file_extent_disk_num_bytes(src, 2668 extent); 2669 cs = btrfs_file_extent_offset(src, extent); 2670 cl = btrfs_file_extent_num_bytes(src, 2671 extent); 2672 if (btrfs_file_extent_compression(src, 2673 extent)) { 2674 cs = 0; 2675 cl = dl; 2676 } 2677 2678 ret = btrfs_lookup_csums_range( 2679 log->fs_info->csum_root, 2680 ds + cs, ds + cs + cl - 1, 2681 &ordered_sums); 2682 BUG_ON(ret); 2683 } 2684 } 2685 } 2686 2687 btrfs_mark_buffer_dirty(dst_path->nodes[0]); 2688 btrfs_release_path(log, dst_path); 2689 kfree(ins_data); 2690 2691 /* 2692 * we have to do this after the loop above to avoid changing the 2693 * log tree while trying to change the log tree. 2694 */ 2695 ret = 0; 2696 while (!list_empty(&ordered_sums)) { 2697 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 2698 struct btrfs_ordered_sum, 2699 list); 2700 if (!ret) 2701 ret = btrfs_csum_file_blocks(trans, log, sums); 2702 list_del(&sums->list); 2703 kfree(sums); 2704 } 2705 return ret; 2706 } 2707 2708 /* log a single inode in the tree log. 2709 * At least one parent directory for this inode must exist in the tree 2710 * or be logged already. 2711 * 2712 * Any items from this inode changed by the current transaction are copied 2713 * to the log tree. An extra reference is taken on any extents in this 2714 * file, allowing us to avoid a whole pile of corner cases around logging 2715 * blocks that have been removed from the tree. 2716 * 2717 * See LOG_INODE_ALL and related defines for a description of what inode_only 2718 * does. 2719 * 2720 * This handles both files and directories. 2721 */ 2722 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 2723 struct btrfs_root *root, struct inode *inode, 2724 int inode_only) 2725 { 2726 struct btrfs_path *path; 2727 struct btrfs_path *dst_path; 2728 struct btrfs_key min_key; 2729 struct btrfs_key max_key; 2730 struct btrfs_root *log = root->log_root; 2731 struct extent_buffer *src = NULL; 2732 u32 size; 2733 int err = 0; 2734 int ret; 2735 int nritems; 2736 int ins_start_slot = 0; 2737 int ins_nr; 2738 2739 log = root->log_root; 2740 2741 path = btrfs_alloc_path(); 2742 dst_path = btrfs_alloc_path(); 2743 2744 min_key.objectid = inode->i_ino; 2745 min_key.type = BTRFS_INODE_ITEM_KEY; 2746 min_key.offset = 0; 2747 2748 max_key.objectid = inode->i_ino; 2749 2750 /* today the code can only do partial logging of directories */ 2751 if (!S_ISDIR(inode->i_mode)) 2752 inode_only = LOG_INODE_ALL; 2753 2754 if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode)) 2755 max_key.type = BTRFS_XATTR_ITEM_KEY; 2756 else 2757 max_key.type = (u8)-1; 2758 max_key.offset = (u64)-1; 2759 2760 mutex_lock(&BTRFS_I(inode)->log_mutex); 2761 2762 /* 2763 * a brute force approach to making sure we get the most uptodate 2764 * copies of everything. 2765 */ 2766 if (S_ISDIR(inode->i_mode)) { 2767 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; 2768 2769 if (inode_only == LOG_INODE_EXISTS) 2770 max_key_type = BTRFS_XATTR_ITEM_KEY; 2771 ret = drop_objectid_items(trans, log, path, 2772 inode->i_ino, max_key_type); 2773 } else { 2774 ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0); 2775 } 2776 if (ret) { 2777 err = ret; 2778 goto out_unlock; 2779 } 2780 path->keep_locks = 1; 2781 2782 while (1) { 2783 ins_nr = 0; 2784 ret = btrfs_search_forward(root, &min_key, &max_key, 2785 path, 0, trans->transid); 2786 if (ret != 0) 2787 break; 2788 again: 2789 /* note, ins_nr might be > 0 here, cleanup outside the loop */ 2790 if (min_key.objectid != inode->i_ino) 2791 break; 2792 if (min_key.type > max_key.type) 2793 break; 2794 2795 src = path->nodes[0]; 2796 size = btrfs_item_size_nr(src, path->slots[0]); 2797 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { 2798 ins_nr++; 2799 goto next_slot; 2800 } else if (!ins_nr) { 2801 ins_start_slot = path->slots[0]; 2802 ins_nr = 1; 2803 goto next_slot; 2804 } 2805 2806 ret = copy_items(trans, log, dst_path, src, ins_start_slot, 2807 ins_nr, inode_only); 2808 if (ret) { 2809 err = ret; 2810 goto out_unlock; 2811 } 2812 ins_nr = 1; 2813 ins_start_slot = path->slots[0]; 2814 next_slot: 2815 2816 nritems = btrfs_header_nritems(path->nodes[0]); 2817 path->slots[0]++; 2818 if (path->slots[0] < nritems) { 2819 btrfs_item_key_to_cpu(path->nodes[0], &min_key, 2820 path->slots[0]); 2821 goto again; 2822 } 2823 if (ins_nr) { 2824 ret = copy_items(trans, log, dst_path, src, 2825 ins_start_slot, 2826 ins_nr, inode_only); 2827 if (ret) { 2828 err = ret; 2829 goto out_unlock; 2830 } 2831 ins_nr = 0; 2832 } 2833 btrfs_release_path(root, path); 2834 2835 if (min_key.offset < (u64)-1) 2836 min_key.offset++; 2837 else if (min_key.type < (u8)-1) 2838 min_key.type++; 2839 else if (min_key.objectid < (u64)-1) 2840 min_key.objectid++; 2841 else 2842 break; 2843 } 2844 if (ins_nr) { 2845 ret = copy_items(trans, log, dst_path, src, 2846 ins_start_slot, 2847 ins_nr, inode_only); 2848 if (ret) { 2849 err = ret; 2850 goto out_unlock; 2851 } 2852 ins_nr = 0; 2853 } 2854 WARN_ON(ins_nr); 2855 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { 2856 btrfs_release_path(root, path); 2857 btrfs_release_path(log, dst_path); 2858 ret = log_directory_changes(trans, root, inode, path, dst_path); 2859 if (ret) { 2860 err = ret; 2861 goto out_unlock; 2862 } 2863 } 2864 BTRFS_I(inode)->logged_trans = trans->transid; 2865 out_unlock: 2866 mutex_unlock(&BTRFS_I(inode)->log_mutex); 2867 2868 btrfs_free_path(path); 2869 btrfs_free_path(dst_path); 2870 return err; 2871 } 2872 2873 /* 2874 * follow the dentry parent pointers up the chain and see if any 2875 * of the directories in it require a full commit before they can 2876 * be logged. Returns zero if nothing special needs to be done or 1 if 2877 * a full commit is required. 2878 */ 2879 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, 2880 struct inode *inode, 2881 struct dentry *parent, 2882 struct super_block *sb, 2883 u64 last_committed) 2884 { 2885 int ret = 0; 2886 struct btrfs_root *root; 2887 2888 /* 2889 * for regular files, if its inode is already on disk, we don't 2890 * have to worry about the parents at all. This is because 2891 * we can use the last_unlink_trans field to record renames 2892 * and other fun in this file. 2893 */ 2894 if (S_ISREG(inode->i_mode) && 2895 BTRFS_I(inode)->generation <= last_committed && 2896 BTRFS_I(inode)->last_unlink_trans <= last_committed) 2897 goto out; 2898 2899 if (!S_ISDIR(inode->i_mode)) { 2900 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) 2901 goto out; 2902 inode = parent->d_inode; 2903 } 2904 2905 while (1) { 2906 BTRFS_I(inode)->logged_trans = trans->transid; 2907 smp_mb(); 2908 2909 if (BTRFS_I(inode)->last_unlink_trans > last_committed) { 2910 root = BTRFS_I(inode)->root; 2911 2912 /* 2913 * make sure any commits to the log are forced 2914 * to be full commits 2915 */ 2916 root->fs_info->last_trans_log_full_commit = 2917 trans->transid; 2918 ret = 1; 2919 break; 2920 } 2921 2922 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) 2923 break; 2924 2925 if (IS_ROOT(parent)) 2926 break; 2927 2928 parent = parent->d_parent; 2929 inode = parent->d_inode; 2930 2931 } 2932 out: 2933 return ret; 2934 } 2935 2936 static int inode_in_log(struct btrfs_trans_handle *trans, 2937 struct inode *inode) 2938 { 2939 struct btrfs_root *root = BTRFS_I(inode)->root; 2940 int ret = 0; 2941 2942 mutex_lock(&root->log_mutex); 2943 if (BTRFS_I(inode)->logged_trans == trans->transid && 2944 BTRFS_I(inode)->last_sub_trans <= root->last_log_commit) 2945 ret = 1; 2946 mutex_unlock(&root->log_mutex); 2947 return ret; 2948 } 2949 2950 2951 /* 2952 * helper function around btrfs_log_inode to make sure newly created 2953 * parent directories also end up in the log. A minimal inode and backref 2954 * only logging is done of any parent directories that are older than 2955 * the last committed transaction 2956 */ 2957 int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 2958 struct btrfs_root *root, struct inode *inode, 2959 struct dentry *parent, int exists_only) 2960 { 2961 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL; 2962 struct super_block *sb; 2963 int ret = 0; 2964 u64 last_committed = root->fs_info->last_trans_committed; 2965 2966 sb = inode->i_sb; 2967 2968 if (btrfs_test_opt(root, NOTREELOG)) { 2969 ret = 1; 2970 goto end_no_trans; 2971 } 2972 2973 if (root->fs_info->last_trans_log_full_commit > 2974 root->fs_info->last_trans_committed) { 2975 ret = 1; 2976 goto end_no_trans; 2977 } 2978 2979 if (root != BTRFS_I(inode)->root || 2980 btrfs_root_refs(&root->root_item) == 0) { 2981 ret = 1; 2982 goto end_no_trans; 2983 } 2984 2985 ret = check_parent_dirs_for_sync(trans, inode, parent, 2986 sb, last_committed); 2987 if (ret) 2988 goto end_no_trans; 2989 2990 if (inode_in_log(trans, inode)) { 2991 ret = BTRFS_NO_LOG_SYNC; 2992 goto end_no_trans; 2993 } 2994 2995 ret = start_log_trans(trans, root); 2996 if (ret) 2997 goto end_trans; 2998 2999 ret = btrfs_log_inode(trans, root, inode, inode_only); 3000 if (ret) 3001 goto end_trans; 3002 3003 /* 3004 * for regular files, if its inode is already on disk, we don't 3005 * have to worry about the parents at all. This is because 3006 * we can use the last_unlink_trans field to record renames 3007 * and other fun in this file. 3008 */ 3009 if (S_ISREG(inode->i_mode) && 3010 BTRFS_I(inode)->generation <= last_committed && 3011 BTRFS_I(inode)->last_unlink_trans <= last_committed) { 3012 ret = 0; 3013 goto end_trans; 3014 } 3015 3016 inode_only = LOG_INODE_EXISTS; 3017 while (1) { 3018 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) 3019 break; 3020 3021 inode = parent->d_inode; 3022 if (root != BTRFS_I(inode)->root) 3023 break; 3024 3025 if (BTRFS_I(inode)->generation > 3026 root->fs_info->last_trans_committed) { 3027 ret = btrfs_log_inode(trans, root, inode, inode_only); 3028 if (ret) 3029 goto end_trans; 3030 } 3031 if (IS_ROOT(parent)) 3032 break; 3033 3034 parent = parent->d_parent; 3035 } 3036 ret = 0; 3037 end_trans: 3038 if (ret < 0) { 3039 BUG_ON(ret != -ENOSPC); 3040 root->fs_info->last_trans_log_full_commit = trans->transid; 3041 ret = 1; 3042 } 3043 btrfs_end_log_trans(root); 3044 end_no_trans: 3045 return ret; 3046 } 3047 3048 /* 3049 * it is not safe to log dentry if the chunk root has added new 3050 * chunks. This returns 0 if the dentry was logged, and 1 otherwise. 3051 * If this returns 1, you must commit the transaction to safely get your 3052 * data on disk. 3053 */ 3054 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, 3055 struct btrfs_root *root, struct dentry *dentry) 3056 { 3057 return btrfs_log_inode_parent(trans, root, dentry->d_inode, 3058 dentry->d_parent, 0); 3059 } 3060 3061 /* 3062 * should be called during mount to recover any replay any log trees 3063 * from the FS 3064 */ 3065 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) 3066 { 3067 int ret; 3068 struct btrfs_path *path; 3069 struct btrfs_trans_handle *trans; 3070 struct btrfs_key key; 3071 struct btrfs_key found_key; 3072 struct btrfs_key tmp_key; 3073 struct btrfs_root *log; 3074 struct btrfs_fs_info *fs_info = log_root_tree->fs_info; 3075 struct walk_control wc = { 3076 .process_func = process_one_buffer, 3077 .stage = 0, 3078 }; 3079 3080 fs_info->log_root_recovering = 1; 3081 path = btrfs_alloc_path(); 3082 BUG_ON(!path); 3083 3084 trans = btrfs_start_transaction(fs_info->tree_root, 0); 3085 3086 wc.trans = trans; 3087 wc.pin = 1; 3088 3089 walk_log_tree(trans, log_root_tree, &wc); 3090 3091 again: 3092 key.objectid = BTRFS_TREE_LOG_OBJECTID; 3093 key.offset = (u64)-1; 3094 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); 3095 3096 while (1) { 3097 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); 3098 if (ret < 0) 3099 break; 3100 if (ret > 0) { 3101 if (path->slots[0] == 0) 3102 break; 3103 path->slots[0]--; 3104 } 3105 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 3106 path->slots[0]); 3107 btrfs_release_path(log_root_tree, path); 3108 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID) 3109 break; 3110 3111 log = btrfs_read_fs_root_no_radix(log_root_tree, 3112 &found_key); 3113 BUG_ON(!log); 3114 3115 3116 tmp_key.objectid = found_key.offset; 3117 tmp_key.type = BTRFS_ROOT_ITEM_KEY; 3118 tmp_key.offset = (u64)-1; 3119 3120 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); 3121 BUG_ON(!wc.replay_dest); 3122 3123 wc.replay_dest->log_root = log; 3124 btrfs_record_root_in_trans(trans, wc.replay_dest); 3125 ret = walk_log_tree(trans, log, &wc); 3126 BUG_ON(ret); 3127 3128 if (wc.stage == LOG_WALK_REPLAY_ALL) { 3129 ret = fixup_inode_link_counts(trans, wc.replay_dest, 3130 path); 3131 BUG_ON(ret); 3132 } 3133 3134 key.offset = found_key.offset - 1; 3135 wc.replay_dest->log_root = NULL; 3136 free_extent_buffer(log->node); 3137 free_extent_buffer(log->commit_root); 3138 kfree(log); 3139 3140 if (found_key.offset == 0) 3141 break; 3142 } 3143 btrfs_release_path(log_root_tree, path); 3144 3145 /* step one is to pin it all, step two is to replay just inodes */ 3146 if (wc.pin) { 3147 wc.pin = 0; 3148 wc.process_func = replay_one_buffer; 3149 wc.stage = LOG_WALK_REPLAY_INODES; 3150 goto again; 3151 } 3152 /* step three is to replay everything */ 3153 if (wc.stage < LOG_WALK_REPLAY_ALL) { 3154 wc.stage++; 3155 goto again; 3156 } 3157 3158 btrfs_free_path(path); 3159 3160 free_extent_buffer(log_root_tree->node); 3161 log_root_tree->log_root = NULL; 3162 fs_info->log_root_recovering = 0; 3163 3164 /* step 4: commit the transaction, which also unpins the blocks */ 3165 btrfs_commit_transaction(trans, fs_info->tree_root); 3166 3167 kfree(log_root_tree); 3168 return 0; 3169 } 3170 3171 /* 3172 * there are some corner cases where we want to force a full 3173 * commit instead of allowing a directory to be logged. 3174 * 3175 * They revolve around files there were unlinked from the directory, and 3176 * this function updates the parent directory so that a full commit is 3177 * properly done if it is fsync'd later after the unlinks are done. 3178 */ 3179 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, 3180 struct inode *dir, struct inode *inode, 3181 int for_rename) 3182 { 3183 /* 3184 * when we're logging a file, if it hasn't been renamed 3185 * or unlinked, and its inode is fully committed on disk, 3186 * we don't have to worry about walking up the directory chain 3187 * to log its parents. 3188 * 3189 * So, we use the last_unlink_trans field to put this transid 3190 * into the file. When the file is logged we check it and 3191 * don't log the parents if the file is fully on disk. 3192 */ 3193 if (S_ISREG(inode->i_mode)) 3194 BTRFS_I(inode)->last_unlink_trans = trans->transid; 3195 3196 /* 3197 * if this directory was already logged any new 3198 * names for this file/dir will get recorded 3199 */ 3200 smp_mb(); 3201 if (BTRFS_I(dir)->logged_trans == trans->transid) 3202 return; 3203 3204 /* 3205 * if the inode we're about to unlink was logged, 3206 * the log will be properly updated for any new names 3207 */ 3208 if (BTRFS_I(inode)->logged_trans == trans->transid) 3209 return; 3210 3211 /* 3212 * when renaming files across directories, if the directory 3213 * there we're unlinking from gets fsync'd later on, there's 3214 * no way to find the destination directory later and fsync it 3215 * properly. So, we have to be conservative and force commits 3216 * so the new name gets discovered. 3217 */ 3218 if (for_rename) 3219 goto record; 3220 3221 /* we can safely do the unlink without any special recording */ 3222 return; 3223 3224 record: 3225 BTRFS_I(dir)->last_unlink_trans = trans->transid; 3226 } 3227 3228 /* 3229 * Call this after adding a new name for a file and it will properly 3230 * update the log to reflect the new name. 3231 * 3232 * It will return zero if all goes well, and it will return 1 if a 3233 * full transaction commit is required. 3234 */ 3235 int btrfs_log_new_name(struct btrfs_trans_handle *trans, 3236 struct inode *inode, struct inode *old_dir, 3237 struct dentry *parent) 3238 { 3239 struct btrfs_root * root = BTRFS_I(inode)->root; 3240 3241 /* 3242 * this will force the logging code to walk the dentry chain 3243 * up for the file 3244 */ 3245 if (S_ISREG(inode->i_mode)) 3246 BTRFS_I(inode)->last_unlink_trans = trans->transid; 3247 3248 /* 3249 * if this inode hasn't been logged and directory we're renaming it 3250 * from hasn't been logged, we don't need to log it 3251 */ 3252 if (BTRFS_I(inode)->logged_trans <= 3253 root->fs_info->last_trans_committed && 3254 (!old_dir || BTRFS_I(old_dir)->logged_trans <= 3255 root->fs_info->last_trans_committed)) 3256 return 0; 3257 3258 return btrfs_log_inode_parent(trans, root, inode, parent, 1); 3259 } 3260 3261