/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list_sort.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compat.h"
#include "tree-log.h"
#include "hash.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  With the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find;
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_ALL 2

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in RAM, once to create all the inodes logged in the tree, and once
 * to do all the other items.
 */
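/*
 * An illustrative sketch (added here, not part of the original file) of
 * the fsync flow described above.  The real entry points are
 * btrfs_sync_file() in file.c and btrfs_sync_log() elsewhere in this
 * file; the reduction below is hypothetical and glosses over locking,
 * error handling and the conditions that force a full commit:
 *
 *	trans = btrfs_start_transaction(root, 0);
 *	ret = btrfs_log_dentry_safe(trans, root, dentry);
 *	if (ret == 0)
 *		ret = btrfs_sync_log(trans, root);	// write only the log tree
 *	if (ret)
 *		ret = btrfs_commit_transaction(trans, root); // full commit fallback
 */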
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root)
{
	int ret;
	int err = 0;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		if (!root->log_start_pid) {
			root->log_start_pid = current->pid;
			root->log_multiple_pids = false;
		} else if (root->log_start_pid != current->pid) {
			root->log_multiple_pids = true;
		}

		atomic_inc(&root->log_batch);
		atomic_inc(&root->log_writers);
		mutex_unlock(&root->log_mutex);
		return 0;
	}
	root->log_multiple_pids = false;
	root->log_start_pid = current->pid;
	mutex_lock(&root->fs_info->tree_log_mutex);
	if (!root->fs_info->log_root_tree) {
		ret = btrfs_init_log_root_tree(trans, root->fs_info);
		if (ret)
			err = ret;
	}
	if (err == 0 && !root->log_root) {
		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			err = ret;
	}
	mutex_unlock(&root->fs_info->tree_log_mutex);
	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return err;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		smp_mb();
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
}
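/*
 * Hedged usage sketch (added, not original text): the pin/end pair above
 * is how callers such as the rename path hold off a log sync while they
 * adjust names.  Roughly, and ignoring the surrounding transaction logic,
 * the call-site shape looks like:
 *
 *	btrfs_pin_log_trans(root);	// block log syncs during the rename
 *	... unlink the old name, add the new one ...
 *	btrfs_log_new_name(trans, inode, old_dir, parent);
 *	btrfs_end_log_trans(root);	// let waiting syncers proceed
 */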
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
{
	if (wc->pin)
		btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
						eb->start, eb->len);

	if (btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return 0;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(trans, root, path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(trans, root, path,
					  item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0)
			goto no_copy;

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}
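/*
 * A worked example (added for illustration; the sizes are invented):
 * suppose the log holds a 160 byte item for a key and the subvolume
 * already has a 96 byte item at the same key.  overwrite_item() takes
 * the -EEXIST path above, sees found_size (96) < item_size (160), and
 * calls btrfs_extend_item() for the missing 64 bytes before the
 * copy_extent_buffer() overwrite.  The reverse case shrinks the item
 * via btrfs_truncate_item().
 */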
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		inode = NULL;
	} else if (is_bad_inode(inode)) {
		iput(inode);
		inode = NULL;
	}
	return inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size, root->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
				       start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	BUG_ON(ret);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		BUG_ON(ret);
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?
			 * If so, just add a reference
			 */
			ret = btrfs_lookup_extent(root, ins.objectid,
						  ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset, 0);
				BUG_ON(ret);
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root, root->root_key.objectid,
						key->objectid, offset, &ins);
				BUG_ON(ret);
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			BUG_ON(ret);
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
						  list);
				ret = btrfs_csum_file_blocks(trans,
						root->fs_info->csum_root,
						sums);
				BUG_ON(ret);
				list_del(&sums->list);
				kfree(sums);
			}
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		BUG_ON(ret);
	}

	inode_add_bytes(inode, nbytes);
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}
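/*
 * Summary sketch of the branch above (illustration only, not original
 * text): for a REG/PREALLOC extent found in the log, replay has two
 * cases, keyed off btrfs_lookup_extent():
 *
 *	if (btrfs_lookup_extent(root, bytenr, num_bytes) == 0)
 *		// extent item already exists: just add a backref
 *		btrfs_inc_extent_ref(...);
 *	else
 *		// extent was never allocated before the crash: create the
 *		// extent item directly from the logged copy
 *		btrfs_alloc_logged_file_extent(...);
 *
 * followed by re-inserting any checksums the log recorded for the range.
 */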
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		kfree(name);
		return -EIO;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	BUG_ON(ret);

	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	BUG_ON(ret);
	kfree(name);

	iput(inode);

	btrfs_run_delayed_items(trans, root);
	return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		if (btrfs_find_name_in_ext_backref(path, ref_objectid,
						   name, namelen, NULL))
			match = 1;

		goto out;
	}

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}

static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct inode *dir, struct inode *inode,
				  struct extent_buffer *eb,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root
		 * directory?  if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.
		 * if so, we allow them to stay;
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			BUG_ON(!victim_name);

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log_root, &search_key,
					    parent_objectid,
					    victim_name,
					    victim_name_len)) {
				btrfs_inc_nlink(inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir,
							 inode, victim_name,
							 victim_name_len);
				BUG_ON(ret);
				btrfs_run_delayed_items(trans, root);
				kfree(victim_name);
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}
		BUG_ON(ret);

		/*
		 * NOTE: we have searched the root tree and checked the
		 * corresponding ref, so it does not need to be checked again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			/* cur_offset is a byte offset into the item */
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = 0;
			if (!backref_in_log(log_root, &search_key,
					    parent_objectid, victim_name,
					    victim_name_len)) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
							       parent_objectid);
				if (victim_parent) {
					btrfs_inc_nlink(inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
								 victim_parent,
								 inode,
								 victim_name,
								 victim_name_len);
					btrfs_run_delayed_items(trans, root);
				}
				BUG_ON(ret);
				iput(victim_parent);
				kfree(victim_name);
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
			BUG_ON(ret);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		BUG_ON(ret);
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		BUG_ON(ret);
	}
	btrfs_release_path(path);

	return 0;
}
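/*
 * Illustration (added, not original text) of why the conflict checks
 * above are needed.  Suppose before the crash:
 *
 *	mv /mnt/a /mnt/b	# inode 257: ref "a" replaced by "b"
 *	fsync /mnt/b		# log records only the "b" ref
 *
 * During replay the subvolume still holds the stale "a" back reference.
 * backref_in_log() reports that "a" is not in the log, so __add_inode_ref()
 * unlinks "a" (the victim) before the logged "b" ref is written back,
 * instead of leaving two names for inode 257.  The paths and inode
 * number are made up for the example.
 */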
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}

/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir;
	struct inode *inode;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir)
		return -ENOENT;

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		iput(dir);
		return -EIO;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir)
				return -ENOENT;
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			return ret;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
				  ref_index, name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata. if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      dir, inode, eb,
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret == 1)
					goto out;
				BUG_ON(ret);
			}

			/* insert our name */
			ret = btrfs_add_link(trans, dir, inode, name, namelen,
					     0, ref_index);
			BUG_ON(ret);

			btrfs_update_inode(trans, root, inode);
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
	BUG_ON(ret);

out:
	btrfs_release_path(path);
	iput(dir);
	iput(inode);
	return 0;
}

static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 offset)
{
	int ret;
	ret = btrfs_find_orphan_item(root, offset);
	if (ret > 0)
		ret = btrfs_insert_orphan_item(trans, root, offset);
	return ret;
}

static int count_inode_extrefs(struct btrfs_root *root,
			       struct inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		/* walk each extref item from its start */
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0)
		return ret;
	return nlink;
}

static int count_inode_refs(struct btrfs_root *root,
			    struct inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, inode, path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, inode, path);
	if (ret == -ENOENT)
		ret = 0;

	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			BUG_ON(ret);
		}
		ret = insert_orphan_item(trans, root, ino);
		BUG_ON(ret);
	}

out:
	btrfs_free_path(path);
	return ret;
}

static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		BUG_ON(ret);

		iput(inode);

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
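/*
 * Recap of the fixup pass above (illustrative, not original text): the
 * authoritative link count after replay is simply the number of names
 * still pointing at the inode, old-style refs plus extended refs:
 *
 *	nlink = count_inode_refs(root, inode, path);
 *	nlink += count_inode_extrefs(root, inode, path);  // contributes 0 if none
 *	if (nlink != inode->i_nlink)
 *		set_nlink(inode, nlink);	// may drop it to zero
 *
 * An inode that ends up with nlink == 0 gets an orphan item, so the
 * pending "rm -rf" described at the top of this file is finished by
 * normal orphan cleanup.
 */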
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			btrfs_inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG();
	}
	iput(inode);

	return ret;
}

/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    u64 dirid, u64 index,
				    char *name, int name_len, u8 type,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}
	ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}
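/*
 * Example of the name replay that follows (illustration only; the inode
 * numbers and name are invented): the log holds a DIR_INDEX item saying
 * "foo" -> inode 258, while the subvolume currently has "foo" -> inode 300.
 * replay_one_name() sees the two keys differ, drops the conflicting entry
 * with drop_one_dir_item() (inode 300 goes to the fixup dir for a link
 * count check), and then insert_one_name() adds "foo" -> 258.  If inode
 * 258 does not exist in the subvolume, the name is skipped instead.
 */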
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		BUG();
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, dir, dst_di);
	BUG_ON(ret);

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	return 0;

insert:
	btrfs_release_path(path);
	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
			      name, name_len, log_type, &log_key);

	BUG_ON(ret && ret != -ENOENT);
	goto out;
}

/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.
 * Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di))
			return -EIO;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		BUG_ON(ret);
		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	return 0;
}

/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	} else {
		path->slots[0]++;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
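/*
 * Worked example for the range items above (added for illustration; the
 * numbers are invented): suppose the log contains a DIR_LOG_INDEX item
 * for the directory covering offsets [0, 5] and logged DIR_INDEX keys at
 * offsets 1 and 4.  If the subvolume still has an entry at index 3,
 * replay knows the log was authoritative for offset 3, finds no such
 * logged key, and concludes the entry was deleted before the fsync, so
 * it is unlinked.  An entry at index 9 is outside every logged range
 * and is left alone.
 */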
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		if (verify_dir_item(root, eb, di)) {
			ret = -EIO;
			goto out;
		}

		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);
		log_di = NULL;
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
							     log_path,
							     dir_key->objectid,
							     dir_key->offset,
							     name, name_len, 0);
		}
		if (IS_ERR_OR_NULL(log_di)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(path);
			btrfs_release_path(log_path);
			inode = read_one_inode(root, location.objectid);
			if (!inode) {
				kfree(name);
				return -EIO;
			}

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			BUG_ON(ret);
			btrfs_inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, dir, inode,
						 name, name_len);
			BUG_ON(ret);

			btrfs_run_delayed_items(trans, root);

			kfree(name);
			iput(inode);

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		}
		btrfs_release_path(log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	btrfs_release_path(log_path);
	return ret;
}

/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that the log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			BUG_ON(ret);
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(path);
		goto again;
	}
out:
	btrfs_release_path(path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
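/*
 * Context sketch for the staged dispatch below (added; a hypothetical
 * reduction loosely modeled on btrfs_recover_log_trees() later in this
 * file).  Each recovered log tree is walked once per stage, with
 * walk_control selecting the behavior:
 *
 *	struct walk_control wc = {
 *		.pin = 1,
 *		.process_func = process_one_buffer,
 *		.stage = LOG_WALK_PIN_ONLY,
 *	};
 *	walk_log_tree(trans, log, &wc);		// stage 0: pin log extents
 *
 *	wc.pin = 0;
 *	wc.process_func = replay_one_buffer;
 *	wc.stage = LOG_WALK_REPLAY_INODES;	// stage 1: create inodes
 *	walk_log_tree(trans, log, &wc);
 *
 *	wc.stage = LOG_WALK_REPLAY_ALL;		// stage 2: names, refs, extents
 *	walk_log_tree(trans, log, &wc);
 */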
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int level;
	int i;
	int ret;

	ret = btrfs_read_buffer(eb, gen);
	if (ret)
		return ret;

	level = btrfs_header_level(eb);

	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				BUG_ON(ret);
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			BUG_ON(ret);

			/* for regular files, make sure corresponding
			 * orphan item exists.  extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
			if (S_ISREG(mode)) {
				ret = insert_orphan_item(wc->trans, root,
							 key.objectid);
				BUG_ON(ret);
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			BUG_ON(ret);
		}
		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			BUG_ON(ret);
		} else if (key.type == BTRFS_INODE_REF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			BUG_ON(ret && ret != -ENOENT);
		} else if (key.type == BTRFS_INODE_EXTREF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			BUG_ON(ret && ret != -ENOENT);
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			BUG_ON(ret);
		} else if (key.type == BTRFS_DIR_ITEM_KEY ||
			   key.type == BTRFS_DIR_INDEX_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			BUG_ON(ret);
		}
	}
	btrfs_free_path(path);
	return 0;
}

static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path, int *level,
				       struct walk_control *wc)
{
	u64 root_owner;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret = 0;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		if (btrfs_header_level(cur) != *level)
			WARN_ON(1);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
btrfs_find_create_tree_block(root, bytenr, blocksize);
		if (!next)
			return -ENOMEM;

		if (*level == 1) {
			ret = wc->process_func(root, next, wc, ptr_gen);
			if (ret)
				return ret;

			path->slots[*level]++;
			if (wc->free) {
				ret = btrfs_read_buffer(next, ptr_gen);
				if (ret) {
					free_extent_buffer(next);
					return ret;
				}

				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(trans, root, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
							 bytenr, blocksize);
				BUG_ON(ret); /* -ENOMEM or logic errors */
			}
			free_extent_buffer(next);
			continue;
		}
		ret = btrfs_read_buffer(next, ptr_gen);
		if (ret) {
			free_extent_buffer(next);
			return ret;
		}

		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

	cond_resched();
	return 0;
}

static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path, int *level,
				 struct walk_control *wc)
{
	u64 root_owner;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			struct extent_buffer *parent;
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			ret = wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]));
			if (ret)
				return ret;

			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				btrfs_tree_lock(next);
				btrfs_set_lock_blocking(next);
				clean_tree_block(trans, root, next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_and_pin_reserved_extent(root,
						path->nodes[*level]->start,
						path->nodes[*level]->len);
				BUG_ON(ret);
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}

/*
 * walk the log tree, invoking wc->process_func on each buffer we find.
 * When wc->free is set, the blocks are also cleaned, waited on, and
 * their reserved extents freed and pinned as the walk goes.
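 *
 * Callers drive the walk through a struct walk_control; for instance,
 * free_log_tree() below uses { .free = 1, .process_func =
 * process_one_buffer } to release every block, and log recovery swaps
 * in replay_one_buffer() as the process_func for the replay stages.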
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int i;
	int orig_level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0) {
			ret = wret;
			goto out;
		}
	}

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		ret = wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));
		if (ret)
			goto out;
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			btrfs_tree_lock(next);
			btrfs_set_lock_blocking(next);
			clean_tree_block(trans, log, next);
			btrfs_wait_tree_block_writeback(next);
			btrfs_tree_unlock(next);

			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_and_pin_reserved_extent(log, next->start,
							 next->len);
			BUG_ON(ret); /* -ENOMEM or logic errors */
		}
	}

out:
	for (i = 0; i <= orig_level; i++) {
		if (path->nodes[i]) {
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to update the item for a given subvolume's log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log)
{
	int ret;

	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	} else {
		ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
				&log->root_key, &log->root_item);
	}
	return ret;
}

static int wait_log_commit(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long transid)
{
	DEFINE_WAIT(wait);
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	do {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);

		if (root->fs_info->last_trans_log_full_commit !=
		    trans->transid && root->log_transid < transid + 2 &&
		    atomic_read(&root->log_commit[index]))
			schedule();

		finish_wait(&root->log_commit_wait[index], &wait);
		mutex_lock(&root->log_mutex);
	} while (root->fs_info->last_trans_log_full_commit !=
		 trans->transid && root->log_transid < transid + 2 &&
		 atomic_read(&root->log_commit[index]));
	return 0;
}

static void wait_for_writer(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	DEFINE_WAIT(wait);
	while (root->fs_info->last_trans_log_full_commit !=
	       trans->transid &&
	       atomic_read(&root->log_writers)) {
		prepare_to_wait(&root->log_writer_wait,
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);
		if (root->fs_info->last_trans_log_full_commit !=
		    trans->transid && atomic_read(&root->log_writers))
			schedule();
		mutex_lock(&root->log_mutex);
		finish_wait(&root->log_writer_wait, &wait);
	}
}

/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it. When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
	unsigned long log_transid = 0;

	mutex_lock(&root->log_mutex);
	log_transid = root->log_transid;
	index1 = root->log_transid % 2;
	if (atomic_read(&root->log_commit[index1])) {
		wait_log_commit(trans, root, root->log_transid);
		mutex_unlock(&root->log_mutex);
		return 0;
	}
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(trans, root, root->log_transid - 1);
	while (1) {
		int batch = atomic_read(&root->log_batch);
		/* when we're on an ssd, just kick the log commit out */
		if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(trans, root);
		if (batch == atomic_read(&root->log_batch))
			break;
	}

	/* bail out if we need to do a full commit */
	if (root->fs_info->last_trans_log_full_commit == trans->transid) {
		ret = -EAGAIN;
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on all the marked extents here, but we don't actually
	 * wait for them until later.
	 */
	ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	btrfs_set_root_node(&log->root_item, log->node);

	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
	smp_mb();
	/*
	 * IO has been started, blocks of the log tree have WRITTEN flag set
	 * in their headers. New modifications of the log will be written to
	 * new positions, so it's safe to allow log writers to go in.
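 *
 * Illustrative timeline (hypothetical, not from a trace): while this
 * commit of log_transid N waits on its marked extents, a new fsync can
 * join log_transid N + 1 under log_mutex; the wait_log_commit() calls
 * above are what keep more than two log transactions from being in
 * flight at once.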
 */
	mutex_unlock(&root->log_mutex);

	mutex_lock(&log_root_tree->log_mutex);
	atomic_inc(&log_root_tree->log_batch);
	atomic_inc(&log_root_tree->log_writers);
	mutex_unlock(&log_root_tree->log_mutex);

	ret = update_log_root(trans, log);

	mutex_lock(&log_root_tree->log_mutex);
	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
		smp_mb();
		if (waitqueue_active(&log_root_tree->log_writer_wait))
			wake_up(&log_root_tree->log_writer_wait);
	}

	if (ret) {
		if (ret != -ENOSPC) {
			btrfs_abort_transaction(trans, root, ret);
			mutex_unlock(&log_root_tree->log_mutex);
			goto out;
		}
		root->fs_info->last_trans_log_full_commit = trans->transid;
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	index2 = log_root_tree->log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		wait_log_commit(trans, log_root_tree,
				log_root_tree->log_transid);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = 0;
		goto out;
	}
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(trans, log_root_tree,
				log_root_tree->log_transid - 1);
	}

	wait_for_writer(trans, log_root_tree);

	/*
	 * now that we've moved on to the tree of log tree roots,
	 * check the full commit flag again
	 */
	if (root->fs_info->last_trans_log_full_commit == trans->transid) {
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_and_wait_marked_extents(log_root_tree,
				&log_root_tree->dirty_log_pages,
				EXTENT_DIRTY | EXTENT_NEW);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		btrfs_free_logged_extents(log, log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		goto out_wake_log_root;
	}
	btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
	btrfs_wait_logged_extents(log, log_transid);

	btrfs_set_super_log_root(root->fs_info->super_for_commit,
				 log_root_tree->node->start);
	btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
				       btrfs_header_level(log_root_tree->node));

	log_root_tree->log_transid++;
	smp_mb();

	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * nobody else is going to jump in and write the ctree
	 * super here because the log_commit atomic below is protecting
	 * us. We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
 */
	btrfs_scrub_pause_super(root);
	ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
	btrfs_scrub_continue_super(root);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_wake_log_root;
	}

	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	atomic_set(&log_root_tree->log_commit[index2], 0);
	smp_mb();
	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
		wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	atomic_set(&root->log_commit[index1], 0);
	smp_mb();
	if (waitqueue_active(&root->log_commit_wait[index1]))
		wake_up(&root->log_commit_wait[index1]);
	return ret;
}

static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	u64 start;
	u64 end;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	if (trans) {
		ret = walk_log_tree(trans, log, &wc);
		BUG_ON(ret);
	}

	while (1) {
		ret = find_first_extent_bit(&log->dirty_log_pages,
				0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
				NULL);
		if (ret)
			break;

		clear_extent_bits(&log->dirty_log_pages, start, end,
				  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
	}

	/*
	 * We may have short-circuited the log tree with the full commit logic
	 * and left ordered extents on our list, so clear these out to keep us
	 * from leaking inodes and memory.
	 */
	btrfs_free_logged_extents(log, 0);
	btrfs_free_logged_extents(log, 1);

	free_extent_buffer(log->node);
	kfree(log);
}

/*
 * free all the extents used by the tree log. This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	if (root->log_root) {
		free_log_tree(trans, root->log_root);
		root->log_root = NULL;
	}
	return 0;
}

int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	if (fs_info->log_root_tree) {
		free_log_tree(trans, fs_info->log_root_tree);
		fs_info->log_root_tree = NULL;
	}
	return 0;
}

/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist. But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked. Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization allows us to avoid relogging the entire inode
 * or the entire directory.
2579 */ 2580 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, 2581 struct btrfs_root *root, 2582 const char *name, int name_len, 2583 struct inode *dir, u64 index) 2584 { 2585 struct btrfs_root *log; 2586 struct btrfs_dir_item *di; 2587 struct btrfs_path *path; 2588 int ret; 2589 int err = 0; 2590 int bytes_del = 0; 2591 u64 dir_ino = btrfs_ino(dir); 2592 2593 if (BTRFS_I(dir)->logged_trans < trans->transid) 2594 return 0; 2595 2596 ret = join_running_log_trans(root); 2597 if (ret) 2598 return 0; 2599 2600 mutex_lock(&BTRFS_I(dir)->log_mutex); 2601 2602 log = root->log_root; 2603 path = btrfs_alloc_path(); 2604 if (!path) { 2605 err = -ENOMEM; 2606 goto out_unlock; 2607 } 2608 2609 di = btrfs_lookup_dir_item(trans, log, path, dir_ino, 2610 name, name_len, -1); 2611 if (IS_ERR(di)) { 2612 err = PTR_ERR(di); 2613 goto fail; 2614 } 2615 if (di) { 2616 ret = btrfs_delete_one_dir_name(trans, log, path, di); 2617 bytes_del += name_len; 2618 BUG_ON(ret); 2619 } 2620 btrfs_release_path(path); 2621 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, 2622 index, name, name_len, -1); 2623 if (IS_ERR(di)) { 2624 err = PTR_ERR(di); 2625 goto fail; 2626 } 2627 if (di) { 2628 ret = btrfs_delete_one_dir_name(trans, log, path, di); 2629 bytes_del += name_len; 2630 BUG_ON(ret); 2631 } 2632 2633 /* update the directory size in the log to reflect the names 2634 * we have removed 2635 */ 2636 if (bytes_del) { 2637 struct btrfs_key key; 2638 2639 key.objectid = dir_ino; 2640 key.offset = 0; 2641 key.type = BTRFS_INODE_ITEM_KEY; 2642 btrfs_release_path(path); 2643 2644 ret = btrfs_search_slot(trans, log, &key, path, 0, 1); 2645 if (ret < 0) { 2646 err = ret; 2647 goto fail; 2648 } 2649 if (ret == 0) { 2650 struct btrfs_inode_item *item; 2651 u64 i_size; 2652 2653 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2654 struct btrfs_inode_item); 2655 i_size = btrfs_inode_size(path->nodes[0], item); 2656 if (i_size > bytes_del) 2657 i_size -= bytes_del; 2658 else 2659 i_size = 0; 2660 btrfs_set_inode_size(path->nodes[0], item, i_size); 2661 btrfs_mark_buffer_dirty(path->nodes[0]); 2662 } else 2663 ret = 0; 2664 btrfs_release_path(path); 2665 } 2666 fail: 2667 btrfs_free_path(path); 2668 out_unlock: 2669 mutex_unlock(&BTRFS_I(dir)->log_mutex); 2670 if (ret == -ENOSPC) { 2671 root->fs_info->last_trans_log_full_commit = trans->transid; 2672 ret = 0; 2673 } else if (ret < 0) 2674 btrfs_abort_transaction(trans, root, ret); 2675 2676 btrfs_end_log_trans(root); 2677 2678 return err; 2679 } 2680 2681 /* see comments for btrfs_del_dir_entries_in_log */ 2682 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, 2683 struct btrfs_root *root, 2684 const char *name, int name_len, 2685 struct inode *inode, u64 dirid) 2686 { 2687 struct btrfs_root *log; 2688 u64 index; 2689 int ret; 2690 2691 if (BTRFS_I(inode)->logged_trans < trans->transid) 2692 return 0; 2693 2694 ret = join_running_log_trans(root); 2695 if (ret) 2696 return 0; 2697 log = root->log_root; 2698 mutex_lock(&BTRFS_I(inode)->log_mutex); 2699 2700 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), 2701 dirid, &index); 2702 mutex_unlock(&BTRFS_I(inode)->log_mutex); 2703 if (ret == -ENOSPC) { 2704 root->fs_info->last_trans_log_full_commit = trans->transid; 2705 ret = 0; 2706 } else if (ret < 0 && ret != -ENOENT) 2707 btrfs_abort_transaction(trans, root, ret); 2708 btrfs_end_log_trans(root); 2709 2710 return ret; 2711 } 2712 2713 /* 2714 * creates a range item in the log for 'dirid'. 
first_offset and 2715 * last_offset tell us which parts of the key space the log should 2716 * be considered authoritative for. 2717 */ 2718 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, 2719 struct btrfs_root *log, 2720 struct btrfs_path *path, 2721 int key_type, u64 dirid, 2722 u64 first_offset, u64 last_offset) 2723 { 2724 int ret; 2725 struct btrfs_key key; 2726 struct btrfs_dir_log_item *item; 2727 2728 key.objectid = dirid; 2729 key.offset = first_offset; 2730 if (key_type == BTRFS_DIR_ITEM_KEY) 2731 key.type = BTRFS_DIR_LOG_ITEM_KEY; 2732 else 2733 key.type = BTRFS_DIR_LOG_INDEX_KEY; 2734 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); 2735 if (ret) 2736 return ret; 2737 2738 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2739 struct btrfs_dir_log_item); 2740 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); 2741 btrfs_mark_buffer_dirty(path->nodes[0]); 2742 btrfs_release_path(path); 2743 return 0; 2744 } 2745 2746 /* 2747 * log all the items included in the current transaction for a given 2748 * directory. This also creates the range items in the log tree required 2749 * to replay anything deleted before the fsync 2750 */ 2751 static noinline int log_dir_items(struct btrfs_trans_handle *trans, 2752 struct btrfs_root *root, struct inode *inode, 2753 struct btrfs_path *path, 2754 struct btrfs_path *dst_path, int key_type, 2755 u64 min_offset, u64 *last_offset_ret) 2756 { 2757 struct btrfs_key min_key; 2758 struct btrfs_key max_key; 2759 struct btrfs_root *log = root->log_root; 2760 struct extent_buffer *src; 2761 int err = 0; 2762 int ret; 2763 int i; 2764 int nritems; 2765 u64 first_offset = min_offset; 2766 u64 last_offset = (u64)-1; 2767 u64 ino = btrfs_ino(inode); 2768 2769 log = root->log_root; 2770 max_key.objectid = ino; 2771 max_key.offset = (u64)-1; 2772 max_key.type = key_type; 2773 2774 min_key.objectid = ino; 2775 min_key.type = key_type; 2776 min_key.offset = min_offset; 2777 2778 path->keep_locks = 1; 2779 2780 ret = btrfs_search_forward(root, &min_key, &max_key, 2781 path, trans->transid); 2782 2783 /* 2784 * we didn't find anything from this transaction, see if there 2785 * is anything at all 2786 */ 2787 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { 2788 min_key.objectid = ino; 2789 min_key.type = key_type; 2790 min_key.offset = (u64)-1; 2791 btrfs_release_path(path); 2792 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 2793 if (ret < 0) { 2794 btrfs_release_path(path); 2795 return ret; 2796 } 2797 ret = btrfs_previous_item(root, path, ino, key_type); 2798 2799 /* if ret == 0 there are items for this type, 2800 * create a range to tell us the last key of this type. 2801 * otherwise, there are no items in this directory after 2802 * *min_offset, and we create a range to indicate that. 
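 *
 * As a worked example (hypothetical offsets): with min_offset 100 and
 * the last existing key of this type at offset 60, first_offset becomes
 * max(100, 60) + 1 = 101, last_offset stays (u64)-1, and the inserted
 * range item tells replay that the log is authoritative (and empty)
 * for [101, (u64)-1].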
 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}

	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			first_offset = tmp.offset;
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret) {
				err = ret;
				goto done;
			}
		}
	}
	btrfs_release_path(path);

	/* find the first key from this transaction again */
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (ret != 0) {
		WARN_ON(1);
		goto done;
	}

	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			btrfs_item_key_to_cpu(src, &min_key, i);

			if (min_key.objectid != ino || min_key.type != key_type)
				goto done;
			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			if (ret) {
				err = ret;
				goto done;
			}
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret == 1) {
			last_offset = (u64)-1;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(path);
	btrfs_release_path(dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 ino, first_offset, last_offset);
		if (ret)
			err = ret;
	}
	return err;
}

/*
 * logging directories is very similar to logging inodes. We find all the
 * items from the current transaction and write them to the log.
 *
 * The recovery code scans the directory in the subvolume, and if it finds a
 * key in the range logged that is not present in the log tree, then it means
 * that dir entry was unlinked during the transaction.
 *
 * In order for that scan to work, we must include one key smaller than
 * the smallest logged by this transaction and one key larger than the largest
 * key logged by this transaction.
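 *
 * E.g. (hypothetical directory): if an entry at index 14 was unlinked
 * during this transaction, it is absent from the log but still inside
 * the logged range, so the recovery scan sees the range item, misses
 * the dir entry, and deletes index 14 from the subvolume.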
2915 */ 2916 static noinline int log_directory_changes(struct btrfs_trans_handle *trans, 2917 struct btrfs_root *root, struct inode *inode, 2918 struct btrfs_path *path, 2919 struct btrfs_path *dst_path) 2920 { 2921 u64 min_key; 2922 u64 max_key; 2923 int ret; 2924 int key_type = BTRFS_DIR_ITEM_KEY; 2925 2926 again: 2927 min_key = 0; 2928 max_key = 0; 2929 while (1) { 2930 ret = log_dir_items(trans, root, inode, path, 2931 dst_path, key_type, min_key, 2932 &max_key); 2933 if (ret) 2934 return ret; 2935 if (max_key == (u64)-1) 2936 break; 2937 min_key = max_key + 1; 2938 } 2939 2940 if (key_type == BTRFS_DIR_ITEM_KEY) { 2941 key_type = BTRFS_DIR_INDEX_KEY; 2942 goto again; 2943 } 2944 return 0; 2945 } 2946 2947 /* 2948 * a helper function to drop items from the log before we relog an 2949 * inode. max_key_type indicates the highest item type to remove. 2950 * This cannot be run for file data extents because it does not 2951 * free the extents they point to. 2952 */ 2953 static int drop_objectid_items(struct btrfs_trans_handle *trans, 2954 struct btrfs_root *log, 2955 struct btrfs_path *path, 2956 u64 objectid, int max_key_type) 2957 { 2958 int ret; 2959 struct btrfs_key key; 2960 struct btrfs_key found_key; 2961 int start_slot; 2962 2963 key.objectid = objectid; 2964 key.type = max_key_type; 2965 key.offset = (u64)-1; 2966 2967 while (1) { 2968 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 2969 BUG_ON(ret == 0); 2970 if (ret < 0) 2971 break; 2972 2973 if (path->slots[0] == 0) 2974 break; 2975 2976 path->slots[0]--; 2977 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2978 path->slots[0]); 2979 2980 if (found_key.objectid != objectid) 2981 break; 2982 2983 found_key.offset = 0; 2984 found_key.type = 0; 2985 ret = btrfs_bin_search(path->nodes[0], &found_key, 0, 2986 &start_slot); 2987 2988 ret = btrfs_del_items(trans, log, path, start_slot, 2989 path->slots[0] - start_slot + 1); 2990 /* 2991 * If start slot isn't 0 then we don't need to re-search, we've 2992 * found the last guy with the objectid in this tree. 
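 *
 * (E.g., hypothetically, if the leaf also holds items for a smaller
 * objectid in the slots before start_slot, no earlier leaf can contain
 * more items for our objectid, so the loop can stop.)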
 */
		if (ret || start_slot != 0)
			break;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);
	if (ret > 0)
		ret = 0;
	return ret;
}

static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode, int log_inode_only)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (log_inode_only) {
		/* set the generation to zero so the recovery code
		 * can tell the difference between logging an inode
		 * just to say 'this inode exists' and logging it
		 * to say 'update this inode with these values'
		 */
		btrfs_set_token_inode_generation(leaf, item, 0, &token);
		btrfs_set_token_inode_size(leaf, item, 0, &token);
	} else {
		btrfs_set_token_inode_generation(leaf, item,
						 BTRFS_I(inode)->generation,
						 &token);
		btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
	}

	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);

	btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
				     inode->i_atime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
				      inode->i_atime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
				     inode->i_mtime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
				      inode->i_mtime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
				     inode->i_ctime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
				      inode->i_ctime.tv_nsec, &token);

	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
				     &token);

	btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
}

static int log_inode_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log, struct btrfs_path *path,
			  struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_key key;
	int ret;

	memcpy(&key, &BTRFS_I(inode)->location, sizeof(key));
	ret = btrfs_insert_empty_item(trans, log, path, &key,
				      sizeof(*inode_item));
	if (ret && ret != -EEXIST)
		return ret;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
	btrfs_release_path(path);
	return 0;
}

static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct inode *inode,
			       struct btrfs_path *dst_path,
			       struct extent_buffer *src,
			       int start_slot, int nr, int inode_only)
{
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
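	/*
	 * ins_data (below) is a single allocation packing the nr item
	 * sizes (u32s) first and the nr item keys after them; ins_sizes
	 * and ins_keys just point into it.
	 */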
int i; 3094 struct list_head ordered_sums; 3095 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 3096 3097 INIT_LIST_HEAD(&ordered_sums); 3098 3099 ins_data = kmalloc(nr * sizeof(struct btrfs_key) + 3100 nr * sizeof(u32), GFP_NOFS); 3101 if (!ins_data) 3102 return -ENOMEM; 3103 3104 ins_sizes = (u32 *)ins_data; 3105 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); 3106 3107 for (i = 0; i < nr; i++) { 3108 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot); 3109 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot); 3110 } 3111 ret = btrfs_insert_empty_items(trans, log, dst_path, 3112 ins_keys, ins_sizes, nr); 3113 if (ret) { 3114 kfree(ins_data); 3115 return ret; 3116 } 3117 3118 for (i = 0; i < nr; i++, dst_path->slots[0]++) { 3119 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], 3120 dst_path->slots[0]); 3121 3122 src_offset = btrfs_item_ptr_offset(src, start_slot + i); 3123 3124 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { 3125 inode_item = btrfs_item_ptr(dst_path->nodes[0], 3126 dst_path->slots[0], 3127 struct btrfs_inode_item); 3128 fill_inode_item(trans, dst_path->nodes[0], inode_item, 3129 inode, inode_only == LOG_INODE_EXISTS); 3130 } else { 3131 copy_extent_buffer(dst_path->nodes[0], src, dst_offset, 3132 src_offset, ins_sizes[i]); 3133 } 3134 3135 /* take a reference on file data extents so that truncates 3136 * or deletes of this inode don't have to relog the inode 3137 * again 3138 */ 3139 if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY && 3140 !skip_csum) { 3141 int found_type; 3142 extent = btrfs_item_ptr(src, start_slot + i, 3143 struct btrfs_file_extent_item); 3144 3145 if (btrfs_file_extent_generation(src, extent) < trans->transid) 3146 continue; 3147 3148 found_type = btrfs_file_extent_type(src, extent); 3149 if (found_type == BTRFS_FILE_EXTENT_REG) { 3150 u64 ds, dl, cs, cl; 3151 ds = btrfs_file_extent_disk_bytenr(src, 3152 extent); 3153 /* ds == 0 is a hole */ 3154 if (ds == 0) 3155 continue; 3156 3157 dl = btrfs_file_extent_disk_num_bytes(src, 3158 extent); 3159 cs = btrfs_file_extent_offset(src, extent); 3160 cl = btrfs_file_extent_num_bytes(src, 3161 extent); 3162 if (btrfs_file_extent_compression(src, 3163 extent)) { 3164 cs = 0; 3165 cl = dl; 3166 } 3167 3168 ret = btrfs_lookup_csums_range( 3169 log->fs_info->csum_root, 3170 ds + cs, ds + cs + cl - 1, 3171 &ordered_sums, 0); 3172 BUG_ON(ret); 3173 } 3174 } 3175 } 3176 3177 btrfs_mark_buffer_dirty(dst_path->nodes[0]); 3178 btrfs_release_path(dst_path); 3179 kfree(ins_data); 3180 3181 /* 3182 * we have to do this after the loop above to avoid changing the 3183 * log tree while trying to change the log tree. 
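 *
 * (btrfs_csum_file_blocks() below inserts csum items into this same
 * log tree, so it can only run once dst_path has been released.)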
 */
	ret = 0;
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}
	return ret;
}

static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct extent_map *em1, *em2;

	em1 = list_entry(a, struct extent_map, list);
	em2 = list_entry(b, struct extent_map, list);

	if (em1->start < em2->start)
		return -1;
	else if (em1->start > em2->start)
		return 1;
	return 0;
}

static int drop_adjacent_extents(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, struct inode *inode,
				 struct extent_map *em,
				 struct btrfs_path *path)
{
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct btrfs_key key, new_key;
	struct btrfs_map_token token;
	u64 extent_end;
	u64 extent_offset = 0;
	int extent_type;
	int del_slot = 0;
	int del_nr = 0;
	int ret = 0;

	while (1) {
		btrfs_init_map_token(&token);
		leaf = path->nodes[0];
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			if (del_nr) {
				ret = btrfs_del_items(trans, root, path,
						      del_slot, del_nr);
				if (ret)
					return ret;
				del_nr = 0;
			}

			ret = btrfs_next_leaf_write(trans, root, path, 1);
			if (ret < 0)
				return ret;
			if (ret > 0)
				return 0;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != btrfs_ino(inode) ||
		    key.type != BTRFS_EXTENT_DATA_KEY ||
		    key.offset >= em->start + em->len)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_token_file_extent_type(leaf, fi, &token);
		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			extent_offset = btrfs_token_file_extent_offset(leaf,
								fi, &token);
			extent_end = key.offset +
				btrfs_token_file_extent_num_bytes(leaf, fi,
								  &token);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			BUG();
		}

		if (extent_end <= em->len + em->start) {
			if (!del_nr)
				del_slot = path->slots[0];
			del_nr++;
			continue;
		}

		/*
		 * Ok, so we'll ignore previous items if we log a new extent,
		 * which can lead to overlapping extents. So if we have an
		 * existing extent we want to adjust, we _have_ to check the
		 * next item to make sure we even need this extent anymore;
		 * this keeps us from panicking in set_item_key_safe().
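 *
 * Roughly (hypothetical layout):
 *
 *   |------- em being logged -------|
 *                      |---- existing extent ----|
 *
 * The existing item is re-keyed at em->start + em->len and its
 * offset/length adjusted so only the non-overlapping tail remains.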
3286 */ 3287 if (path->slots[0] < btrfs_header_nritems(leaf) - 1) { 3288 struct btrfs_key tmp_key; 3289 3290 btrfs_item_key_to_cpu(leaf, &tmp_key, 3291 path->slots[0] + 1); 3292 if (tmp_key.objectid == btrfs_ino(inode) && 3293 tmp_key.type == BTRFS_EXTENT_DATA_KEY && 3294 tmp_key.offset <= em->start + em->len) { 3295 if (!del_nr) 3296 del_slot = path->slots[0]; 3297 del_nr++; 3298 continue; 3299 } 3300 } 3301 3302 BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE); 3303 memcpy(&new_key, &key, sizeof(new_key)); 3304 new_key.offset = em->start + em->len; 3305 btrfs_set_item_key_safe(trans, root, path, &new_key); 3306 extent_offset += em->start + em->len - key.offset; 3307 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, 3308 &token); 3309 btrfs_set_token_file_extent_num_bytes(leaf, fi, extent_end - 3310 (em->start + em->len), 3311 &token); 3312 btrfs_mark_buffer_dirty(leaf); 3313 } 3314 3315 if (del_nr) 3316 ret = btrfs_del_items(trans, root, path, del_slot, del_nr); 3317 3318 return ret; 3319 } 3320 3321 static int log_one_extent(struct btrfs_trans_handle *trans, 3322 struct inode *inode, struct btrfs_root *root, 3323 struct extent_map *em, struct btrfs_path *path) 3324 { 3325 struct btrfs_root *log = root->log_root; 3326 struct btrfs_file_extent_item *fi; 3327 struct extent_buffer *leaf; 3328 struct btrfs_ordered_extent *ordered; 3329 struct list_head ordered_sums; 3330 struct btrfs_map_token token; 3331 struct btrfs_key key; 3332 u64 mod_start = em->mod_start; 3333 u64 mod_len = em->mod_len; 3334 u64 csum_offset; 3335 u64 csum_len; 3336 u64 extent_offset = em->start - em->orig_start; 3337 u64 block_len; 3338 int ret; 3339 int index = log->log_transid % 2; 3340 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 3341 3342 insert: 3343 INIT_LIST_HEAD(&ordered_sums); 3344 btrfs_init_map_token(&token); 3345 key.objectid = btrfs_ino(inode); 3346 key.type = BTRFS_EXTENT_DATA_KEY; 3347 key.offset = em->start; 3348 path->really_keep_locks = 1; 3349 3350 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*fi)); 3351 if (ret && ret != -EEXIST) { 3352 path->really_keep_locks = 0; 3353 return ret; 3354 } 3355 leaf = path->nodes[0]; 3356 fi = btrfs_item_ptr(leaf, path->slots[0], 3357 struct btrfs_file_extent_item); 3358 3359 /* 3360 * If we are overwriting an inline extent with a real one then we need 3361 * to just delete the inline extent as it may not be large enough to 3362 * have the entire file_extent_item. 
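 *
 * (ret can only be -EEXIST here thanks to the check above, so the
 * "ret && ... inline" test below means: the item already existed and
 * is inline; delete it and jump back to insert a fresh, correctly
 * sized item.)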
3363 */ 3364 if (ret && btrfs_token_file_extent_type(leaf, fi, &token) == 3365 BTRFS_FILE_EXTENT_INLINE) { 3366 ret = btrfs_del_item(trans, log, path); 3367 btrfs_release_path(path); 3368 if (ret) { 3369 path->really_keep_locks = 0; 3370 return ret; 3371 } 3372 goto insert; 3373 } 3374 3375 btrfs_set_token_file_extent_generation(leaf, fi, em->generation, 3376 &token); 3377 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 3378 skip_csum = true; 3379 btrfs_set_token_file_extent_type(leaf, fi, 3380 BTRFS_FILE_EXTENT_PREALLOC, 3381 &token); 3382 } else { 3383 btrfs_set_token_file_extent_type(leaf, fi, 3384 BTRFS_FILE_EXTENT_REG, 3385 &token); 3386 if (em->block_start == 0) 3387 skip_csum = true; 3388 } 3389 3390 block_len = max(em->block_len, em->orig_block_len); 3391 if (em->compress_type != BTRFS_COMPRESS_NONE) { 3392 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 3393 em->block_start, 3394 &token); 3395 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 3396 &token); 3397 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { 3398 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 3399 em->block_start - 3400 extent_offset, &token); 3401 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 3402 &token); 3403 } else { 3404 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token); 3405 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0, 3406 &token); 3407 } 3408 3409 btrfs_set_token_file_extent_offset(leaf, fi, 3410 em->start - em->orig_start, 3411 &token); 3412 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token); 3413 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->len, &token); 3414 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type, 3415 &token); 3416 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token); 3417 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token); 3418 btrfs_mark_buffer_dirty(leaf); 3419 3420 /* 3421 * Have to check the extent to the right of us to make sure it doesn't 3422 * fall in our current range. We're ok if the previous extent is in our 3423 * range since the recovery stuff will run us in key order and thus just 3424 * drop the part we overwrote. 3425 */ 3426 ret = drop_adjacent_extents(trans, log, inode, em, path); 3427 btrfs_release_path(path); 3428 path->really_keep_locks = 0; 3429 if (ret) { 3430 return ret; 3431 } 3432 3433 if (skip_csum) 3434 return 0; 3435 3436 if (em->compress_type) { 3437 csum_offset = 0; 3438 csum_len = block_len; 3439 } 3440 3441 /* 3442 * First check and see if our csums are on our outstanding ordered 3443 * extents. 3444 */ 3445 again: 3446 spin_lock_irq(&log->log_extents_lock[index]); 3447 list_for_each_entry(ordered, &log->logged_list[index], log_list) { 3448 struct btrfs_ordered_sum *sum; 3449 3450 if (!mod_len) 3451 break; 3452 3453 if (ordered->inode != inode) 3454 continue; 3455 3456 if (ordered->file_offset + ordered->len <= mod_start || 3457 mod_start + mod_len <= ordered->file_offset) 3458 continue; 3459 3460 /* 3461 * We are going to copy all the csums on this ordered extent, so 3462 * go ahead and adjust mod_start and mod_len in case this 3463 * ordered extent has already been logged. 
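 *
 * For instance (hypothetical layout), when the ordered extent starts
 * inside the logged range and runs past its end:
 *
 *   |--------- logged extent ---------|
 *                 |----- ordered extent -----|
 *
 * mod_len is trimmed so the logged range ends where the ordered extent
 * begins, since that tail's csums will come from the ordered extent
 * itself.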
3464 */ 3465 if (ordered->file_offset > mod_start) { 3466 if (ordered->file_offset + ordered->len >= 3467 mod_start + mod_len) 3468 mod_len = ordered->file_offset - mod_start; 3469 /* 3470 * If we have this case 3471 * 3472 * |--------- logged extent ---------| 3473 * |----- ordered extent ----| 3474 * 3475 * Just don't mess with mod_start and mod_len, we'll 3476 * just end up logging more csums than we need and it 3477 * will be ok. 3478 */ 3479 } else { 3480 if (ordered->file_offset + ordered->len < 3481 mod_start + mod_len) { 3482 mod_len = (mod_start + mod_len) - 3483 (ordered->file_offset + ordered->len); 3484 mod_start = ordered->file_offset + 3485 ordered->len; 3486 } else { 3487 mod_len = 0; 3488 } 3489 } 3490 3491 /* 3492 * To keep us from looping for the above case of an ordered 3493 * extent that falls inside of the logged extent. 3494 */ 3495 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, 3496 &ordered->flags)) 3497 continue; 3498 atomic_inc(&ordered->refs); 3499 spin_unlock_irq(&log->log_extents_lock[index]); 3500 /* 3501 * we've dropped the lock, we must either break or 3502 * start over after this. 3503 */ 3504 3505 wait_event(ordered->wait, ordered->csum_bytes_left == 0); 3506 3507 list_for_each_entry(sum, &ordered->list, list) { 3508 ret = btrfs_csum_file_blocks(trans, log, sum); 3509 if (ret) { 3510 btrfs_put_ordered_extent(ordered); 3511 goto unlocked; 3512 } 3513 } 3514 btrfs_put_ordered_extent(ordered); 3515 goto again; 3516 3517 } 3518 spin_unlock_irq(&log->log_extents_lock[index]); 3519 unlocked: 3520 3521 if (!mod_len || ret) 3522 return ret; 3523 3524 csum_offset = mod_start - em->start; 3525 csum_len = mod_len; 3526 3527 /* block start is already adjusted for the file extent offset. */ 3528 ret = btrfs_lookup_csums_range(log->fs_info->csum_root, 3529 em->block_start + csum_offset, 3530 em->block_start + csum_offset + 3531 csum_len - 1, &ordered_sums, 0); 3532 if (ret) 3533 return ret; 3534 3535 while (!list_empty(&ordered_sums)) { 3536 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 3537 struct btrfs_ordered_sum, 3538 list); 3539 if (!ret) 3540 ret = btrfs_csum_file_blocks(trans, log, sums); 3541 list_del(&sums->list); 3542 kfree(sums); 3543 } 3544 3545 return ret; 3546 } 3547 3548 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, 3549 struct btrfs_root *root, 3550 struct inode *inode, 3551 struct btrfs_path *path) 3552 { 3553 struct extent_map *em, *n; 3554 struct list_head extents; 3555 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree; 3556 u64 test_gen; 3557 int ret = 0; 3558 int num = 0; 3559 3560 INIT_LIST_HEAD(&extents); 3561 3562 write_lock(&tree->lock); 3563 test_gen = root->fs_info->last_trans_committed; 3564 3565 list_for_each_entry_safe(em, n, &tree->modified_extents, list) { 3566 list_del_init(&em->list); 3567 3568 /* 3569 * Just an arbitrary number, this can be really CPU intensive 3570 * once we start getting a lot of extents, and really once we 3571 * have a bunch of extents we just want to commit since it will 3572 * be faster. 
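 *
 * (Hitting the cap returns -EFBIG, which fails btrfs_log_inode() and
 * in turn makes btrfs_log_inode_parent() force a full transaction
 * commit.)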
3573 */ 3574 if (++num > 32768) { 3575 list_del_init(&tree->modified_extents); 3576 ret = -EFBIG; 3577 goto process; 3578 } 3579 3580 if (em->generation <= test_gen) 3581 continue; 3582 /* Need a ref to keep it from getting evicted from cache */ 3583 atomic_inc(&em->refs); 3584 set_bit(EXTENT_FLAG_LOGGING, &em->flags); 3585 list_add_tail(&em->list, &extents); 3586 num++; 3587 } 3588 3589 list_sort(NULL, &extents, extent_cmp); 3590 3591 process: 3592 while (!list_empty(&extents)) { 3593 em = list_entry(extents.next, struct extent_map, list); 3594 3595 list_del_init(&em->list); 3596 3597 /* 3598 * If we had an error we just need to delete everybody from our 3599 * private list. 3600 */ 3601 if (ret) { 3602 clear_em_logging(tree, em); 3603 free_extent_map(em); 3604 continue; 3605 } 3606 3607 write_unlock(&tree->lock); 3608 3609 ret = log_one_extent(trans, inode, root, em, path); 3610 write_lock(&tree->lock); 3611 clear_em_logging(tree, em); 3612 free_extent_map(em); 3613 } 3614 WARN_ON(!list_empty(&extents)); 3615 write_unlock(&tree->lock); 3616 3617 btrfs_release_path(path); 3618 return ret; 3619 } 3620 3621 /* log a single inode in the tree log. 3622 * At least one parent directory for this inode must exist in the tree 3623 * or be logged already. 3624 * 3625 * Any items from this inode changed by the current transaction are copied 3626 * to the log tree. An extra reference is taken on any extents in this 3627 * file, allowing us to avoid a whole pile of corner cases around logging 3628 * blocks that have been removed from the tree. 3629 * 3630 * See LOG_INODE_ALL and related defines for a description of what inode_only 3631 * does. 3632 * 3633 * This handles both files and directories. 3634 */ 3635 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 3636 struct btrfs_root *root, struct inode *inode, 3637 int inode_only) 3638 { 3639 struct btrfs_path *path; 3640 struct btrfs_path *dst_path; 3641 struct btrfs_key min_key; 3642 struct btrfs_key max_key; 3643 struct btrfs_root *log = root->log_root; 3644 struct extent_buffer *src = NULL; 3645 int err = 0; 3646 int ret; 3647 int nritems; 3648 int ins_start_slot = 0; 3649 int ins_nr; 3650 bool fast_search = false; 3651 u64 ino = btrfs_ino(inode); 3652 3653 log = root->log_root; 3654 3655 path = btrfs_alloc_path(); 3656 if (!path) 3657 return -ENOMEM; 3658 dst_path = btrfs_alloc_path(); 3659 if (!dst_path) { 3660 btrfs_free_path(path); 3661 return -ENOMEM; 3662 } 3663 3664 min_key.objectid = ino; 3665 min_key.type = BTRFS_INODE_ITEM_KEY; 3666 min_key.offset = 0; 3667 3668 max_key.objectid = ino; 3669 3670 3671 /* today the code can only do partial logging of directories */ 3672 if (S_ISDIR(inode->i_mode) || 3673 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3674 &BTRFS_I(inode)->runtime_flags) && 3675 inode_only == LOG_INODE_EXISTS)) 3676 max_key.type = BTRFS_XATTR_ITEM_KEY; 3677 else 3678 max_key.type = (u8)-1; 3679 max_key.offset = (u64)-1; 3680 3681 /* Only run delayed items if we are a dir or a new file */ 3682 if (S_ISDIR(inode->i_mode) || 3683 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) { 3684 ret = btrfs_commit_inode_delayed_items(trans, inode); 3685 if (ret) { 3686 btrfs_free_path(path); 3687 btrfs_free_path(dst_path); 3688 return ret; 3689 } 3690 } 3691 3692 mutex_lock(&BTRFS_I(inode)->log_mutex); 3693 3694 btrfs_get_logged_extents(log, inode); 3695 3696 /* 3697 * a brute force approach to making sure we get the most uptodate 3698 * copies of everything. 
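 *
 * Four cases follow: directories drop their previously logged items
 * wholesale; inodes flagged NEEDS_FULL_SYNC have the old log copy
 * truncated away; ones flagged COPY_EVERYTHING drop up to their xattrs
 * and get recopied; anything else just refreshes its inode item and
 * skips straight to extent logging.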
3699 */ 3700 if (S_ISDIR(inode->i_mode)) { 3701 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; 3702 3703 if (inode_only == LOG_INODE_EXISTS) 3704 max_key_type = BTRFS_XATTR_ITEM_KEY; 3705 ret = drop_objectid_items(trans, log, path, ino, max_key_type); 3706 } else { 3707 if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3708 &BTRFS_I(inode)->runtime_flags)) { 3709 clear_bit(BTRFS_INODE_COPY_EVERYTHING, 3710 &BTRFS_I(inode)->runtime_flags); 3711 ret = btrfs_truncate_inode_items(trans, log, 3712 inode, 0, 0); 3713 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, 3714 &BTRFS_I(inode)->runtime_flags)) { 3715 if (inode_only == LOG_INODE_ALL) 3716 fast_search = true; 3717 max_key.type = BTRFS_XATTR_ITEM_KEY; 3718 ret = drop_objectid_items(trans, log, path, ino, 3719 max_key.type); 3720 } else { 3721 if (inode_only == LOG_INODE_ALL) 3722 fast_search = true; 3723 ret = log_inode_item(trans, log, dst_path, inode); 3724 if (ret) { 3725 err = ret; 3726 goto out_unlock; 3727 } 3728 goto log_extents; 3729 } 3730 3731 } 3732 if (ret) { 3733 err = ret; 3734 goto out_unlock; 3735 } 3736 path->keep_locks = 1; 3737 3738 while (1) { 3739 ins_nr = 0; 3740 ret = btrfs_search_forward(root, &min_key, &max_key, 3741 path, trans->transid); 3742 if (ret != 0) 3743 break; 3744 again: 3745 /* note, ins_nr might be > 0 here, cleanup outside the loop */ 3746 if (min_key.objectid != ino) 3747 break; 3748 if (min_key.type > max_key.type) 3749 break; 3750 3751 src = path->nodes[0]; 3752 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { 3753 ins_nr++; 3754 goto next_slot; 3755 } else if (!ins_nr) { 3756 ins_start_slot = path->slots[0]; 3757 ins_nr = 1; 3758 goto next_slot; 3759 } 3760 3761 ret = copy_items(trans, inode, dst_path, src, ins_start_slot, 3762 ins_nr, inode_only); 3763 if (ret) { 3764 err = ret; 3765 goto out_unlock; 3766 } 3767 ins_nr = 1; 3768 ins_start_slot = path->slots[0]; 3769 next_slot: 3770 3771 nritems = btrfs_header_nritems(path->nodes[0]); 3772 path->slots[0]++; 3773 if (path->slots[0] < nritems) { 3774 btrfs_item_key_to_cpu(path->nodes[0], &min_key, 3775 path->slots[0]); 3776 goto again; 3777 } 3778 if (ins_nr) { 3779 ret = copy_items(trans, inode, dst_path, src, 3780 ins_start_slot, 3781 ins_nr, inode_only); 3782 if (ret) { 3783 err = ret; 3784 goto out_unlock; 3785 } 3786 ins_nr = 0; 3787 } 3788 btrfs_release_path(path); 3789 3790 if (min_key.offset < (u64)-1) 3791 min_key.offset++; 3792 else if (min_key.type < (u8)-1) 3793 min_key.type++; 3794 else if (min_key.objectid < (u64)-1) 3795 min_key.objectid++; 3796 else 3797 break; 3798 } 3799 if (ins_nr) { 3800 ret = copy_items(trans, inode, dst_path, src, ins_start_slot, 3801 ins_nr, inode_only); 3802 if (ret) { 3803 err = ret; 3804 goto out_unlock; 3805 } 3806 ins_nr = 0; 3807 } 3808 3809 log_extents: 3810 if (fast_search) { 3811 btrfs_release_path(dst_path); 3812 ret = btrfs_log_changed_extents(trans, root, inode, dst_path); 3813 if (ret) { 3814 err = ret; 3815 goto out_unlock; 3816 } 3817 } else { 3818 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree; 3819 struct extent_map *em, *n; 3820 3821 write_lock(&tree->lock); 3822 list_for_each_entry_safe(em, n, &tree->modified_extents, list) 3823 list_del_init(&em->list); 3824 write_unlock(&tree->lock); 3825 } 3826 3827 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { 3828 btrfs_release_path(path); 3829 btrfs_release_path(dst_path); 3830 ret = log_directory_changes(trans, root, inode, path, dst_path); 3831 if (ret) { 3832 err = ret; 3833 goto out_unlock; 3834 } 3835 
} 3836 BTRFS_I(inode)->logged_trans = trans->transid; 3837 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans; 3838 out_unlock: 3839 if (err) 3840 btrfs_free_logged_extents(log, log->log_transid); 3841 mutex_unlock(&BTRFS_I(inode)->log_mutex); 3842 3843 btrfs_free_path(path); 3844 btrfs_free_path(dst_path); 3845 return err; 3846 } 3847 3848 /* 3849 * follow the dentry parent pointers up the chain and see if any 3850 * of the directories in it require a full commit before they can 3851 * be logged. Returns zero if nothing special needs to be done or 1 if 3852 * a full commit is required. 3853 */ 3854 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, 3855 struct inode *inode, 3856 struct dentry *parent, 3857 struct super_block *sb, 3858 u64 last_committed) 3859 { 3860 int ret = 0; 3861 struct btrfs_root *root; 3862 struct dentry *old_parent = NULL; 3863 3864 /* 3865 * for regular files, if its inode is already on disk, we don't 3866 * have to worry about the parents at all. This is because 3867 * we can use the last_unlink_trans field to record renames 3868 * and other fun in this file. 3869 */ 3870 if (S_ISREG(inode->i_mode) && 3871 BTRFS_I(inode)->generation <= last_committed && 3872 BTRFS_I(inode)->last_unlink_trans <= last_committed) 3873 goto out; 3874 3875 if (!S_ISDIR(inode->i_mode)) { 3876 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) 3877 goto out; 3878 inode = parent->d_inode; 3879 } 3880 3881 while (1) { 3882 BTRFS_I(inode)->logged_trans = trans->transid; 3883 smp_mb(); 3884 3885 if (BTRFS_I(inode)->last_unlink_trans > last_committed) { 3886 root = BTRFS_I(inode)->root; 3887 3888 /* 3889 * make sure any commits to the log are forced 3890 * to be full commits 3891 */ 3892 root->fs_info->last_trans_log_full_commit = 3893 trans->transid; 3894 ret = 1; 3895 break; 3896 } 3897 3898 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) 3899 break; 3900 3901 if (IS_ROOT(parent)) 3902 break; 3903 3904 parent = dget_parent(parent); 3905 dput(old_parent); 3906 old_parent = parent; 3907 inode = parent->d_inode; 3908 3909 } 3910 dput(old_parent); 3911 out: 3912 return ret; 3913 } 3914 3915 /* 3916 * helper function around btrfs_log_inode to make sure newly created 3917 * parent directories also end up in the log. A minimal inode and backref 3918 * only logging is done of any parent directories that are older than 3919 * the last committed transaction 3920 */ 3921 int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 3922 struct btrfs_root *root, struct inode *inode, 3923 struct dentry *parent, int exists_only) 3924 { 3925 int inode_only = exists_only ? 
LOG_INODE_EXISTS : LOG_INODE_ALL; 3926 struct super_block *sb; 3927 struct dentry *old_parent = NULL; 3928 int ret = 0; 3929 u64 last_committed = root->fs_info->last_trans_committed; 3930 3931 sb = inode->i_sb; 3932 3933 if (btrfs_test_opt(root, NOTREELOG)) { 3934 ret = 1; 3935 goto end_no_trans; 3936 } 3937 3938 if (root->fs_info->last_trans_log_full_commit > 3939 root->fs_info->last_trans_committed) { 3940 ret = 1; 3941 goto end_no_trans; 3942 } 3943 3944 if (root != BTRFS_I(inode)->root || 3945 btrfs_root_refs(&root->root_item) == 0) { 3946 ret = 1; 3947 goto end_no_trans; 3948 } 3949 3950 ret = check_parent_dirs_for_sync(trans, inode, parent, 3951 sb, last_committed); 3952 if (ret) 3953 goto end_no_trans; 3954 3955 if (btrfs_inode_in_log(inode, trans->transid)) { 3956 ret = BTRFS_NO_LOG_SYNC; 3957 goto end_no_trans; 3958 } 3959 3960 ret = start_log_trans(trans, root); 3961 if (ret) 3962 goto end_trans; 3963 3964 ret = btrfs_log_inode(trans, root, inode, inode_only); 3965 if (ret) 3966 goto end_trans; 3967 3968 /* 3969 * for regular files, if its inode is already on disk, we don't 3970 * have to worry about the parents at all. This is because 3971 * we can use the last_unlink_trans field to record renames 3972 * and other fun in this file. 3973 */ 3974 if (S_ISREG(inode->i_mode) && 3975 BTRFS_I(inode)->generation <= last_committed && 3976 BTRFS_I(inode)->last_unlink_trans <= last_committed) { 3977 ret = 0; 3978 goto end_trans; 3979 } 3980 3981 inode_only = LOG_INODE_EXISTS; 3982 while (1) { 3983 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) 3984 break; 3985 3986 inode = parent->d_inode; 3987 if (root != BTRFS_I(inode)->root) 3988 break; 3989 3990 if (BTRFS_I(inode)->generation > 3991 root->fs_info->last_trans_committed) { 3992 ret = btrfs_log_inode(trans, root, inode, inode_only); 3993 if (ret) 3994 goto end_trans; 3995 } 3996 if (IS_ROOT(parent)) 3997 break; 3998 3999 parent = dget_parent(parent); 4000 dput(old_parent); 4001 old_parent = parent; 4002 } 4003 ret = 0; 4004 end_trans: 4005 dput(old_parent); 4006 if (ret < 0) { 4007 root->fs_info->last_trans_log_full_commit = trans->transid; 4008 ret = 1; 4009 } 4010 btrfs_end_log_trans(root); 4011 end_no_trans: 4012 return ret; 4013 } 4014 4015 /* 4016 * it is not safe to log dentry if the chunk root has added new 4017 * chunks. This returns 0 if the dentry was logged, and 1 otherwise. 4018 * If this returns 1, you must commit the transaction to safely get your 4019 * data on disk. 
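 *
 * A rough sketch of the intended call pattern from an fsync
 * implementation (illustrative only; the real caller also has to
 * special-case a BTRFS_NO_LOG_SYNC return):
 *
 *	ret = btrfs_log_dentry_safe(trans, root, dentry);
 *	if (ret == 0)
 *		ret = btrfs_sync_log(trans, root);
 *	if (ret)
 *		ret = btrfs_commit_transaction(trans, root);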

/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = 0,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	fs_info->log_root_recovering = 1;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_error(fs_info, ret, "Failed to pin buffers while "
			    "recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_error(fs_info, ret,
				    "Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root_no_radix(log_root_tree,
						  &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_error(fs_info, ret,
				    "Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);
			btrfs_error(fs_info, ret, "Couldn't read target root "
				    "for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);
		BUG_ON(ret);

		if (wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
			BUG_ON(ret);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	fs_info->log_root_recovering = 0;

	/* step 4: commit the transaction, which also unpins the blocks */
	btrfs_commit_transaction(trans, fs_info->tree_root);

	kfree(log_root_tree);
	return 0;

error:
	btrfs_free_path(path);
	return ret;
}
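
/*
 * Summarizing the control flow above (an explanatory note, not from
 * the original file): the "again" loop re-runs once per walk stage,
 * so every per-subvolume log root found is walked three times.  The
 * first pass, with wc.pin set, only pins the extents the log tree is
 * using; the second, at stage LOG_WALK_REPLAY_INODES, recreates every
 * inode logged in the tree; the third, at stage LOG_WALK_REPLAY_ALL,
 * replays directory items, links and extents.  The inner while loop
 * visits log roots from the highest key.offset downward, which is why
 * key.offset starts at (u64)-1 and is reset to found_key.offset - 1
 * after each root.
 */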

/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct inode *dir, struct inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file.  When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this directory was already logged, any new
	 * names for this file/dir will get recorded
	 */
	smp_mb();
	if (BTRFS_I(dir)->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (BTRFS_I(inode)->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly.  So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	BTRFS_I(dir)->last_unlink_trans = trans->transid;
}

/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * It will return zero if all goes well, and it will return 1 if a
 * full transaction commit is required.
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct inode *inode, struct inode *old_dir,
		       struct dentry *parent)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up for the file
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're
	 * renaming it from hasn't been logged, we don't need to log it
	 */
	if (BTRFS_I(inode)->logged_trans <=
	    root->fs_info->last_trans_committed &&
	    (!old_dir || BTRFS_I(old_dir)->logged_trans <=
	     root->fs_info->last_trans_committed))
		return 0;

	return btrfs_log_inode_parent(trans, root, inode, parent, 1);
}
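
/*
 * An illustrative call sequence (a sketch, not from the original
 * file): a rename path would record the unlink in the old directory
 * before the name is removed, and log the new name afterwards; the
 * identifier new_parent is hypothetical:
 *
 *	btrfs_record_unlink_dir(trans, old_dir, inode, 1);
 *	... the rename itself takes place ...
 *	ret = btrfs_log_new_name(trans, inode, old_dir, new_parent);
 *	if (ret)
 *		a full transaction commit is required before a later
 *		fsync of the new name can rely on the log again
 */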