// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include <linux/error-injection.h>
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "tree-mod-log.h"
#include "tree-checker.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "file-item.h"

static struct kmem_cache *btrfs_path_cachep;

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);

static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
				     .driver = "blake2b-256" },
};

/*
 * The leaf data grows from end-to-front in the node.  This returns the address
 * of the start of the last item, which is the stop of the leaf data stack.
 */
static unsigned int leaf_data_end(const struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);

	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
	return btrfs_item_offset(leaf, nr - 1);
}

/*
 * Move data in a @leaf (using memmove, safe for overlapping ranges).
 *
 * @leaf:	leaf that we're doing a memmove on
 * @dst_offset:	item data offset we're moving to
 * @src_offset:	item data offset we're moving from
 * @len:	length of the data we're moving
 *
 * Wrapper around memmove_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void memmove_leaf_data(const struct extent_buffer *leaf,
				     unsigned long dst_offset,
				     unsigned long src_offset,
				     unsigned long len)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
			      btrfs_item_nr_offset(leaf, 0) + src_offset, len);
}
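/*
 * Illustrative sketch of the offset math done by the helper above (the
 * numbers are made up): item data offset 100 really lives at byte
 * (btrfs_item_nr_offset(leaf, 0) + 100) inside the extent buffer, because
 * the leaf header precedes the item array.  So moving 200 bytes of item
 * data from offset 300 down to offset 100 is simply:
 *
 *	memmove_leaf_data(leaf, 100, 300, 200);
 *
 * which expands to one memmove_extent_buffer() call with both offsets
 * shifted by the same header-relative base.
 */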
/*
 * Copy item data from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf that we're copying into
 * @src:	source leaf that we're copying from
 * @dst_offset:	item data offset we're copying to
 * @src_offset:	item data offset we're copying from
 * @len:	length of the data we're copying
 *
 * Wrapper around copy_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void copy_leaf_data(const struct extent_buffer *dst,
				  const struct extent_buffer *src,
				  unsigned long dst_offset,
				  unsigned long src_offset, unsigned long len)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
			   btrfs_item_nr_offset(src, 0) + src_offset, len);
}

/*
 * Move items in a @leaf (using memmove).
 *
 * @dst:	destination leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around memmove_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void memmove_leaf_items(const struct extent_buffer *leaf,
				      int dst_item, int src_item, int nr_items)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
			      btrfs_item_nr_offset(leaf, src_item),
			      nr_items * sizeof(struct btrfs_item));
}

/*
 * Copy items from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf for the items
 * @src:	source leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around copy_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void copy_leaf_items(const struct extent_buffer *dst,
				   const struct extent_buffer *src,
				   int dst_item, int src_item, int nr_items)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
			   btrfs_item_nr_offset(src, src_item),
			   nr_items * sizeof(struct btrfs_item));
}

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);

	/* csum type is validated at mount time */
	return btrfs_csums[t].size;
}

const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].name;
}

/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 * name.
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].driver[0] ?
		btrfs_csums[csum_type].driver :
		btrfs_csums[csum_type].name;
}

size_t __attribute_const__ btrfs_get_num_csums(void)
{
	return ARRAY_SIZE(btrfs_csums);
}

struct btrfs_path *btrfs_alloc_path(void)
{
	might_sleep();

	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
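/*
 * Typical lifecycle of the path helpers above (an illustrative sketch,
 * error handling trimmed):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	...use path->nodes[0] and path->slots[0]...
 *	btrfs_release_path(path);	(optional: reuse for another search)
 *	btrfs_free_path(path);		(also releases the path)
 */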
/*
 * We want the transaction abort to print stack trace only for errors where the
 * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
 * caused by external factors.
 */
bool __cold abort_should_print_stack(int errno)
{
	switch (errno) {
	case -EIO:
	case -EROFS:
	case -ENOMEM:
		return false;
	}
	return true;
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
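/*
 * The lockless scheme above is the usual RCU-plus-refcount pattern; a
 * stripped-down sketch of what it does:
 *
 *	rcu_read_lock();
 *	obj = rcu_dereference(shared_ptr);
 *	if (!atomic_inc_not_zero(&obj->refs))
 *		retry after synchronize_rcu();
 *	rcu_read_unlock();
 *
 * The refcount can only hit zero once the node has been replaced, so a
 * successful inc_not_zero guarantees the buffer stays allocated even if it
 * immediately stops being the root.
 */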
/*
 * Cowonly roots (not-shareable trees, everything that is not a subvolume or
 * reloc root) just get put onto a simple dirty list.  Transaction walks this
 * list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error
 * code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0,
				     BTRFS_NESTING_NEW_ROOT);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in shareable trees and tree roots are never shared.
	 * If a block was allocated after the last snapshot and the block was
	 * not allocated by tree relocation, we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}
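/*
 * Worked example for the check above (made-up generation numbers): take a
 * subvolume root whose root_item last_snapshot is 100.  A block with
 * header generation 90 predates the snapshot, so the snapshot may still
 * point at it and it is potentially shared (return 1).  A block with
 * generation 150 was COWed after the snapshot and, unless it carries
 * BTRFS_HEADER_FLAG_RELOC, only this tree can reference it (return 0).
 */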
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, buf,
							  new_flags, level);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clean_tree_block(buf);
		*last_ref = 1;
	}
	return 0;
}
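/*
 * A rough mental model of the outcomes above (refs is the extent refcount
 * of @buf looked up in the function):
 *
 *	refs > 1, owner tree or reloc tree, mixed backrefs:  @buf stays
 *		shared, so add a full backref for it and flip FULL_BACKREF
 *		on its extent flags.
 *	refs > 1, otherwise:  just add a reference for the new @cow copy.
 *	refs == 1:  the COW makes @buf unreachable; drop its child refs if
 *		it carried full backrefs, mark it clean and set *last_ref
 *		so the caller can free the block.
 */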
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct extent_buffer *buf,
				      struct extent_buffer *parent, int parent_slot,
				      struct extent_buffer **cow_ret,
				      u64 search_start, u64 empty_size,
				      enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_write_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
				     root->root_key.objectid, &disk_key, level,
				     search_start, empty_size, nest);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		atomic_inc(&cow->refs);
		ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
				      parent_start, last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		btrfs_tree_mod_log_insert_key(parent, parent_slot,
					      BTRFS_MOD_LOG_KEY_REPLACE);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = btrfs_tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_tree_unlock(cow);
				free_extent_buffer(cow);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
				      parent_start, last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
		btrfs_err(fs_info,
			  "COW'ing blocks on a fs root that's being dropped");

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     trans->transid,
		     fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also we don't care about the error, as it's handled internally.
	 */
	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
	ret = __btrfs_cow_block(trans, root, buf, parent,
				parent_slot, cow_ret, search_start, 0, nest);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
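/*
 * Worked example (made-up byte addresses, 16K blocksize): blocknr = 1M and
 * other = 1M + 20K are "close", because the gap between the end of the
 * first block (1M + 16K) and the start of the second is only 4K, below
 * the 32K threshold, so close_blocks() returns 1 and defrag leaves the
 * pair alone.  With other at 1M + 64K the gap is 48K and the block
 * becomes a relocation candidate.
 */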
#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys, on little-endian the disk order is same as CPU order and
 * we can avoid the conversion.
 */
static int comp_keys(const struct btrfs_disk_key *disk_key,
		     const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
#endif

/*
 * same as comp_keys only with two btrfs_key's
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
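/*
 * Keys sort by (objectid, type, offset), in that priority.  For example,
 * with hypothetical values:
 *
 *	{ .objectid = 257, .type = BTRFS_INODE_ITEM_KEY,  .offset = 0 }
 * sorts before
 *	{ .objectid = 257, .type = BTRFS_EXTENT_DATA_KEY, .offset = 0 }
 *
 * because type only breaks ties on objectid, and offset only breaks ties
 * on both objectid and type.
 */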
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_read_node_slot(parent, i);
		if (IS_ERR(cur))
			return PTR_ERR(cur);
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize),
					BTRFS_NESTING_COW);
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * Search for a key in the given extent_buffer.
 *
 * The lower boundary for the search is specified by the slot number @low.
 * Use a value of 0 to search over the whole extent buffer.
 *
 * The slot in the extent buffer is returned via @slot.  If the key exists in
 * the extent buffer, then @slot will point to the slot where the key is,
 * otherwise it points to the slot where you would insert the key.
 *
 * Slot may point to the total number of items (i.e. one position beyond the
 * last key) if the key is bigger than the last key in the extent buffer.
 */
static noinline int generic_bin_search(struct extent_buffer *eb, int low,
				       const struct btrfs_key *key, int *slot)
{
	unsigned long p;
	int item_size;
	int high = btrfs_header_nritems(eb);
	int ret;
	const int key_size = sizeof(struct btrfs_disk_key);

	if (low > high) {
		btrfs_err(eb->fs_info,
		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	if (btrfs_header_level(eb) == 0) {
		p = offsetof(struct btrfs_leaf, items);
		item_size = sizeof(struct btrfs_item);
	} else {
		p = offsetof(struct btrfs_node, ptrs);
		item_size = sizeof(struct btrfs_key_ptr);
	}

	while (low < high) {
		unsigned long oip;
		unsigned long offset;
		struct btrfs_disk_key *tmp;
		struct btrfs_disk_key unaligned;
		int mid;

		mid = (low + high) / 2;
		offset = p + mid * item_size;
		oip = offset_in_page(offset);

		if (oip + key_size <= PAGE_SIZE) {
			const unsigned long idx = get_eb_page_index(offset);
			char *kaddr = page_address(eb->pages[idx]);

			oip = get_eb_offset_in_page(eb, offset);
			tmp = (struct btrfs_disk_key *)(kaddr + oip);
		} else {
			read_extent_buffer(eb, &unaligned, offset, key_size);
			tmp = &unaligned;
		}

		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * Simple binary search on an extent buffer.  Works for both leaves and nodes,
 * and always searches over the whole range of keys (slot 0 to slot
 * 'nritems - 1').
 */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int *slot)
{
	return generic_bin_search(eb, 0, key, slot);
}
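/*
 * Return convention of the binary search, with a tiny example: searching a
 * leaf whose keys are { A, C, D } (in key order) for C sets *slot to 1 and
 * returns 0 (exact match).  Searching for a key B that sorts between A and
 * C also sets *slot to 1 but returns 1: the slot where B would have to be
 * inserted to keep the leaf sorted.
 */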
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/*
 * given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot)
{
	int level = btrfs_header_level(parent);
	struct btrfs_tree_parent_check check = { 0 };
	struct extent_buffer *eb;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	BUG_ON(level == 0);

	check.level = level - 1;
	check.transid = btrfs_node_ptr_generation(parent, slot);
	check.owner_root = btrfs_header_owner(parent);
	check.has_first_key = true;
	btrfs_node_key_to_cpu(parent, &check.first_key, slot);

	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
			     &check);
	if (IS_ERR(eb))
		return eb;
	if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return ERR_PTR(-EIO);
	}

	return eb;
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	ASSERT(level > 0);

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = btrfs_read_node_slot(mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}

		btrfs_tree_lock(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		btrfs_clean_tree_block(mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	left = btrfs_read_node_slot(parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	if (left) {
		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left,
				       BTRFS_NESTING_LEFT_COW);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	right = btrfs_read_node_slot(parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	if (right) {
		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right,
				       BTRFS_NESTING_RIGHT_COW);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			btrfs_clean_tree_block(right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, btrfs_root_id(root), right,
					      0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;

			btrfs_node_key(right, &right_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right.
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto enospc;
		}
		wret = balance_node_right(trans, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		btrfs_clean_tree_block(mid);
		btrfs_tree_unlock(mid);
		del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;

		btrfs_node_key(mid, &mid_key, 0);
		ret = btrfs_tree_mod_log_insert_key(parent, pslot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		BUG_ON(ret < 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			atomic_inc(&left->refs);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
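/*
 * A compressed view of what balance_level() above tries, in order
 * (informal sketch of the code, not a spec):
 *
 *	1. root with a single pointer:  promote the child to be the root.
 *	2. push items from mid into the left sibling, then drain the right
 *	   sibling into mid; any node emptied along the way is deleted from
 *	   the parent and freed.
 *	3. if mid is left with a single item, pull entries from the left
 *	   sibling so that a later deletion cannot empty mid.
 *
 * Each step COWs the touched siblings first, and the path is re-pointed
 * at whichever node ended up holding the original slot.
 */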
/*
 * Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = btrfs_read_node_slot(parent, pslot - 1);
	if (IS_ERR(left))
		left = NULL;

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left,
					      BTRFS_NESTING_LEFT_COW);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot,
					BTRFS_MOD_LOG_KEY_REPLACE);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = btrfs_read_node_slot(parent, pslot + 1);
	if (IS_ERR(right))
		right = NULL;

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right, BTRFS_NESTING_RIGHT_COW);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			BUG_ON(ret < 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}

/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 nread_max;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	/*
	 * Since the time between visiting leaves is much shorter than the time
	 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
	 * much IO at once (possibly random).
	 */
	if (path->reada == READA_FORWARD_ALWAYS) {
		if (level > 1)
			nread_max = node->fs_info->nodesize;
		else
			nread_max = SZ_128K;
	} else {
		nread_max = SZ_64K;
	}

	search = btrfs_node_blockptr(node, slot);
	blocksize = fs_info->nodesize;
	if (path->reada != READA_FORWARD_ALWAYS) {
		struct extent_buffer *eb;

		eb = find_extent_buffer(fs_info, search);
		if (eb) {
			free_extent_buffer(eb);
			return;
		}
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD ||
			   path->reada == READA_FORWARD_ALWAYS) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if (path->reada == READA_FORWARD_ALWAYS ||
		    (search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			btrfs_readahead_node_child(node, nr);
			nread += blocksize;
		}
		nscan++;
		if (nread > nread_max || nscan > 32)
			break;
	}
}

static noinline void reada_for_balance(struct btrfs_path *path, int level)
{
	struct extent_buffer *parent;
	int slot;
	int nritems;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0)
		btrfs_readahead_node_child(parent, slot - 1);
	if (slot + 1 < nritems)
		btrfs_readahead_node_child(parent, slot + 1);
}
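/*
 * Readahead trigger in reada_for_search(), by example (made-up addresses):
 * with the target child at byte 10M, sibling slots whose blockptr lands
 * within 64K on either side (say 10M + 48K) get passed to
 * btrfs_readahead_node_child(), until either the nread_max budget of
 * queued reads is spent (64K normally, 128K for READA_FORWARD_ALWAYS on
 * leaves) or 32 slots have been scanned.
 */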
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	bool check_skip = true;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;

		if (check_skip) {
			if (path->slots[i] == 0) {
				skip_level = i + 1;
				continue;
			}

			if (path->keep_locks) {
				u32 nritems;

				nritems = btrfs_header_nritems(path->nodes[i]);
				if (nritems < 1 || path->slots[i] >= nritems - 1) {
					skip_level = i + 1;
					continue;
				}
			}
		}

		if (i >= lowest_unlock && i > skip_level) {
			check_skip = false;
			btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}

/*
 * Helper function for btrfs_search_slot() and other functions that do a search
 * on a btree.  The goal is to find a tree block in the cache (the radix tree at
 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
 * its pages from disk.
 *
 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
 * whole btree search, starting again from the current root node.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_tree_parent_check check = { 0 };
	u64 blocknr;
	u64 gen;
	struct extent_buffer *tmp;
	int ret;
	int parent_level;
	bool unlock_up;

	unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]);
	blocknr = btrfs_node_blockptr(*eb_ret, slot);
	gen = btrfs_node_ptr_generation(*eb_ret, slot);
	parent_level = btrfs_header_level(*eb_ret);
	btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
	check.has_first_key = true;
	check.level = parent_level - 1;
	check.transid = gen;
	check.owner_root = root->root_key.objectid;

	/*
	 * If we need to read an extent buffer from disk and we are holding locks
	 * on upper level nodes, we unlock all the upper nodes before reading the
	 * extent buffer, and then return -EAGAIN to the caller as it needs to
	 * restart the search.  We don't release the lock on the current level
	 * because we need to walk this node to figure out which blocks to read.
	 */
	tmp = find_extent_buffer(fs_info, blocknr);
	if (tmp) {
		if (p->reada == READA_FORWARD_ALWAYS)
			reada_for_search(fs_info, p, level, slot, key->objectid);

		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
			/*
			 * Do extra check for first_key, eb can be stale due to
			 * being cached, read from scrub, or have multiple
			 * parents (shared tree blocks).
			 */
			if (btrfs_verify_level_key(tmp,
					parent_level - 1, &check.first_key, gen)) {
				free_extent_buffer(tmp);
				return -EUCLEAN;
			}
			*eb_ret = tmp;
			return 0;
		}
		if (p->nowait) {
			free_extent_buffer(tmp);
			return -EAGAIN;
		}

		if (unlock_up)
			btrfs_unlock_up_safe(p, level + 1);

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_extent_buffer(tmp, &check);
		if (ret) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
		if (btrfs_check_eb_owner(tmp, root->root_key.objectid)) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EUCLEAN;
		}

		if (unlock_up)
			ret = -EAGAIN;

		goto out;
	} else if (p->nowait) {
		return -EAGAIN;
	}

	if (unlock_up) {
		btrfs_unlock_up_safe(p, level + 1);
		ret = -EAGAIN;
	} else {
		ret = 0;
	}

	if (p->reada != READA_NONE)
		reada_for_search(fs_info, p, level, slot, key->objectid);

	tmp = read_tree_block(fs_info, blocknr, &check);
	if (IS_ERR(tmp)) {
		btrfs_release_path(p);
		return PTR_ERR(tmp);
	}
	/*
	 * If the read above didn't mark this buffer up to date,
	 * it will never end up being up to date.  Set ret to EIO now
	 * and give up so that our caller doesn't loop forever
	 * on our EAGAINs.
	 */
	if (!extent_buffer_uptodate(tmp))
		ret = -EIO;

out:
	if (ret == 0) {
		*eb_ret = tmp;
	} else {
		free_extent_buffer(tmp);
		btrfs_release_path(p);
	}

	return ret;
}
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = split_node(trans, root, p, level);

		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = balance_level(trans, root, p, level);
		if (ret)
			return ret;

		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			return -EAGAIN;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return ret;
}

int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 iobjectid, u64 ioff, u8 key_type,
		    struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	ASSERT(path);
	ASSERT(found_key);

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
	    found_key->objectid != key.objectid)
		return 1;

	return 0;
}

static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
							struct btrfs_path *p,
							int write_lock_level)
{
	struct extent_buffer *b;
	int root_lock = 0;
	int level = 0;

	if (p->search_commit_root) {
		b = root->commit_root;
		atomic_inc(&b->refs);
		level = btrfs_header_level(b);
		/*
		 * Ensure that all callers have set skip_locking when
		 * p->search_commit_root = 1.
		 */
		ASSERT(p->skip_locking == 1);

		goto out;
	}

	if (p->skip_locking) {
		b = btrfs_root_node(root);
		level = btrfs_header_level(b);
		goto out;
	}

	/* We try very hard to do read locks on the root */
	root_lock = BTRFS_READ_LOCK;

	/*
	 * If the level is set to maximum, we can skip trying to get the read
	 * lock.
	 */
	if (write_lock_level < BTRFS_MAX_LEVEL) {
		/*
		 * We don't know the level of the root node until we actually
		 * have it read locked
		 */
		if (p->nowait) {
			b = btrfs_try_read_lock_root_node(root);
			if (IS_ERR(b))
				return b;
		} else {
			b = btrfs_read_lock_root_node(root);
		}
		level = btrfs_header_level(b);
		if (level > write_lock_level)
			goto out;

		/* Whoops, must trade for write lock */
		btrfs_tree_read_unlock(b);
		free_extent_buffer(b);
	}

	b = btrfs_lock_root_node(root);
	root_lock = BTRFS_WRITE_LOCK;

	/* The level might have changed, check again */
	level = btrfs_header_level(b);

out:
	/*
	 * The root may have failed to write out at some point, and thus is no
	 * longer valid, return an error in this case.
	 */
	if (!extent_buffer_uptodate(b)) {
		if (root_lock)
			btrfs_tree_unlock_rw(b, root_lock);
		free_extent_buffer(b);
		return ERR_PTR(-EIO);
	}

	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;
	/*
	 * Callers are responsible for dropping b's references.
	 */
	return b;
}

/*
 * Replace the extent buffer at the lowest level of the path with a cloned
 * version.  The purpose is to be able to use it safely, after releasing the
 * commit root semaphore, even if relocation is happening in parallel, the
 * transaction used for relocation is committed and the extent buffer is
 * reallocated in the next transaction.
 *
 * This is used in a context where the caller does not prevent transaction
 * commits from happening, either by holding a transaction handle or holding
 * some lock, while it's doing searches through a commit root.
 * At the moment it's only used for send operations.
 */
static int finish_need_commit_sem_search(struct btrfs_path *path)
{
	const int i = path->lowest_level;
	const int slot = path->slots[i];
	struct extent_buffer *lowest = path->nodes[i];
	struct extent_buffer *clone;

	ASSERT(path->need_commit_sem);

	if (!lowest)
		return 0;

	lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);

	clone = btrfs_clone_extent_buffer(lowest);
	if (!clone)
		return -ENOMEM;

	btrfs_release_path(path);
	path->nodes[i] = clone;
	path->slots[i] = slot;

	return 0;
}
static inline int search_for_key_slot(struct extent_buffer *eb,
				      int search_low_slot,
				      const struct btrfs_key *key,
				      int prev_cmp,
				      int *slot)
{
	/*
	 * If a previous call to btrfs_bin_search() on a parent node returned an
	 * exact match (prev_cmp == 0), we can safely assume the target key will
	 * always be at slot 0 on lower levels, since each key pointer
	 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
	 * subtree it points to.  Thus we can skip searching lower levels.
	 */
	if (prev_cmp == 0) {
		*slot = 0;
		return 0;
	}

	return generic_bin_search(eb, search_low_slot, key, slot);
}
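/*
 * Example of the prev_cmp shortcut above (hypothetical key): if a search
 * for key (257 INODE_ITEM 0) matched exactly at some node level, then the
 * matched key pointer is by definition the smallest key of its subtree,
 * so the key must sit at slot 0 of every node and leaf below it; the
 * binary searches on all lower levels can be skipped entirely.
 */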
Only the item size grows, no new btrfs_item will be 1976 * added. If search_for_extension is not set, ins_len already 1977 * accounts for the size of struct btrfs_item; deduct it here so 1978 * the leaf space check will be correct. 1979 */ 1980 if (ret == 0 && !path->search_for_extension) { 1981 ASSERT(ins_len >= sizeof(struct btrfs_item)); 1982 ins_len -= sizeof(struct btrfs_item); 1983 } 1984 1985 ASSERT(leaf_free_space >= 0); 1986 1987 if (leaf_free_space < ins_len) { 1988 int err; 1989 1990 err = split_leaf(trans, root, key, path, ins_len, 1991 (ret == 0)); 1992 ASSERT(err <= 0); 1993 if (WARN_ON(err > 0)) 1994 err = -EUCLEAN; 1995 if (err) 1996 ret = err; 1997 } 1998 } 1999 2000 return ret; 2001 } 2002 2003 /* 2004 * btrfs_search_slot - look for a key in a tree and perform necessary 2005 * modifications to preserve tree invariants. 2006 * 2007 * @trans: Handle of transaction, used when modifying the tree 2008 * @p: Holds all btree nodes along the search path 2009 * @root: The root node of the tree 2010 * @key: The key we are looking for 2011 * @ins_len: Indicates purpose of search: 2012 * >0 for inserts, the size of the item inserted (*) 2013 * <0 for deletions 2014 * 0 for plain searches, not modifying the tree 2015 * 2016 * (*) If the size of the item inserted doesn't include 2017 * sizeof(struct btrfs_item), then p->search_for_extension must 2018 * be set. 2019 * @cow: boolean indicating whether CoW operations should be 2020 * performed. Must always be 1 when modifying the tree. 2021 * 2022 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree. 2023 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible) 2024 * 2025 * If @key is found, 0 is returned and you can find the item in the leaf level 2026 * of the path (level 0) 2027 * 2028 * If @key isn't found, 1 is returned and the leaf level of the path (level 0) 2029 * points to the slot where it should be inserted 2030 * 2031 * If an error is encountered while searching the tree, a negative error 2032 * number is returned 2033 */ 2034 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2035 const struct btrfs_key *key, struct btrfs_path *p, 2036 int ins_len, int cow) 2037 { 2038 struct btrfs_fs_info *fs_info = root->fs_info; 2039 struct extent_buffer *b; 2040 int slot; 2041 int ret; 2042 int err; 2043 int level; 2044 int lowest_unlock = 1; 2045 /* everything at write_lock_level or lower must be write locked */ 2046 int write_lock_level = 0; 2047 u8 lowest_level = 0; 2048 int min_write_lock_level; 2049 int prev_cmp; 2050 2051 might_sleep(); 2052 2053 lowest_level = p->lowest_level; 2054 WARN_ON(lowest_level && ins_len > 0); 2055 WARN_ON(p->nodes[0] != NULL); 2056 BUG_ON(!cow && ins_len); 2057 2058 /* 2059 * For now only allow nowait for read-only operations. There's no 2060 * strict reason why we couldn't; we only need it for reads, so it's 2061 * only implemented for reads.
2062 */ 2063 ASSERT(!p->nowait || !cow); 2064 2065 if (ins_len < 0) { 2066 lowest_unlock = 2; 2067 2068 /* when we are removing items, we might have to go up to level 2069 * two as we update tree pointers. Make sure we keep write locks 2070 * on those levels as well 2071 */ 2072 write_lock_level = 2; 2073 } else if (ins_len > 0) { 2074 /* 2075 * for inserting items, make sure we have a write lock on 2076 * level 1 so we can update keys 2077 */ 2078 write_lock_level = 1; 2079 } 2080 2081 if (!cow) 2082 write_lock_level = -1; 2083 2084 if (cow && (p->keep_locks || p->lowest_level)) 2085 write_lock_level = BTRFS_MAX_LEVEL; 2086 2087 min_write_lock_level = write_lock_level; 2088 2089 if (p->need_commit_sem) { 2090 ASSERT(p->search_commit_root); 2091 if (p->nowait) { 2092 if (!down_read_trylock(&fs_info->commit_root_sem)) 2093 return -EAGAIN; 2094 } else { 2095 down_read(&fs_info->commit_root_sem); 2096 } 2097 } 2098 2099 again: 2100 prev_cmp = -1; 2101 b = btrfs_search_slot_get_root(root, p, write_lock_level); 2102 if (IS_ERR(b)) { 2103 ret = PTR_ERR(b); 2104 goto done; 2105 } 2106 2107 while (b) { 2108 int dec = 0; 2109 2110 level = btrfs_header_level(b); 2111 2112 if (cow) { 2113 bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); 2114 2115 /* 2116 * if we don't really need to cow this block 2117 * then we don't want to set the path blocking, 2118 * so we test it here 2119 */ 2120 if (!should_cow_block(trans, root, b)) 2121 goto cow_done; 2122 2123 /* 2124 * must have write locks on this node and the 2125 * parent 2126 */ 2127 if (level > write_lock_level || 2128 (level + 1 > write_lock_level && 2129 level + 1 < BTRFS_MAX_LEVEL && 2130 p->nodes[level + 1])) { 2131 write_lock_level = level + 1; 2132 btrfs_release_path(p); 2133 goto again; 2134 } 2135 2136 if (last_level) 2137 err = btrfs_cow_block(trans, root, b, NULL, 0, 2138 &b, 2139 BTRFS_NESTING_COW); 2140 else 2141 err = btrfs_cow_block(trans, root, b, 2142 p->nodes[level + 1], 2143 p->slots[level + 1], &b, 2144 BTRFS_NESTING_COW); 2145 if (err) { 2146 ret = err; 2147 goto done; 2148 } 2149 } 2150 cow_done: 2151 p->nodes[level] = b; 2152 2153 /* 2154 * we have a lock on b and as long as we aren't changing 2155 * the tree, there is no way for the items in b to change. 2156 * It is safe to drop the lock on our parent before we 2157 * go through the expensive btree search on b. 2158 * 2159 * If we're inserting or deleting (ins_len != 0), then we might 2160 * be changing slot zero, which may require changing the parent. 2161 * So, we can't drop the lock until after we know which slot 2162 * we're operating on.
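 *
 * For example (illustrative): an insertion that lands at slot 0 of this
 * node requires updating the key in the parent, so the parent's write
 * lock must be kept until we know the slot is not 0.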
2163 */ 2164 if (!ins_len && !p->keep_locks) { 2165 int u = level + 1; 2166 2167 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 2168 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 2169 p->locks[u] = 0; 2170 } 2171 } 2172 2173 if (level == 0) { 2174 if (ins_len > 0) 2175 ASSERT(write_lock_level >= 1); 2176 2177 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp); 2178 if (!p->search_for_split) 2179 unlock_up(p, level, lowest_unlock, 2180 min_write_lock_level, NULL); 2181 goto done; 2182 } 2183 2184 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot); 2185 if (ret < 0) 2186 goto done; 2187 prev_cmp = ret; 2188 2189 if (ret && slot > 0) { 2190 dec = 1; 2191 slot--; 2192 } 2193 p->slots[level] = slot; 2194 err = setup_nodes_for_search(trans, root, p, b, level, ins_len, 2195 &write_lock_level); 2196 if (err == -EAGAIN) 2197 goto again; 2198 if (err) { 2199 ret = err; 2200 goto done; 2201 } 2202 b = p->nodes[level]; 2203 slot = p->slots[level]; 2204 2205 /* 2206 * Slot 0 is special, if we change the key we have to update 2207 * the parent pointer which means we must have a write lock on 2208 * the parent 2209 */ 2210 if (slot == 0 && ins_len && write_lock_level < level + 1) { 2211 write_lock_level = level + 1; 2212 btrfs_release_path(p); 2213 goto again; 2214 } 2215 2216 unlock_up(p, level, lowest_unlock, min_write_lock_level, 2217 &write_lock_level); 2218 2219 if (level == lowest_level) { 2220 if (dec) 2221 p->slots[level]++; 2222 goto done; 2223 } 2224 2225 err = read_block_for_search(root, p, &b, level, slot, key); 2226 if (err == -EAGAIN) 2227 goto again; 2228 if (err) { 2229 ret = err; 2230 goto done; 2231 } 2232 2233 if (!p->skip_locking) { 2234 level = btrfs_header_level(b); 2235 2236 btrfs_maybe_reset_lockdep_class(root, b); 2237 2238 if (level <= write_lock_level) { 2239 btrfs_tree_lock(b); 2240 p->locks[level] = BTRFS_WRITE_LOCK; 2241 } else { 2242 if (p->nowait) { 2243 if (!btrfs_try_tree_read_lock(b)) { 2244 free_extent_buffer(b); 2245 ret = -EAGAIN; 2246 goto done; 2247 } 2248 } else { 2249 btrfs_tree_read_lock(b); 2250 } 2251 p->locks[level] = BTRFS_READ_LOCK; 2252 } 2253 p->nodes[level] = b; 2254 } 2255 } 2256 ret = 1; 2257 done: 2258 if (ret < 0 && !p->skip_release_on_error) 2259 btrfs_release_path(p); 2260 2261 if (p->need_commit_sem) { 2262 int ret2; 2263 2264 ret2 = finish_need_commit_sem_search(p); 2265 up_read(&fs_info->commit_root_sem); 2266 if (ret2) 2267 ret = ret2; 2268 } 2269 2270 return ret; 2271 } 2272 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO); 2273 2274 /* 2275 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 2276 * current state of the tree together with the operations recorded in the tree 2277 * modification log to search for the key in a previous version of this tree, as 2278 * denoted by the time_seq parameter. 2279 * 2280 * Naturally, there is no support for insert, delete or cow operations. 2281 * 2282 * The resulting path and return value will be set up as if we called 2283 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 
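 *
 * An illustrative usage sketch (hypothetical caller; 'ino' and the choice
 * of time_seq are placeholders, error handling trimmed):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_EXTENT_DATA_KEY,
 *				 .offset = 0 };
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *	btrfs_free_path(path);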
2284 */ 2285 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 2286 struct btrfs_path *p, u64 time_seq) 2287 { 2288 struct btrfs_fs_info *fs_info = root->fs_info; 2289 struct extent_buffer *b; 2290 int slot; 2291 int ret; 2292 int err; 2293 int level; 2294 int lowest_unlock = 1; 2295 u8 lowest_level = 0; 2296 2297 lowest_level = p->lowest_level; 2298 WARN_ON(p->nodes[0] != NULL); 2299 ASSERT(!p->nowait); 2300 2301 if (p->search_commit_root) { 2302 BUG_ON(time_seq); 2303 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2304 } 2305 2306 again: 2307 b = btrfs_get_old_root(root, time_seq); 2308 if (!b) { 2309 ret = -EIO; 2310 goto done; 2311 } 2312 level = btrfs_header_level(b); 2313 p->locks[level] = BTRFS_READ_LOCK; 2314 2315 while (b) { 2316 int dec = 0; 2317 2318 level = btrfs_header_level(b); 2319 p->nodes[level] = b; 2320 2321 /* 2322 * we have a lock on b and as long as we aren't changing 2323 * the tree, there is no way for the items in b to change. 2324 * It is safe to drop the lock on our parent before we 2325 * go through the expensive btree search on b. 2326 */ 2327 btrfs_unlock_up_safe(p, level + 1); 2328 2329 ret = btrfs_bin_search(b, key, &slot); 2330 if (ret < 0) 2331 goto done; 2332 2333 if (level == 0) { 2334 p->slots[level] = slot; 2335 unlock_up(p, level, lowest_unlock, 0, NULL); 2336 goto done; 2337 } 2338 2339 if (ret && slot > 0) { 2340 dec = 1; 2341 slot--; 2342 } 2343 p->slots[level] = slot; 2344 unlock_up(p, level, lowest_unlock, 0, NULL); 2345 2346 if (level == lowest_level) { 2347 if (dec) 2348 p->slots[level]++; 2349 goto done; 2350 } 2351 2352 err = read_block_for_search(root, p, &b, level, slot, key); 2353 if (err == -EAGAIN) 2354 goto again; 2355 if (err) { 2356 ret = err; 2357 goto done; 2358 } 2359 2360 level = btrfs_header_level(b); 2361 btrfs_tree_read_lock(b); 2362 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq); 2363 if (!b) { 2364 ret = -ENOMEM; 2365 goto done; 2366 } 2367 p->locks[level] = BTRFS_READ_LOCK; 2368 p->nodes[level] = b; 2369 } 2370 ret = 1; 2371 done: 2372 if (ret < 0) 2373 btrfs_release_path(p); 2374 2375 return ret; 2376 } 2377 2378 /* 2379 * helper to use instead of btrfs_search_slot() if no exact match is needed but 2380 * instead the next or previous item should be returned. 2381 * When find_higher is true, the next higher item is returned, the next lower 2382 * otherwise. 2383 * When return_any and find_higher are both true, and no higher item is found, 2384 * return the next lower instead. 2385 * When return_any is true and find_higher is false, and no lower item is found, 2386 * return the next higher instead. 2387 * It returns 0 if any item is found, 1 if none is found (tree empty), and 2388 * < 0 on error 2389 */ 2390 int btrfs_search_slot_for_read(struct btrfs_root *root, 2391 const struct btrfs_key *key, 2392 struct btrfs_path *p, int find_higher, 2393 int return_any) 2394 { 2395 int ret; 2396 struct extent_buffer *leaf; 2397 2398 again: 2399 ret = btrfs_search_slot(NULL, root, key, p, 0, 0); 2400 if (ret <= 0) 2401 return ret; 2402 /* 2403 * a return value of 1 means the path is at the position where the 2404 * item should be inserted. Normally this is the next bigger item, 2405 * but in case the previous item is the last in a leaf, path points 2406 * to the first free slot in the previous leaf, i.e. at an invalid 2407 * item.
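 *
 * For example (illustrative): searching for a key greater than every key
 * in the tree leaves p->slots[0] equal to btrfs_header_nritems(leaf),
 * i.e. one past the last valid item, which is handled below.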
2408 */ 2409 leaf = p->nodes[0]; 2410 2411 if (find_higher) { 2412 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2413 ret = btrfs_next_leaf(root, p); 2414 if (ret <= 0) 2415 return ret; 2416 if (!return_any) 2417 return 1; 2418 /* 2419 * no higher item found, return the next 2420 * lower instead 2421 */ 2422 return_any = 0; 2423 find_higher = 0; 2424 btrfs_release_path(p); 2425 goto again; 2426 } 2427 } else { 2428 if (p->slots[0] == 0) { 2429 ret = btrfs_prev_leaf(root, p); 2430 if (ret < 0) 2431 return ret; 2432 if (!ret) { 2433 leaf = p->nodes[0]; 2434 if (p->slots[0] == btrfs_header_nritems(leaf)) 2435 p->slots[0]--; 2436 return 0; 2437 } 2438 if (!return_any) 2439 return 1; 2440 /* 2441 * no lower item found, return the next 2442 * higher instead 2443 */ 2444 return_any = 0; 2445 find_higher = 1; 2446 btrfs_release_path(p); 2447 goto again; 2448 } else { 2449 --p->slots[0]; 2450 } 2451 } 2452 return 0; 2453 } 2454 2455 /* 2456 * Execute search and call btrfs_previous_item to traverse backwards if the item 2457 * was not found. 2458 * 2459 * Return 0 if found, 1 if not found and < 0 if error. 2460 */ 2461 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key, 2462 struct btrfs_path *path) 2463 { 2464 int ret; 2465 2466 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 2467 if (ret > 0) 2468 ret = btrfs_previous_item(root, path, key->objectid, key->type); 2469 2470 if (ret == 0) 2471 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2472 2473 return ret; 2474 } 2475 2476 /* 2477 * Search for a valid slot for the given path. 2478 * 2479 * @root: The root node of the tree. 2480 * @key: Will contain a valid item if found. 2481 * @path: The starting point to validate the slot. 2482 * 2483 * Return: 0 if the item is valid 2484 * 1 if not found 2485 * <0 if error. 2486 */ 2487 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, 2488 struct btrfs_path *path) 2489 { 2490 while (1) { 2491 int ret; 2492 const int slot = path->slots[0]; 2493 const struct extent_buffer *leaf = path->nodes[0]; 2494 2495 /* This is where we start walking the path. */ 2496 if (slot >= btrfs_header_nritems(leaf)) { 2497 /* 2498 * If we've reached the last slot in this leaf we need 2499 * to go to the next leaf and reset the path. 2500 */ 2501 ret = btrfs_next_leaf(root, path); 2502 if (ret) 2503 return ret; 2504 continue; 2505 } 2506 /* Store the found, valid item in @key. */ 2507 btrfs_item_key_to_cpu(leaf, key, slot); 2508 break; 2509 } 2510 return 0; 2511 } 2512 2513 /* 2514 * adjust the pointers going up the tree, starting at level 2515 * making sure the right key of each node points to 'key'. 2516 * This is used after shifting pointers to the left, so it stops 2517 * fixing up pointers when a given leaf/node is not in slot 0 of the 2518 * higher levels 2519 * 2520 */ 2521 static void fixup_low_keys(struct btrfs_path *path, 2522 struct btrfs_disk_key *key, int level) 2523 { 2524 int i; 2525 struct extent_buffer *t; 2526 int ret; 2527 2528 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2529 int tslot = path->slots[i]; 2530 2531 if (!path->nodes[i]) 2532 break; 2533 t = path->nodes[i]; 2534 ret = btrfs_tree_mod_log_insert_key(t, tslot, 2535 BTRFS_MOD_LOG_KEY_REPLACE); 2536 BUG_ON(ret < 0); 2537 btrfs_set_node_key(t, key, tslot); 2538 btrfs_mark_buffer_dirty(path->nodes[i]); 2539 if (tslot != 0) 2540 break; 2541 } 2542 } 2543 2544 /* 2545 * update item key. 2546 * 2547 * This function isn't completely safe.
It's the caller's responsibility 2548 * to ensure that the new key won't break the order 2549 */ 2550 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, 2551 struct btrfs_path *path, 2552 const struct btrfs_key *new_key) 2553 { 2554 struct btrfs_disk_key disk_key; 2555 struct extent_buffer *eb; 2556 int slot; 2557 2558 eb = path->nodes[0]; 2559 slot = path->slots[0]; 2560 if (slot > 0) { 2561 btrfs_item_key(eb, &disk_key, slot - 1); 2562 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) { 2563 btrfs_crit(fs_info, 2564 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2565 slot, btrfs_disk_key_objectid(&disk_key), 2566 btrfs_disk_key_type(&disk_key), 2567 btrfs_disk_key_offset(&disk_key), 2568 new_key->objectid, new_key->type, 2569 new_key->offset); 2570 btrfs_print_leaf(eb); 2571 BUG(); 2572 } 2573 } 2574 if (slot < btrfs_header_nritems(eb) - 1) { 2575 btrfs_item_key(eb, &disk_key, slot + 1); 2576 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) { 2577 btrfs_crit(fs_info, 2578 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2579 slot, btrfs_disk_key_objectid(&disk_key), 2580 btrfs_disk_key_type(&disk_key), 2581 btrfs_disk_key_offset(&disk_key), 2582 new_key->objectid, new_key->type, 2583 new_key->offset); 2584 btrfs_print_leaf(eb); 2585 BUG(); 2586 } 2587 } 2588 2589 btrfs_cpu_key_to_disk(&disk_key, new_key); 2590 btrfs_set_item_key(eb, &disk_key, slot); 2591 btrfs_mark_buffer_dirty(eb); 2592 if (slot == 0) 2593 fixup_low_keys(path, &disk_key, 1); 2594 } 2595 2596 /* 2597 * Check key order of two sibling extent buffers. 2598 * 2599 * Return true if something is wrong. 2600 * Return false if everything is fine. 2601 * 2602 * Tree-checker only works inside one tree block, thus the following 2603 * corruption cannot be detected by tree-checker: 2604 * 2605 * Leaf @left | Leaf @right 2606 * -------------------------------------------------------------- 2607 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 | 2608 * 2609 * Key f6 in leaf @left itself is valid, but not valid when the next 2610 * key in leaf @right is 7. 2611 * This can only be checked at tree block merge time. 2612 * And since the tree checker has ensured all key order in each tree block 2613 * is correct, we only need to check the last key of @left against the first 2614 * key of @right. 2615 */ 2616 static bool check_sibling_keys(struct extent_buffer *left, 2617 struct extent_buffer *right) 2618 { 2619 struct btrfs_key left_last; 2620 struct btrfs_key right_first; 2621 int level = btrfs_header_level(left); 2622 int nr_left = btrfs_header_nritems(left); 2623 int nr_right = btrfs_header_nritems(right); 2624 2625 /* No key to check in one of the tree blocks */ 2626 if (!nr_left || !nr_right) 2627 return false; 2628 2629 if (level) { 2630 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1); 2631 btrfs_node_key_to_cpu(right, &right_first, 0); 2632 } else { 2633 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1); 2634 btrfs_item_key_to_cpu(right, &right_first, 0); 2635 } 2636 2637 if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) { 2638 btrfs_crit(left->fs_info, 2639 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)", 2640 left_last.objectid, left_last.type, 2641 left_last.offset, right_first.objectid, 2642 right_first.type, right_first.offset); 2643 return true; 2644 } 2645 return false; 2646 } 2647 2648 /* 2649 * try to push data from one node into the next node left in the 2650 * tree.
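 * For example (illustrative): if @dst has room for 3 more key pointers
 * and @src holds 10, then with @empty not set only 2 pointers are
 * pushed, so that at least 8 remain in @src.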
2651 * 2652 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2653 * error, and > 0 if there was no room in the left hand block. 2654 */ 2655 static int push_node_left(struct btrfs_trans_handle *trans, 2656 struct extent_buffer *dst, 2657 struct extent_buffer *src, int empty) 2658 { 2659 struct btrfs_fs_info *fs_info = trans->fs_info; 2660 int push_items = 0; 2661 int src_nritems; 2662 int dst_nritems; 2663 int ret = 0; 2664 2665 src_nritems = btrfs_header_nritems(src); 2666 dst_nritems = btrfs_header_nritems(dst); 2667 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2668 WARN_ON(btrfs_header_generation(src) != trans->transid); 2669 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2670 2671 if (!empty && src_nritems <= 8) 2672 return 1; 2673 2674 if (push_items <= 0) 2675 return 1; 2676 2677 if (empty) { 2678 push_items = min(src_nritems, push_items); 2679 if (push_items < src_nritems) { 2680 /* leave at least 8 pointers in the node if 2681 * we aren't going to empty it 2682 */ 2683 if (src_nritems - push_items < 8) { 2684 if (push_items <= 8) 2685 return 1; 2686 push_items -= 8; 2687 } 2688 } 2689 } else 2690 push_items = min(src_nritems - 8, push_items); 2691 2692 /* dst is the left eb, src is the middle eb */ 2693 if (check_sibling_keys(dst, src)) { 2694 ret = -EUCLEAN; 2695 btrfs_abort_transaction(trans, ret); 2696 return ret; 2697 } 2698 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items); 2699 if (ret) { 2700 btrfs_abort_transaction(trans, ret); 2701 return ret; 2702 } 2703 copy_extent_buffer(dst, src, 2704 btrfs_node_key_ptr_offset(dst, dst_nritems), 2705 btrfs_node_key_ptr_offset(src, 0), 2706 push_items * sizeof(struct btrfs_key_ptr)); 2707 2708 if (push_items < src_nritems) { 2709 /* 2710 * Don't call btrfs_tree_mod_log_insert_move() here, key removal 2711 * was already fully logged by btrfs_tree_mod_log_eb_copy() above. 2712 */ 2713 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0), 2714 btrfs_node_key_ptr_offset(src, push_items), 2715 (src_nritems - push_items) * 2716 sizeof(struct btrfs_key_ptr)); 2717 } 2718 btrfs_set_header_nritems(src, src_nritems - push_items); 2719 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2720 btrfs_mark_buffer_dirty(src); 2721 btrfs_mark_buffer_dirty(dst); 2722 2723 return ret; 2724 } 2725 2726 /* 2727 * try to push data from one node into the next node right in the 2728 * tree. 2729 * 2730 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 2731 * error, and > 0 if there was no room in the right hand block. 
2732 * 2733 * this will only push up to 1/2 the contents of the left node over 2734 */ 2735 static int balance_node_right(struct btrfs_trans_handle *trans, 2736 struct extent_buffer *dst, 2737 struct extent_buffer *src) 2738 { 2739 struct btrfs_fs_info *fs_info = trans->fs_info; 2740 int push_items = 0; 2741 int max_push; 2742 int src_nritems; 2743 int dst_nritems; 2744 int ret = 0; 2745 2746 WARN_ON(btrfs_header_generation(src) != trans->transid); 2747 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2748 2749 src_nritems = btrfs_header_nritems(src); 2750 dst_nritems = btrfs_header_nritems(dst); 2751 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2752 if (push_items <= 0) 2753 return 1; 2754 2755 if (src_nritems < 4) 2756 return 1; 2757 2758 max_push = src_nritems / 2 + 1; 2759 /* don't try to empty the node */ 2760 if (max_push >= src_nritems) 2761 return 1; 2762 2763 if (max_push < push_items) 2764 push_items = max_push; 2765 2766 /* dst is the right eb, src is the middle eb */ 2767 if (check_sibling_keys(src, dst)) { 2768 ret = -EUCLEAN; 2769 btrfs_abort_transaction(trans, ret); 2770 return ret; 2771 } 2772 ret = btrfs_tree_mod_log_insert_move(dst, push_items, 0, dst_nritems); 2773 BUG_ON(ret < 0); 2774 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items), 2775 btrfs_node_key_ptr_offset(dst, 0), 2776 (dst_nritems) * 2777 sizeof(struct btrfs_key_ptr)); 2778 2779 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items, 2780 push_items); 2781 if (ret) { 2782 btrfs_abort_transaction(trans, ret); 2783 return ret; 2784 } 2785 copy_extent_buffer(dst, src, 2786 btrfs_node_key_ptr_offset(dst, 0), 2787 btrfs_node_key_ptr_offset(src, src_nritems - push_items), 2788 push_items * sizeof(struct btrfs_key_ptr)); 2789 2790 btrfs_set_header_nritems(src, src_nritems - push_items); 2791 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2792 2793 btrfs_mark_buffer_dirty(src); 2794 btrfs_mark_buffer_dirty(dst); 2795 2796 return ret; 2797 } 2798 2799 /* 2800 * helper function to insert a new root level in the tree. 2801 * A new node is allocated, and a single item is inserted to 2802 * point to the existing root 2803 * 2804 * returns zero on success or < 0 on failure. 
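 *
 * Illustrative sketch: for a tree whose root is block L at level N, a new
 * node C is allocated at level N + 1 with a single key pointer to L, and
 * root->node is switched from L to C:
 *
 *	before:  root->node -> L
 *	after:   root->node -> C -(ptr 0)-> L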
2805 */ 2806 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 2807 struct btrfs_root *root, 2808 struct btrfs_path *path, int level) 2809 { 2810 struct btrfs_fs_info *fs_info = root->fs_info; 2811 u64 lower_gen; 2812 struct extent_buffer *lower; 2813 struct extent_buffer *c; 2814 struct extent_buffer *old; 2815 struct btrfs_disk_key lower_key; 2816 int ret; 2817 2818 BUG_ON(path->nodes[level]); 2819 BUG_ON(path->nodes[level-1] != root->node); 2820 2821 lower = path->nodes[level-1]; 2822 if (level == 1) 2823 btrfs_item_key(lower, &lower_key, 0); 2824 else 2825 btrfs_node_key(lower, &lower_key, 0); 2826 2827 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 2828 &lower_key, level, root->node->start, 0, 2829 BTRFS_NESTING_NEW_ROOT); 2830 if (IS_ERR(c)) 2831 return PTR_ERR(c); 2832 2833 root_add_used(root, fs_info->nodesize); 2834 2835 btrfs_set_header_nritems(c, 1); 2836 btrfs_set_node_key(c, &lower_key, 0); 2837 btrfs_set_node_blockptr(c, 0, lower->start); 2838 lower_gen = btrfs_header_generation(lower); 2839 WARN_ON(lower_gen != trans->transid); 2840 2841 btrfs_set_node_ptr_generation(c, 0, lower_gen); 2842 2843 btrfs_mark_buffer_dirty(c); 2844 2845 old = root->node; 2846 ret = btrfs_tree_mod_log_insert_root(root->node, c, false); 2847 BUG_ON(ret < 0); 2848 rcu_assign_pointer(root->node, c); 2849 2850 /* the super has an extra ref to root->node */ 2851 free_extent_buffer(old); 2852 2853 add_root_to_dirty_list(root); 2854 atomic_inc(&c->refs); 2855 path->nodes[level] = c; 2856 path->locks[level] = BTRFS_WRITE_LOCK; 2857 path->slots[level] = 0; 2858 return 0; 2859 } 2860 2861 /* 2862 * worker function to insert a single pointer in a node. 2863 * The node should have enough room for the pointer already. 2864 * 2865 * slot and level indicate where you want the key to go, and 2866 * bytenr is the block the key points to. 2867 */ 2868 static void insert_ptr(struct btrfs_trans_handle *trans, 2869 struct btrfs_path *path, 2870 struct btrfs_disk_key *key, u64 bytenr, 2871 int slot, int level) 2872 { 2873 struct extent_buffer *lower; 2874 int nritems; 2875 int ret; 2876 2877 BUG_ON(!path->nodes[level]); 2878 btrfs_assert_tree_write_locked(path->nodes[level]); 2879 lower = path->nodes[level]; 2880 nritems = btrfs_header_nritems(lower); 2881 BUG_ON(slot > nritems); 2882 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info)); 2883 if (slot != nritems) { 2884 if (level) { 2885 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1, 2886 slot, nritems - slot); 2887 BUG_ON(ret < 0); 2888 } 2889 memmove_extent_buffer(lower, 2890 btrfs_node_key_ptr_offset(lower, slot + 1), 2891 btrfs_node_key_ptr_offset(lower, slot), 2892 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 2893 } 2894 if (level) { 2895 ret = btrfs_tree_mod_log_insert_key(lower, slot, 2896 BTRFS_MOD_LOG_KEY_ADD); 2897 BUG_ON(ret < 0); 2898 } 2899 btrfs_set_node_key(lower, key, slot); 2900 btrfs_set_node_blockptr(lower, slot, bytenr); 2901 WARN_ON(trans->transid == 0); 2902 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 2903 btrfs_set_header_nritems(lower, nritems + 1); 2904 btrfs_mark_buffer_dirty(lower); 2905 } 2906 2907 /* 2908 * split the node at the specified level in path in two. 2909 * The path is corrected to point to the appropriate node after the split 2910 * 2911 * Before splitting, this tries to make some room in the node by pushing 2912 * left and right; if either one works, it returns right away.
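 * For example (illustrative): a node holding N key pointers is split at
 * mid = (N + 1) / 2; pointers [mid, N - 1] go to the new sibling and the
 * original node is trimmed to mid entries.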
2913 * 2914 * returns 0 on success and < 0 on failure 2915 */ 2916 static noinline int split_node(struct btrfs_trans_handle *trans, 2917 struct btrfs_root *root, 2918 struct btrfs_path *path, int level) 2919 { 2920 struct btrfs_fs_info *fs_info = root->fs_info; 2921 struct extent_buffer *c; 2922 struct extent_buffer *split; 2923 struct btrfs_disk_key disk_key; 2924 int mid; 2925 int ret; 2926 u32 c_nritems; 2927 2928 c = path->nodes[level]; 2929 WARN_ON(btrfs_header_generation(c) != trans->transid); 2930 if (c == root->node) { 2931 /* 2932 * trying to split the root, let's make a new one 2933 * 2934 * tree mod log: We don't log the removal of the old root in 2935 * insert_new_root, because that root buffer will be kept as a 2936 * normal node. We are going to log removal of half of the 2937 * elements below with btrfs_tree_mod_log_eb_copy(). We're 2938 * holding a tree lock on the buffer, which is why we cannot 2939 * race with other tree_mod_log users. 2940 */ 2941 ret = insert_new_root(trans, root, path, level + 1); 2942 if (ret) 2943 return ret; 2944 } else { 2945 ret = push_nodes_for_insert(trans, root, path, level); 2946 c = path->nodes[level]; 2947 if (!ret && btrfs_header_nritems(c) < 2948 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) 2949 return 0; 2950 if (ret < 0) 2951 return ret; 2952 } 2953 2954 c_nritems = btrfs_header_nritems(c); 2955 mid = (c_nritems + 1) / 2; 2956 btrfs_node_key(c, &disk_key, mid); 2957 2958 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 2959 &disk_key, level, c->start, 0, 2960 BTRFS_NESTING_SPLIT); 2961 if (IS_ERR(split)) 2962 return PTR_ERR(split); 2963 2964 root_add_used(root, fs_info->nodesize); 2965 ASSERT(btrfs_header_level(c) == level); 2966 2967 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); 2968 if (ret) { 2969 btrfs_abort_transaction(trans, ret); 2970 return ret; 2971 } 2972 copy_extent_buffer(split, c, 2973 btrfs_node_key_ptr_offset(split, 0), 2974 btrfs_node_key_ptr_offset(c, mid), 2975 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 2976 btrfs_set_header_nritems(split, c_nritems - mid); 2977 btrfs_set_header_nritems(c, mid); 2978 2979 btrfs_mark_buffer_dirty(c); 2980 btrfs_mark_buffer_dirty(split); 2981 2982 insert_ptr(trans, path, &disk_key, split->start, 2983 path->slots[level + 1] + 1, level + 1); 2984 2985 if (path->slots[level] >= mid) { 2986 path->slots[level] -= mid; 2987 btrfs_tree_unlock(c); 2988 free_extent_buffer(c); 2989 path->nodes[level] = split; 2990 path->slots[level + 1] += 1; 2991 } else { 2992 btrfs_tree_unlock(split); 2993 free_extent_buffer(split); 2994 } 2995 return 0; 2996 } 2997 2998 /* 2999 * how many bytes are required to store the items in a leaf. start 3000 * and nr indicate which items in the leaf to check. This totals up the 3001 * space used both by the item structs and the item data 3002 */ 3003 static int leaf_space_used(struct extent_buffer *l, int start, int nr) 3004 { 3005 int data_len; 3006 int nritems = btrfs_header_nritems(l); 3007 int end = min(nritems, start + nr) - 1; 3008 3009 if (!nr) 3010 return 0; 3011 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start); 3012 data_len = data_len - btrfs_item_offset(l, end); 3013 data_len += sizeof(struct btrfs_item) * nr; 3014 WARN_ON(data_len < 0); 3015 return data_len; 3016 } 3017 3018 /* 3019 * The space between the end of the leaf items and 3020 * the start of the leaf data.
IOW, how much room 3021 * the leaf has left for both items and data 3022 */ 3023 noinline int btrfs_leaf_free_space(struct extent_buffer *leaf) 3024 { 3025 struct btrfs_fs_info *fs_info = leaf->fs_info; 3026 int nritems = btrfs_header_nritems(leaf); 3027 int ret; 3028 3029 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); 3030 if (ret < 0) { 3031 btrfs_crit(fs_info, 3032 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 3033 ret, 3034 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info), 3035 leaf_space_used(leaf, 0, nritems), nritems); 3036 } 3037 return ret; 3038 } 3039 3040 /* 3041 * min slot controls the lowest index we're willing to push to the 3042 * right. We'll push up to and including min_slot, but no lower 3043 */ 3044 static noinline int __push_leaf_right(struct btrfs_path *path, 3045 int data_size, int empty, 3046 struct extent_buffer *right, 3047 int free_space, u32 left_nritems, 3048 u32 min_slot) 3049 { 3050 struct btrfs_fs_info *fs_info = right->fs_info; 3051 struct extent_buffer *left = path->nodes[0]; 3052 struct extent_buffer *upper = path->nodes[1]; 3053 struct btrfs_map_token token; 3054 struct btrfs_disk_key disk_key; 3055 int slot; 3056 u32 i; 3057 int push_space = 0; 3058 int push_items = 0; 3059 u32 nr; 3060 u32 right_nritems; 3061 u32 data_end; 3062 u32 this_item_size; 3063 3064 if (empty) 3065 nr = 0; 3066 else 3067 nr = max_t(u32, 1, min_slot); 3068 3069 if (path->slots[0] >= left_nritems) 3070 push_space += data_size; 3071 3072 slot = path->slots[1]; 3073 i = left_nritems - 1; 3074 while (i >= nr) { 3075 if (!empty && push_items > 0) { 3076 if (path->slots[0] > i) 3077 break; 3078 if (path->slots[0] == i) { 3079 int space = btrfs_leaf_free_space(left); 3080 3081 if (space + push_space * 2 > free_space) 3082 break; 3083 } 3084 } 3085 3086 if (path->slots[0] == i) 3087 push_space += data_size; 3088 3089 this_item_size = btrfs_item_size(left, i); 3090 if (this_item_size + sizeof(struct btrfs_item) + 3091 push_space > free_space) 3092 break; 3093 3094 push_items++; 3095 push_space += this_item_size + sizeof(struct btrfs_item); 3096 if (i == 0) 3097 break; 3098 i--; 3099 } 3100 3101 if (push_items == 0) 3102 goto out_unlock; 3103 3104 WARN_ON(!empty && push_items == left_nritems); 3105 3106 /* push left to right */ 3107 right_nritems = btrfs_header_nritems(right); 3108 3109 push_space = btrfs_item_data_end(left, left_nritems - push_items); 3110 push_space -= leaf_data_end(left); 3111 3112 /* make room in the right data area */ 3113 data_end = leaf_data_end(right); 3114 memmove_leaf_data(right, data_end - push_space, data_end, 3115 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end); 3116 3117 /* copy from the left data area */ 3118 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3119 leaf_data_end(left), push_space); 3120 3121 memmove_leaf_items(right, push_items, 0, right_nritems); 3122 3123 /* copy the items from left to right */ 3124 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items); 3125 3126 /* update the item pointers */ 3127 btrfs_init_map_token(&token, right); 3128 right_nritems += push_items; 3129 btrfs_set_header_nritems(right, right_nritems); 3130 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3131 for (i = 0; i < right_nritems; i++) { 3132 push_space -= btrfs_token_item_size(&token, i); 3133 btrfs_set_token_item_offset(&token, i, push_space); 3134 } 3135 3136 left_nritems -= push_items; 3137 btrfs_set_header_nritems(left, left_nritems); 3138 3139 if (left_nritems) 3140 
btrfs_mark_buffer_dirty(left); 3141 else 3142 btrfs_clean_tree_block(left); 3143 3144 btrfs_mark_buffer_dirty(right); 3145 3146 btrfs_item_key(right, &disk_key, 0); 3147 btrfs_set_node_key(upper, &disk_key, slot + 1); 3148 btrfs_mark_buffer_dirty(upper); 3149 3150 /* then fixup the leaf pointer in the path */ 3151 if (path->slots[0] >= left_nritems) { 3152 path->slots[0] -= left_nritems; 3153 if (btrfs_header_nritems(path->nodes[0]) == 0) 3154 btrfs_clean_tree_block(path->nodes[0]); 3155 btrfs_tree_unlock(path->nodes[0]); 3156 free_extent_buffer(path->nodes[0]); 3157 path->nodes[0] = right; 3158 path->slots[1] += 1; 3159 } else { 3160 btrfs_tree_unlock(right); 3161 free_extent_buffer(right); 3162 } 3163 return 0; 3164 3165 out_unlock: 3166 btrfs_tree_unlock(right); 3167 free_extent_buffer(right); 3168 return 1; 3169 } 3170 3171 /* 3172 * push some data in the path leaf to the right, trying to free up at 3173 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3174 * 3175 * returns 1 if the push failed because the other node didn't have enough 3176 * room, 0 if everything worked out and < 0 if there were major errors. 3177 * 3178 * this will push starting from min_slot to the end of the leaf. It won't 3179 * push any slot lower than min_slot 3180 */ 3181 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3182 *root, struct btrfs_path *path, 3183 int min_data_size, int data_size, 3184 int empty, u32 min_slot) 3185 { 3186 struct extent_buffer *left = path->nodes[0]; 3187 struct extent_buffer *right; 3188 struct extent_buffer *upper; 3189 int slot; 3190 int free_space; 3191 u32 left_nritems; 3192 int ret; 3193 3194 if (!path->nodes[1]) 3195 return 1; 3196 3197 slot = path->slots[1]; 3198 upper = path->nodes[1]; 3199 if (slot >= btrfs_header_nritems(upper) - 1) 3200 return 1; 3201 3202 btrfs_assert_tree_write_locked(path->nodes[1]); 3203 3204 right = btrfs_read_node_slot(upper, slot + 1); 3205 /* 3206 * slot + 1 is not valid or we fail to read the right node, 3207 * no big deal, just return. 3208 */ 3209 if (IS_ERR(right)) 3210 return 1; 3211 3212 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT); 3213 3214 free_space = btrfs_leaf_free_space(right); 3215 if (free_space < data_size) 3216 goto out_unlock; 3217 3218 ret = btrfs_cow_block(trans, root, right, upper, 3219 slot + 1, &right, BTRFS_NESTING_RIGHT_COW); 3220 if (ret) 3221 goto out_unlock; 3222 3223 left_nritems = btrfs_header_nritems(left); 3224 if (left_nritems == 0) 3225 goto out_unlock; 3226 3227 if (check_sibling_keys(left, right)) { 3228 ret = -EUCLEAN; 3229 btrfs_tree_unlock(right); 3230 free_extent_buffer(right); 3231 return ret; 3232 } 3233 if (path->slots[0] == left_nritems && !empty) { 3234 /* Key greater than all keys in the leaf, right neighbor has 3235 * enough room for it and we're not emptying our leaf to delete 3236 * it, therefore use right neighbor to insert the new item and 3237 * no need to touch/dirty our left leaf. */ 3238 btrfs_tree_unlock(left); 3239 free_extent_buffer(left); 3240 path->nodes[0] = right; 3241 path->slots[0] = 0; 3242 path->slots[1]++; 3243 return 0; 3244 } 3245 3246 return __push_leaf_right(path, min_data_size, empty, 3247 right, free_space, left_nritems, min_slot); 3248 out_unlock: 3249 btrfs_tree_unlock(right); 3250 free_extent_buffer(right); 3251 return 1; 3252 } 3253 3254 /* 3255 * push some data in the path leaf to the left, trying to free up at 3256 * least data_size bytes. 
returns zero if the push worked, nonzero otherwise 3257 * 3258 * max_slot can put a limit on how far into the leaf we'll push items. The 3259 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3260 * items 3261 */ 3262 static noinline int __push_leaf_left(struct btrfs_path *path, int data_size, 3263 int empty, struct extent_buffer *left, 3264 int free_space, u32 right_nritems, 3265 u32 max_slot) 3266 { 3267 struct btrfs_fs_info *fs_info = left->fs_info; 3268 struct btrfs_disk_key disk_key; 3269 struct extent_buffer *right = path->nodes[0]; 3270 int i; 3271 int push_space = 0; 3272 int push_items = 0; 3273 u32 old_left_nritems; 3274 u32 nr; 3275 int ret = 0; 3276 u32 this_item_size; 3277 u32 old_left_item_size; 3278 struct btrfs_map_token token; 3279 3280 if (empty) 3281 nr = min(right_nritems, max_slot); 3282 else 3283 nr = min(right_nritems - 1, max_slot); 3284 3285 for (i = 0; i < nr; i++) { 3286 if (!empty && push_items > 0) { 3287 if (path->slots[0] < i) 3288 break; 3289 if (path->slots[0] == i) { 3290 int space = btrfs_leaf_free_space(right); 3291 3292 if (space + push_space * 2 > free_space) 3293 break; 3294 } 3295 } 3296 3297 if (path->slots[0] == i) 3298 push_space += data_size; 3299 3300 this_item_size = btrfs_item_size(right, i); 3301 if (this_item_size + sizeof(struct btrfs_item) + push_space > 3302 free_space) 3303 break; 3304 3305 push_items++; 3306 push_space += this_item_size + sizeof(struct btrfs_item); 3307 } 3308 3309 if (push_items == 0) { 3310 ret = 1; 3311 goto out; 3312 } 3313 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 3314 3315 /* push data from right to left */ 3316 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items); 3317 3318 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) - 3319 btrfs_item_offset(right, push_items - 1); 3320 3321 copy_leaf_data(left, right, leaf_data_end(left) - push_space, 3322 btrfs_item_offset(right, push_items - 1), push_space); 3323 old_left_nritems = btrfs_header_nritems(left); 3324 BUG_ON(old_left_nritems <= 0); 3325 3326 btrfs_init_map_token(&token, left); 3327 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1); 3328 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3329 u32 ioff; 3330 3331 ioff = btrfs_token_item_offset(&token, i); 3332 btrfs_set_token_item_offset(&token, i, 3333 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 3334 } 3335 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3336 3337 /* fixup right node */ 3338 if (push_items > right_nritems) 3339 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3340 right_nritems); 3341 3342 if (push_items < right_nritems) { 3343 push_space = btrfs_item_offset(right, push_items - 1) - 3344 leaf_data_end(right); 3345 memmove_leaf_data(right, 3346 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3347 leaf_data_end(right), push_space); 3348 3349 memmove_leaf_items(right, 0, push_items, 3350 btrfs_header_nritems(right) - push_items); 3351 } 3352 3353 btrfs_init_map_token(&token, right); 3354 right_nritems -= push_items; 3355 btrfs_set_header_nritems(right, right_nritems); 3356 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3357 for (i = 0; i < right_nritems; i++) { 3358 push_space = push_space - btrfs_token_item_size(&token, i); 3359 btrfs_set_token_item_offset(&token, i, push_space); 3360 } 3361 3362 btrfs_mark_buffer_dirty(left); 3363 if (right_nritems) 3364 btrfs_mark_buffer_dirty(right); 3365 else 3366 btrfs_clean_tree_block(right); 3367 3368 btrfs_item_key(right, &disk_key,
0); 3369 fixup_low_keys(path, &disk_key, 1); 3370 3371 /* then fixup the leaf pointer in the path */ 3372 if (path->slots[0] < push_items) { 3373 path->slots[0] += old_left_nritems; 3374 btrfs_tree_unlock(path->nodes[0]); 3375 free_extent_buffer(path->nodes[0]); 3376 path->nodes[0] = left; 3377 path->slots[1] -= 1; 3378 } else { 3379 btrfs_tree_unlock(left); 3380 free_extent_buffer(left); 3381 path->slots[0] -= push_items; 3382 } 3383 BUG_ON(path->slots[0] < 0); 3384 return ret; 3385 out: 3386 btrfs_tree_unlock(left); 3387 free_extent_buffer(left); 3388 return ret; 3389 } 3390 3391 /* 3392 * push some data in the path leaf to the left, trying to free up at 3393 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3394 * 3395 * max_slot can put a limit on how far into the leaf we'll push items. The 3396 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3397 * items 3398 */ 3399 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3400 *root, struct btrfs_path *path, int min_data_size, 3401 int data_size, int empty, u32 max_slot) 3402 { 3403 struct extent_buffer *right = path->nodes[0]; 3404 struct extent_buffer *left; 3405 int slot; 3406 int free_space; 3407 u32 right_nritems; 3408 int ret = 0; 3409 3410 slot = path->slots[1]; 3411 if (slot == 0) 3412 return 1; 3413 if (!path->nodes[1]) 3414 return 1; 3415 3416 right_nritems = btrfs_header_nritems(right); 3417 if (right_nritems == 0) 3418 return 1; 3419 3420 btrfs_assert_tree_write_locked(path->nodes[1]); 3421 3422 left = btrfs_read_node_slot(path->nodes[1], slot - 1); 3423 /* 3424 * slot - 1 is not valid or we fail to read the left node, 3425 * no big deal, just return. 3426 */ 3427 if (IS_ERR(left)) 3428 return 1; 3429 3430 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT); 3431 3432 free_space = btrfs_leaf_free_space(left); 3433 if (free_space < data_size) { 3434 ret = 1; 3435 goto out; 3436 } 3437 3438 ret = btrfs_cow_block(trans, root, left, 3439 path->nodes[1], slot - 1, &left, 3440 BTRFS_NESTING_LEFT_COW); 3441 if (ret) { 3442 /* we hit -ENOSPC, but it isn't fatal here */ 3443 if (ret == -ENOSPC) 3444 ret = 1; 3445 goto out; 3446 } 3447 3448 if (check_sibling_keys(left, right)) { 3449 ret = -EUCLEAN; 3450 goto out; 3451 } 3452 return __push_leaf_left(path, min_data_size, 3453 empty, left, free_space, right_nritems, 3454 max_slot); 3455 out: 3456 btrfs_tree_unlock(left); 3457 free_extent_buffer(left); 3458 return ret; 3459 } 3460 3461 /* 3462 * split the path's leaf in two, making sure there is at least data_size 3463 * available for the resulting leaf level of the path. 
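 *
 * Illustrative sketch: items [mid, nritems - 1] and their data move from
 * leaf 'l' into the new leaf 'right', 'l' is trimmed to mid items, and a
 * pointer to 'right' is inserted in the parent at path->slots[1] + 1.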
3464 */ 3465 static noinline void copy_for_split(struct btrfs_trans_handle *trans, 3466 struct btrfs_path *path, 3467 struct extent_buffer *l, 3468 struct extent_buffer *right, 3469 int slot, int mid, int nritems) 3470 { 3471 struct btrfs_fs_info *fs_info = trans->fs_info; 3472 int data_copy_size; 3473 int rt_data_off; 3474 int i; 3475 struct btrfs_disk_key disk_key; 3476 struct btrfs_map_token token; 3477 3478 nritems = nritems - mid; 3479 btrfs_set_header_nritems(right, nritems); 3480 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l); 3481 3482 copy_leaf_items(right, l, 0, mid, nritems); 3483 3484 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size, 3485 leaf_data_end(l), data_copy_size); 3486 3487 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid); 3488 3489 btrfs_init_map_token(&token, right); 3490 for (i = 0; i < nritems; i++) { 3491 u32 ioff; 3492 3493 ioff = btrfs_token_item_offset(&token, i); 3494 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off); 3495 } 3496 3497 btrfs_set_header_nritems(l, mid); 3498 btrfs_item_key(right, &disk_key, 0); 3499 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); 3500 3501 btrfs_mark_buffer_dirty(right); 3502 btrfs_mark_buffer_dirty(l); 3503 BUG_ON(path->slots[0] != slot); 3504 3505 if (mid <= slot) { 3506 btrfs_tree_unlock(path->nodes[0]); 3507 free_extent_buffer(path->nodes[0]); 3508 path->nodes[0] = right; 3509 path->slots[0] -= mid; 3510 path->slots[1] += 1; 3511 } else { 3512 btrfs_tree_unlock(right); 3513 free_extent_buffer(right); 3514 } 3515 3516 BUG_ON(path->slots[0] < 0); 3517 } 3518 3519 /* 3520 * double splits happen when we need to insert a big item in the middle 3521 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3522 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3523 * A B C 3524 * 3525 * We avoid this by trying to push the items on either side of our target 3526 * into the adjacent leaves. If all goes well we can avoid the double split 3527 * completely. 3528 */ 3529 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3530 struct btrfs_root *root, 3531 struct btrfs_path *path, 3532 int data_size) 3533 { 3534 int ret; 3535 int progress = 0; 3536 int slot; 3537 u32 nritems; 3538 int space_needed = data_size; 3539 3540 slot = path->slots[0]; 3541 if (slot < btrfs_header_nritems(path->nodes[0])) 3542 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3543 3544 /* 3545 * try to push all the items after our slot into the 3546 * right leaf 3547 */ 3548 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 3549 if (ret < 0) 3550 return ret; 3551 3552 if (ret == 0) 3553 progress++; 3554 3555 nritems = btrfs_header_nritems(path->nodes[0]); 3556 /* 3557 * our goal is to get our slot at the start or end of a leaf. 
If 3558 * we've done so, we're done 3559 */ 3560 if (path->slots[0] == 0 || path->slots[0] == nritems) 3561 return 0; 3562 3563 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3564 return 0; 3565 3566 /* try to push all the items before our slot into the left leaf */ 3567 slot = path->slots[0]; 3568 space_needed = data_size; 3569 if (slot > 0) 3570 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3571 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 3572 if (ret < 0) 3573 return ret; 3574 3575 if (ret == 0) 3576 progress++; 3577 3578 if (progress) 3579 return 0; 3580 return 1; 3581 } 3582 3583 /* 3584 * split the path's leaf in two, making sure there is at least data_size 3585 * available for the resulting leaf level of the path. 3586 * 3587 * returns 0 if all went well and < 0 on failure. 3588 */ 3589 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3590 struct btrfs_root *root, 3591 const struct btrfs_key *ins_key, 3592 struct btrfs_path *path, int data_size, 3593 int extend) 3594 { 3595 struct btrfs_disk_key disk_key; 3596 struct extent_buffer *l; 3597 u32 nritems; 3598 int mid; 3599 int slot; 3600 struct extent_buffer *right; 3601 struct btrfs_fs_info *fs_info = root->fs_info; 3602 int ret = 0; 3603 int wret; 3604 int split; 3605 int num_doubles = 0; 3606 int tried_avoid_double = 0; 3607 3608 l = path->nodes[0]; 3609 slot = path->slots[0]; 3610 if (extend && data_size + btrfs_item_size(l, slot) + 3611 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info)) 3612 return -EOVERFLOW; 3613 3614 /* first try to make some room by pushing left and right */ 3615 if (data_size && path->nodes[1]) { 3616 int space_needed = data_size; 3617 3618 if (slot < btrfs_header_nritems(l)) 3619 space_needed -= btrfs_leaf_free_space(l); 3620 3621 wret = push_leaf_right(trans, root, path, space_needed, 3622 space_needed, 0, 0); 3623 if (wret < 0) 3624 return wret; 3625 if (wret) { 3626 space_needed = data_size; 3627 if (slot > 0) 3628 space_needed -= btrfs_leaf_free_space(l); 3629 wret = push_leaf_left(trans, root, path, space_needed, 3630 space_needed, 0, (u32)-1); 3631 if (wret < 0) 3632 return wret; 3633 } 3634 l = path->nodes[0]; 3635 3636 /* did the pushes work?
*/ 3637 if (btrfs_leaf_free_space(l) >= data_size) 3638 return 0; 3639 } 3640 3641 if (!path->nodes[1]) { 3642 ret = insert_new_root(trans, root, path, 1); 3643 if (ret) 3644 return ret; 3645 } 3646 again: 3647 split = 1; 3648 l = path->nodes[0]; 3649 slot = path->slots[0]; 3650 nritems = btrfs_header_nritems(l); 3651 mid = (nritems + 1) / 2; 3652 3653 if (mid <= slot) { 3654 if (nritems == 1 || 3655 leaf_space_used(l, mid, nritems - mid) + data_size > 3656 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3657 if (slot >= nritems) { 3658 split = 0; 3659 } else { 3660 mid = slot; 3661 if (mid != nritems && 3662 leaf_space_used(l, mid, nritems - mid) + 3663 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3664 if (data_size && !tried_avoid_double) 3665 goto push_for_double; 3666 split = 2; 3667 } 3668 } 3669 } 3670 } else { 3671 if (leaf_space_used(l, 0, mid) + data_size > 3672 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3673 if (!extend && data_size && slot == 0) { 3674 split = 0; 3675 } else if ((extend || !data_size) && slot == 0) { 3676 mid = 1; 3677 } else { 3678 mid = slot; 3679 if (mid != nritems && 3680 leaf_space_used(l, mid, nritems - mid) + 3681 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3682 if (data_size && !tried_avoid_double) 3683 goto push_for_double; 3684 split = 2; 3685 } 3686 } 3687 } 3688 } 3689 3690 if (split == 0) 3691 btrfs_cpu_key_to_disk(&disk_key, ins_key); 3692 else 3693 btrfs_item_key(l, &disk_key, mid); 3694 3695 /* 3696 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double 3697 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES 3698 * subclasses, which is 8 at the time of this patch, and we've maxed it 3699 * out. In the future we could add a 3700 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just 3701 * use BTRFS_NESTING_NEW_ROOT. 3702 */ 3703 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3704 &disk_key, 0, l->start, 0, 3705 num_doubles ? BTRFS_NESTING_NEW_ROOT : 3706 BTRFS_NESTING_SPLIT); 3707 if (IS_ERR(right)) 3708 return PTR_ERR(right); 3709 3710 root_add_used(root, fs_info->nodesize); 3711 3712 if (split == 0) { 3713 if (mid <= slot) { 3714 btrfs_set_header_nritems(right, 0); 3715 insert_ptr(trans, path, &disk_key, 3716 right->start, path->slots[1] + 1, 1); 3717 btrfs_tree_unlock(path->nodes[0]); 3718 free_extent_buffer(path->nodes[0]); 3719 path->nodes[0] = right; 3720 path->slots[0] = 0; 3721 path->slots[1] += 1; 3722 } else { 3723 btrfs_set_header_nritems(right, 0); 3724 insert_ptr(trans, path, &disk_key, 3725 right->start, path->slots[1], 1); 3726 btrfs_tree_unlock(path->nodes[0]); 3727 free_extent_buffer(path->nodes[0]); 3728 path->nodes[0] = right; 3729 path->slots[0] = 0; 3730 if (path->slots[1] == 0) 3731 fixup_low_keys(path, &disk_key, 1); 3732 } 3733 /* 3734 * We create a new leaf 'right' for the required ins_len and 3735 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying 3736 * the new item of size ins_len into 'right'.
3737 */ 3738 return ret; 3739 } 3740 3741 copy_for_split(trans, path, l, right, slot, mid, nritems); 3742 3743 if (split == 2) { 3744 BUG_ON(num_doubles != 0); 3745 num_doubles++; 3746 goto again; 3747 } 3748 3749 return 0; 3750 3751 push_for_double: 3752 push_for_double_split(trans, root, path, data_size); 3753 tried_avoid_double = 1; 3754 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3755 return 0; 3756 goto again; 3757 } 3758 3759 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 3760 struct btrfs_root *root, 3761 struct btrfs_path *path, int ins_len) 3762 { 3763 struct btrfs_key key; 3764 struct extent_buffer *leaf; 3765 struct btrfs_file_extent_item *fi; 3766 u64 extent_len = 0; 3767 u32 item_size; 3768 int ret; 3769 3770 leaf = path->nodes[0]; 3771 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3772 3773 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 3774 key.type != BTRFS_EXTENT_CSUM_KEY); 3775 3776 if (btrfs_leaf_free_space(leaf) >= ins_len) 3777 return 0; 3778 3779 item_size = btrfs_item_size(leaf, path->slots[0]); 3780 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3781 fi = btrfs_item_ptr(leaf, path->slots[0], 3782 struct btrfs_file_extent_item); 3783 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3784 } 3785 btrfs_release_path(path); 3786 3787 path->keep_locks = 1; 3788 path->search_for_split = 1; 3789 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3790 path->search_for_split = 0; 3791 if (ret > 0) 3792 ret = -EAGAIN; 3793 if (ret < 0) 3794 goto err; 3795 3796 ret = -EAGAIN; 3797 leaf = path->nodes[0]; 3798 /* if our item isn't there, return now */ 3799 if (item_size != btrfs_item_size(leaf, path->slots[0])) 3800 goto err; 3801 3802 /* the leaf has changed, it now has room. return now */ 3803 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) 3804 goto err; 3805 3806 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3807 fi = btrfs_item_ptr(leaf, path->slots[0], 3808 struct btrfs_file_extent_item); 3809 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 3810 goto err; 3811 } 3812 3813 ret = split_leaf(trans, root, &key, path, ins_len, 1); 3814 if (ret) 3815 goto err; 3816 3817 path->keep_locks = 0; 3818 btrfs_unlock_up_safe(path, 1); 3819 return 0; 3820 err: 3821 path->keep_locks = 0; 3822 return ret; 3823 } 3824 3825 static noinline int split_item(struct btrfs_path *path, 3826 const struct btrfs_key *new_key, 3827 unsigned long split_offset) 3828 { 3829 struct extent_buffer *leaf; 3830 int orig_slot, slot; 3831 char *buf; 3832 u32 nritems; 3833 u32 item_size; 3834 u32 orig_offset; 3835 struct btrfs_disk_key disk_key; 3836 3837 leaf = path->nodes[0]; 3838 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)); 3839 3840 orig_slot = path->slots[0]; 3841 orig_offset = btrfs_item_offset(leaf, path->slots[0]); 3842 item_size = btrfs_item_size(leaf, path->slots[0]); 3843 3844 buf = kmalloc(item_size, GFP_NOFS); 3845 if (!buf) 3846 return -ENOMEM; 3847 3848 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, 3849 path->slots[0]), item_size); 3850 3851 slot = path->slots[0] + 1; 3852 nritems = btrfs_header_nritems(leaf); 3853 if (slot != nritems) { 3854 /* shift the items */ 3855 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot); 3856 } 3857 3858 btrfs_cpu_key_to_disk(&disk_key, new_key); 3859 btrfs_set_item_key(leaf, &disk_key, slot); 3860 3861 btrfs_set_item_offset(leaf, slot, orig_offset); 3862 btrfs_set_item_size(leaf, slot, item_size - split_offset); 3863 3864 btrfs_set_item_offset(leaf, orig_slot, 3865 orig_offset 
+ item_size - split_offset); 3866 btrfs_set_item_size(leaf, orig_slot, split_offset); 3867 3868 btrfs_set_header_nritems(leaf, nritems + 1); 3869 3870 /* write the data for the start of the original item */ 3871 write_extent_buffer(leaf, buf, 3872 btrfs_item_ptr_offset(leaf, path->slots[0]), 3873 split_offset); 3874 3875 /* write the data for the new item */ 3876 write_extent_buffer(leaf, buf + split_offset, 3877 btrfs_item_ptr_offset(leaf, slot), 3878 item_size - split_offset); 3879 btrfs_mark_buffer_dirty(leaf); 3880 3881 BUG_ON(btrfs_leaf_free_space(leaf) < 0); 3882 kfree(buf); 3883 return 0; 3884 } 3885 3886 /* 3887 * This function splits a single item into two items, 3888 * giving 'new_key' to the new item and splitting the 3889 * old one at split_offset (from the start of the item). 3890 * 3891 * The path may be released by this operation. After 3892 * the split, the path is pointing to the old item. The 3893 * new item is going to be in the same node as the old one. 3894 * 3895 * Note: the item being split must be small enough to live alone on 3896 * a tree block with room for one extra struct btrfs_item 3897 * 3898 * This allows us to split the item in place, keeping a lock on the 3899 * leaf the entire time. 3900 */ 3901 int btrfs_split_item(struct btrfs_trans_handle *trans, 3902 struct btrfs_root *root, 3903 struct btrfs_path *path, 3904 const struct btrfs_key *new_key, 3905 unsigned long split_offset) 3906 { 3907 int ret; 3908 ret = setup_leaf_for_split(trans, root, path, 3909 sizeof(struct btrfs_item)); 3910 if (ret) 3911 return ret; 3912 3913 ret = split_item(path, new_key, split_offset); 3914 return ret; 3915 } 3916 3917 /* 3918 * make the item pointed to by the path smaller. new_size indicates 3919 * how small to make it, and from_end tells us if we just chop bytes 3920 * off the end of the item or if we shift the item to chop bytes off 3921 * the front. 3922 */ 3923 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end) 3924 { 3925 int slot; 3926 struct extent_buffer *leaf; 3927 u32 nritems; 3928 unsigned int data_end; 3929 unsigned int old_data_start; 3930 unsigned int old_size; 3931 unsigned int size_diff; 3932 int i; 3933 struct btrfs_map_token token; 3934 3935 leaf = path->nodes[0]; 3936 slot = path->slots[0]; 3937 3938 old_size = btrfs_item_size(leaf, slot); 3939 if (old_size == new_size) 3940 return; 3941 3942 nritems = btrfs_header_nritems(leaf); 3943 data_end = leaf_data_end(leaf); 3944 3945 old_data_start = btrfs_item_offset(leaf, slot); 3946 3947 size_diff = old_size - new_size; 3948 3949 BUG_ON(slot < 0); 3950 BUG_ON(slot >= nritems); 3951 3952 /* 3953 * item0..itemN ... dataN.offset..dataN.size ..

/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
{
        int slot;
        struct extent_buffer *leaf;
        u32 nritems;
        unsigned int data_end;
        unsigned int old_data_start;
        unsigned int old_size;
        unsigned int size_diff;
        int i;
        struct btrfs_map_token token;

        leaf = path->nodes[0];
        slot = path->slots[0];

        old_size = btrfs_item_size(leaf, slot);
        if (old_size == new_size)
                return;

        nritems = btrfs_header_nritems(leaf);
        data_end = leaf_data_end(leaf);

        old_data_start = btrfs_item_offset(leaf, slot);

        size_diff = old_size - new_size;

        BUG_ON(slot < 0);
        BUG_ON(slot >= nritems);

        /*
         * item0..itemN ... dataN.offset..dataN.size .. data0.size
         */
        /* first correct the data pointers */
        btrfs_init_map_token(&token, leaf);
        for (i = slot; i < nritems; i++) {
                u32 ioff;

                ioff = btrfs_token_item_offset(&token, i);
                btrfs_set_token_item_offset(&token, i, ioff + size_diff);
        }

        /* shift the data */
        if (from_end) {
                memmove_leaf_data(leaf, data_end + size_diff, data_end,
                                  old_data_start + new_size - data_end);
        } else {
                struct btrfs_disk_key disk_key;
                u64 offset;

                btrfs_item_key(leaf, &disk_key, slot);

                if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
                        unsigned long ptr;
                        struct btrfs_file_extent_item *fi;

                        fi = btrfs_item_ptr(leaf, slot,
                                            struct btrfs_file_extent_item);
                        fi = (struct btrfs_file_extent_item *)(
                             (unsigned long)fi - size_diff);

                        if (btrfs_file_extent_type(leaf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE) {
                                ptr = btrfs_item_ptr_offset(leaf, slot);
                                memmove_extent_buffer(leaf, ptr,
                                      (unsigned long)fi,
                                      BTRFS_FILE_EXTENT_INLINE_DATA_START);
                        }
                }

                memmove_leaf_data(leaf, data_end + size_diff, data_end,
                                  old_data_start - data_end);

                offset = btrfs_disk_key_offset(&disk_key);
                btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
                btrfs_set_item_key(leaf, &disk_key, slot);
                if (slot == 0)
                        fixup_low_keys(path, &disk_key, 1);
        }

        btrfs_set_item_size(leaf, slot, new_size);
        btrfs_mark_buffer_dirty(leaf);

        if (btrfs_leaf_free_space(leaf) < 0) {
                btrfs_print_leaf(leaf);
                BUG();
        }
}

/*
 * make the item pointed to by the path bigger, data_size is the added size.
 */
void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
{
        int slot;
        struct extent_buffer *leaf;
        u32 nritems;
        unsigned int data_end;
        unsigned int old_data;
        unsigned int old_size;
        int i;
        struct btrfs_map_token token;

        leaf = path->nodes[0];

        nritems = btrfs_header_nritems(leaf);
        data_end = leaf_data_end(leaf);

        if (btrfs_leaf_free_space(leaf) < data_size) {
                btrfs_print_leaf(leaf);
                BUG();
        }
        slot = path->slots[0];
        old_data = btrfs_item_data_end(leaf, slot);

        BUG_ON(slot < 0);
        if (slot >= nritems) {
                btrfs_print_leaf(leaf);
                btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
                           slot, nritems);
                BUG();
        }

        /*
         * item0..itemN ... dataN.offset..dataN.size .. data0.size
         */
        /* first correct the data pointers */
        btrfs_init_map_token(&token, leaf);
        for (i = slot; i < nritems; i++) {
                u32 ioff;

                ioff = btrfs_token_item_offset(&token, i);
                btrfs_set_token_item_offset(&token, i, ioff - data_size);
        }

        /* shift the data */
        memmove_leaf_data(leaf, data_end - data_size, data_end,
                          old_data - data_end);

        data_end = old_data;
        old_size = btrfs_item_size(leaf, slot);
        btrfs_set_item_size(leaf, slot, old_size + data_size);
        btrfs_mark_buffer_dirty(leaf);

        if (btrfs_leaf_free_space(leaf) < 0) {
                btrfs_print_leaf(leaf);
                BUG();
        }
}
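
/*
 * Orientation diagram (added for clarity, not upstream text): both helpers
 * above depend on the leaf layout where item headers grow from the front
 * of the block while item data grows back from the end:
 *
 *      [hdr][item0][item1]..[itemN] ...free space... [dataN]..[data1][data0]
 *
 * Shrinking or growing one item's data therefore means moving every data
 * blob between it and the free space, then patching the affected
 * btrfs_item offsets - the token loop plus memmove_leaf_data() pattern
 * used above.
 */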

/*
 * Make space in the node before inserting one or more items.
 *
 * @root:       root we are inserting items to
 * @path:       points to the leaf/slot where we are going to insert new items
 * @batch:      information about the batch of items to insert
 *
 * Main purpose is to save stack depth by doing the bulk of the work in a
 * function that doesn't call btrfs_search_slot
 */
static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
                                   const struct btrfs_item_batch *batch)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        int i;
        u32 nritems;
        unsigned int data_end;
        struct btrfs_disk_key disk_key;
        struct extent_buffer *leaf;
        int slot;
        struct btrfs_map_token token;
        u32 total_size;

        /*
         * Before anything else, update keys in the parent and other ancestors
         * if needed, then release the write locks on them, so that other tasks
         * can use them while we modify the leaf.
         */
        if (path->slots[0] == 0) {
                btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
                fixup_low_keys(path, &disk_key, 1);
        }
        btrfs_unlock_up_safe(path, 1);

        leaf = path->nodes[0];
        slot = path->slots[0];

        nritems = btrfs_header_nritems(leaf);
        data_end = leaf_data_end(leaf);
        total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));

        if (btrfs_leaf_free_space(leaf) < total_size) {
                btrfs_print_leaf(leaf);
                btrfs_crit(fs_info, "not enough freespace need %u have %d",
                           total_size, btrfs_leaf_free_space(leaf));
                BUG();
        }

        btrfs_init_map_token(&token, leaf);
        if (slot != nritems) {
                unsigned int old_data = btrfs_item_data_end(leaf, slot);

                if (old_data < data_end) {
                        btrfs_print_leaf(leaf);
                        btrfs_crit(fs_info,
                "item at slot %d with data offset %u beyond data end of leaf %u",
                                   slot, old_data, data_end);
                        BUG();
                }
                /*
                 * item0..itemN ... dataN.offset..dataN.size .. data0.size
                 */
                /* first correct the data pointers */
                for (i = slot; i < nritems; i++) {
                        u32 ioff;

                        ioff = btrfs_token_item_offset(&token, i);
                        btrfs_set_token_item_offset(&token, i,
                                                    ioff - batch->total_data_size);
                }
                /* shift the items */
                memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot);

                /* shift the data */
                memmove_leaf_data(leaf, data_end - batch->total_data_size,
                                  data_end, old_data - data_end);
                data_end = old_data;
        }

        /* setup the item for the new data */
        for (i = 0; i < batch->nr; i++) {
                btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
                btrfs_set_item_key(leaf, &disk_key, slot + i);
                data_end -= batch->data_sizes[i];
                btrfs_set_token_item_offset(&token, slot + i, data_end);
                btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]);
        }

        btrfs_set_header_nritems(leaf, nritems + batch->nr);
        btrfs_mark_buffer_dirty(leaf);

        if (btrfs_leaf_free_space(leaf) < 0) {
                btrfs_print_leaf(leaf);
                BUG();
        }
}
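
/*
 * A minimal sketch, using hypothetical keys and sizes, of how a batch for
 * setup_items_for_insert() is assembled (compare the single item wrapper
 * btrfs_setup_item_for_insert() below):
 *
 *      struct btrfs_key keys[2] = { first_key, second_key };
 *      u32 sizes[2] = { 16, 32 };
 *      struct btrfs_item_batch batch = {
 *              .keys = keys,
 *              .data_sizes = sizes,
 *              .total_data_size = 16 + 32,
 *              .nr = 2,
 *      };
 *
 * The keys must be in sorted order and total_data_size must equal the sum
 * of data_sizes, as the free space check above uses it directly.
 */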

/*
 * Insert a new item into a leaf.
 *
 * @root:      The root of the btree.
 * @path:      A path pointing to the target leaf and slot.
 * @key:       The key of the new item.
 * @data_size: The size of the data associated with the new key.
 */
void btrfs_setup_item_for_insert(struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 const struct btrfs_key *key,
                                 u32 data_size)
{
        struct btrfs_item_batch batch;

        batch.keys = key;
        batch.data_sizes = &data_size;
        batch.total_data_size = data_size;
        batch.nr = 1;

        setup_items_for_insert(root, path, &batch);
}

/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_path *path,
                             const struct btrfs_item_batch *batch)
{
        int ret = 0;
        int slot;
        u32 total_size;

        total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
        ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1);
        if (ret == 0)
                return -EEXIST;
        if (ret < 0)
                return ret;

        slot = path->slots[0];
        BUG_ON(slot < 0);

        setup_items_for_insert(root, path, batch);
        return 0;
}

/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      const struct btrfs_key *cpu_key, void *data,
                      u32 data_size)
{
        int ret = 0;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        unsigned long ptr;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
        if (!ret) {
                leaf = path->nodes[0];
                ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
                write_extent_buffer(leaf, data, ptr, data_size);
                btrfs_mark_buffer_dirty(leaf);
        }
        btrfs_free_path(path);
        return ret;
}

/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item is
 * contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the leaf
 * the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         struct btrfs_path *path,
                         const struct btrfs_key *new_key)
{
        struct extent_buffer *leaf;
        int ret;
        u32 item_size;

        leaf = path->nodes[0];
        item_size = btrfs_item_size(leaf, path->slots[0]);
        ret = setup_leaf_for_split(trans, root, path,
                                   item_size + sizeof(struct btrfs_item));
        if (ret)
                return ret;

        path->slots[0]++;
        btrfs_setup_item_for_insert(root, path, new_key, item_size);
        leaf = path->nodes[0];
        memcpy_extent_buffer(leaf,
                             btrfs_item_ptr_offset(leaf, path->slots[0]),
                             btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
                             item_size);
        return 0;
}
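
/*
 * A usage sketch for btrfs_insert_item(), with a made-up key and a
 * hypothetical payload struct (real callers supply their own item type):
 *
 *      struct btrfs_key key = { .objectid = objectid,
 *                               .type = BTRFS_EXTENT_CSUM_KEY,
 *                               .offset = bytenr };
 *      struct my_item data = { ... };
 *
 *      ret = btrfs_insert_item(trans, root, &key, &data, sizeof(data));
 *
 * On success the payload bytes are copied into the leaf; -EEXIST means an
 * item with the same key already exists.
 */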

/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
                    int level, int slot)
{
        struct extent_buffer *parent = path->nodes[level];
        u32 nritems;
        int ret;

        nritems = btrfs_header_nritems(parent);
        if (slot != nritems - 1) {
                if (level) {
                        ret = btrfs_tree_mod_log_insert_move(parent, slot,
                                        slot + 1, nritems - slot - 1);
                        BUG_ON(ret < 0);
                }
                memmove_extent_buffer(parent,
                              btrfs_node_key_ptr_offset(parent, slot),
                              btrfs_node_key_ptr_offset(parent, slot + 1),
                              sizeof(struct btrfs_key_ptr) *
                              (nritems - slot - 1));
        } else if (level) {
                ret = btrfs_tree_mod_log_insert_key(parent, slot,
                                                    BTRFS_MOD_LOG_KEY_REMOVE);
                BUG_ON(ret < 0);
        }

        nritems--;
        btrfs_set_header_nritems(parent, nritems);
        if (nritems == 0 && parent == root->node) {
                BUG_ON(btrfs_header_level(root->node) != 1);
                /* just turn the root into a leaf and break */
                btrfs_set_header_level(root->node, 0);
        } else if (slot == 0) {
                struct btrfs_disk_key disk_key;

                btrfs_node_key(parent, &disk_key, 0);
                fixup_low_keys(path, &disk_key, level + 1);
        }
        btrfs_mark_buffer_dirty(parent);
}

/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf block
 * extent.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing. path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct extent_buffer *leaf)
{
        WARN_ON(btrfs_header_generation(leaf) != trans->transid);
        del_ptr(root, path, 1, path->slots[1]);

        /*
         * btrfs_free_extent is expensive, we want to make sure we
         * aren't holding any locks when we call it
         */
        btrfs_unlock_up_safe(path, 0);

        root_sub_used(root, leaf->len);

        atomic_inc(&leaf->refs);
        btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
        free_extent_buffer_stale(leaf);
}

/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                    struct btrfs_path *path, int slot, int nr)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *leaf;
        int ret = 0;
        int wret;
        u32 nritems;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        if (slot + nr != nritems) {
                const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1);
                const int data_end = leaf_data_end(leaf);
                struct btrfs_map_token token;
                u32 dsize = 0;
                int i;

                for (i = 0; i < nr; i++)
                        dsize += btrfs_item_size(leaf, slot + i);

                memmove_leaf_data(leaf, data_end + dsize, data_end,
                                  last_off - data_end);

                btrfs_init_map_token(&token, leaf);
                for (i = slot + nr; i < nritems; i++) {
                        u32 ioff;

                        ioff = btrfs_token_item_offset(&token, i);
                        btrfs_set_token_item_offset(&token, i, ioff + dsize);
                }

                memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr);
        }
        btrfs_set_header_nritems(leaf, nritems - nr);
        nritems -= nr;

        /* delete the leaf if we've emptied it */
        if (nritems == 0) {
                if (leaf == root->node) {
                        btrfs_set_header_level(leaf, 0);
                } else {
                        btrfs_clean_tree_block(leaf);
                        btrfs_del_leaf(trans, root, path, leaf);
                }
        } else {
                int used = leaf_space_used(leaf, 0, nritems);
                if (slot == 0) {
                        struct btrfs_disk_key disk_key;

                        btrfs_item_key(leaf, &disk_key, 0);
                        fixup_low_keys(path, &disk_key, 1);
                }

                /*
                 * Try to delete the leaf if it is mostly empty. We do this by
                 * trying to move all its items into its left and right neighbours.
                 * If we can't move all the items, then we don't delete it - it's
                 * not ideal, but future insertions might fill the leaf with more
                 * items, or items from other leaves might be moved later into our
                 * leaf due to deletions on those leaves.
                 */
                if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
                        u32 min_push_space;

                        /*
                         * push_leaf_left fixes the path.  make sure the path
                         * still points to our leaf for possible call to
                         * del_ptr below
                         */
                        slot = path->slots[1];
                        atomic_inc(&leaf->refs);
                        /*
                         * We want to be able to at least push one item to the
                         * left neighbour leaf, and that's the first item.
                         */
                        min_push_space = sizeof(struct btrfs_item) +
                                btrfs_item_size(leaf, 0);
                        wret = push_leaf_left(trans, root, path, 0,
                                              min_push_space, 1, (u32)-1);
                        if (wret < 0 && wret != -ENOSPC)
                                ret = wret;

                        if (path->nodes[0] == leaf &&
                            btrfs_header_nritems(leaf)) {
                                /*
                                 * If we were not able to push all items from our
                                 * leaf to its left neighbour, then attempt to
                                 * either push all the remaining items to the
                                 * right neighbour or none. There's no advantage
                                 * in pushing only some items, instead of all, as
                                 * it's pointless to end up with a leaf having
                                 * too few items while the neighbours can be full
                                 * or nearly full.
                                 */
                                nritems = btrfs_header_nritems(leaf);
                                min_push_space = leaf_space_used(leaf, 0, nritems);
                                wret = push_leaf_right(trans, root, path, 0,
                                                       min_push_space, 1, 0);
                                if (wret < 0 && wret != -ENOSPC)
                                        ret = wret;
                        }

                        if (btrfs_header_nritems(leaf) == 0) {
                                path->slots[1] = slot;
                                btrfs_del_leaf(trans, root, path, leaf);
                                free_extent_buffer(leaf);
                                ret = 0;
                        } else {
                                /*
                                 * if we're still in the path, make sure
                                 * we're dirty.  Otherwise, one of the
                                 * push_leaf functions must have already
                                 * dirtied this buffer
                                 */
                                if (path->nodes[0] == leaf)
                                        btrfs_mark_buffer_dirty(leaf);
                                free_extent_buffer(leaf);
                        }
                } else {
                        btrfs_mark_buffer_dirty(leaf);
                }
        }
        return ret;
}
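
/*
 * A minimal usage sketch (hypothetical caller): delete two adjacent items
 * starting at the slot the path points at; the path must come from a
 * btrfs_search_slot() done under the given transaction:
 *
 *      ret = btrfs_del_items(trans, root, path, path->slots[0], 2);
 *
 * The single item case is typically done through the btrfs_del_item()
 * wrapper, which passes nr == 1.
 */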

/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
        struct btrfs_key key;
        struct btrfs_disk_key found_key;
        int ret;

        btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

        if (key.offset > 0) {
                key.offset--;
        } else if (key.type > 0) {
                key.type--;
                key.offset = (u64)-1;
        } else if (key.objectid > 0) {
                key.objectid--;
                key.type = (u8)-1;
                key.offset = (u64)-1;
        } else {
                return 1;
        }

        btrfs_release_path(path);
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return ret;
        btrfs_item_key(path->nodes[0], &found_key, 0);
        ret = comp_keys(&found_key, &key);
        /*
         * We might have had an item with the previous key in the tree right
         * before we released our path. And after we released our path, that
         * item might have been pushed to the first slot (0) of the leaf we
         * were holding due to a tree balance. Alternatively, an item with the
         * previous key can exist as the only element of a leaf (big fat item).
         * Therefore account for these 2 cases, so that our callers (like
         * btrfs_previous_item) don't miss an existing item with a key matching
         * the previous key we computed above.
         */
        if (ret <= 0)
                return 0;
        return 1;
}
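
/*
 * Worked example (added for clarity): the key decrement above computes the
 * largest possible key strictly smaller than the current first key, e.g.
 * (256, X, 0) becomes (256, X - 1, (u64)-1), so the search below lands on
 * the rightmost item of the previous leaf when one exists.
 */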

/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code and tree logging.
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
                         struct btrfs_path *path,
                         u64 min_trans)
{
        struct extent_buffer *cur;
        struct btrfs_key found_key;
        int slot;
        int sret;
        u32 nritems;
        int level;
        int ret = 1;
        int keep_locks = path->keep_locks;

        ASSERT(!path->nowait);
        path->keep_locks = 1;
again:
        cur = btrfs_read_lock_root_node(root);
        level = btrfs_header_level(cur);
        WARN_ON(path->nodes[level]);
        path->nodes[level] = cur;
        path->locks[level] = BTRFS_READ_LOCK;

        if (btrfs_header_generation(cur) < min_trans) {
                ret = 1;
                goto out;
        }
        while (1) {
                nritems = btrfs_header_nritems(cur);
                level = btrfs_header_level(cur);
                sret = btrfs_bin_search(cur, min_key, &slot);
                if (sret < 0) {
                        ret = sret;
                        goto out;
                }

                /* at the lowest level, we're done, setup the path and exit */
                if (level == path->lowest_level) {
                        if (slot >= nritems)
                                goto find_next_key;
                        ret = 0;
                        path->slots[level] = slot;
                        btrfs_item_key_to_cpu(cur, &found_key, slot);
                        goto out;
                }
                if (sret && slot > 0)
                        slot--;
                /*
                 * check this node pointer against the min_trans parameters.
                 * If it is too old, skip to the next one.
                 */
                while (slot < nritems) {
                        u64 gen;

                        gen = btrfs_node_ptr_generation(cur, slot);
                        if (gen < min_trans) {
                                slot++;
                                continue;
                        }
                        break;
                }
find_next_key:
                /*
                 * we didn't find a candidate key in this node, walk forward
                 * and find another one
                 */
                if (slot >= nritems) {
                        path->slots[level] = slot;
                        sret = btrfs_find_next_key(root, path, min_key, level,
                                                   min_trans);
                        if (sret == 0) {
                                btrfs_release_path(path);
                                goto again;
                        } else {
                                goto out;
                        }
                }
                /* save our key for returning back */
                btrfs_node_key_to_cpu(cur, &found_key, slot);
                path->slots[level] = slot;
                if (level == path->lowest_level) {
                        ret = 0;
                        goto out;
                }
                cur = btrfs_read_node_slot(cur, slot);
                if (IS_ERR(cur)) {
                        ret = PTR_ERR(cur);
                        goto out;
                }

                btrfs_tree_read_lock(cur);

                path->locks[level - 1] = BTRFS_READ_LOCK;
                path->nodes[level - 1] = cur;
                unlock_up(path, level, 1, 0, NULL);
        }
out:
        path->keep_locks = keep_locks;
        if (ret == 0) {
                btrfs_unlock_up_safe(path, path->lowest_level + 1);
                memcpy(min_key, &found_key, sizeof(found_key));
        }
        return ret;
}
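
/*
 * A scan-loop sketch, assuming a read-only walk with a hypothetical
 * process_item() helper; this is roughly the shape in which the defrag
 * and tree-log code consume btrfs_search_forward(), restarting from the
 * key it hands back (a full walker would also handle type/objectid
 * overflow when advancing, elided here):
 *
 *      struct btrfs_key min_key = { 0 };
 *
 *      while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) {
 *              process_item(path, &min_key);
 *              btrfs_release_path(path);
 *              if (min_key.offset == (u64)-1)
 *                      break;
 *              min_key.offset++;
 *      }
 */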

/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
                        struct btrfs_key *key, int level, u64 min_trans)
{
        int slot;
        struct extent_buffer *c;

        WARN_ON(!path->keep_locks && !path->skip_locking);
        while (level < BTRFS_MAX_LEVEL) {
                if (!path->nodes[level])
                        return 1;

                slot = path->slots[level] + 1;
                c = path->nodes[level];
next:
                if (slot >= btrfs_header_nritems(c)) {
                        int ret;
                        int orig_lowest;
                        struct btrfs_key cur_key;
                        if (level + 1 >= BTRFS_MAX_LEVEL ||
                            !path->nodes[level + 1])
                                return 1;

                        if (path->locks[level + 1] || path->skip_locking) {
                                level++;
                                continue;
                        }

                        slot = btrfs_header_nritems(c) - 1;
                        if (level == 0)
                                btrfs_item_key_to_cpu(c, &cur_key, slot);
                        else
                                btrfs_node_key_to_cpu(c, &cur_key, slot);

                        orig_lowest = path->lowest_level;
                        btrfs_release_path(path);
                        path->lowest_level = level;
                        ret = btrfs_search_slot(NULL, root, &cur_key, path,
                                                0, 0);
                        path->lowest_level = orig_lowest;
                        if (ret < 0)
                                return ret;

                        c = path->nodes[level];
                        slot = path->slots[level];
                        if (ret == 0)
                                slot++;
                        goto next;
                }

                if (level == 0)
                        btrfs_item_key_to_cpu(c, key, slot);
                else {
                        u64 gen = btrfs_node_ptr_generation(c, slot);

                        if (gen < min_trans) {
                                slot++;
                                goto next;
                        }
                        btrfs_node_key_to_cpu(c, key, slot);
                }
                return 0;
        }
        return 1;
}

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
                        u64 time_seq)
{
        int slot;
        int level;
        struct extent_buffer *c;
        struct extent_buffer *next;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key key;
        bool need_commit_sem = false;
        u32 nritems;
        int ret;
        int i;

        /*
         * The nowait semantics are used only for write paths, where we don't
         * use the tree mod log and sequence numbers.
         */
        if (time_seq)
                ASSERT(!path->nowait);

        nritems = btrfs_header_nritems(path->nodes[0]);
        if (nritems == 0)
                return 1;

        btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
        level = 1;
        next = NULL;
        btrfs_release_path(path);

        path->keep_locks = 1;

        if (time_seq) {
                ret = btrfs_search_old_slot(root, &key, path, time_seq);
        } else {
                if (path->need_commit_sem) {
                        path->need_commit_sem = 0;
                        need_commit_sem = true;
                        if (path->nowait) {
                                if (!down_read_trylock(&fs_info->commit_root_sem)) {
                                        ret = -EAGAIN;
                                        goto done;
                                }
                        } else {
                                down_read(&fs_info->commit_root_sem);
                        }
                }
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        }
        path->keep_locks = 0;

        if (ret < 0)
                goto done;

        nritems = btrfs_header_nritems(path->nodes[0]);
        /*
         * by releasing the path above we dropped all our locks.  A balance
         * could have added more items next to the key that used to be
         * at the very end of the block.  So, check again here and
         * advance the path if there are now more items available.
         */
        if (nritems > 0 && path->slots[0] < nritems - 1) {
                if (ret == 0)
                        path->slots[0]++;
                ret = 0;
                goto done;
        }
        /*
         * So the above check misses one case:
         * - after releasing the path above, someone has removed the item that
         *   used to be at the very end of the block, and balance between leaves
         *   gets another one with bigger key.offset to replace it.
         *
         * This one should be returned as well, or we can get leaf corruption
         * later (especially in __btrfs_drop_extents()).
         *
         * And a bit more explanation about this check,
         * with ret > 0, the key isn't found, the path points to the slot
         * where it should be inserted, so the path->slots[0] item must be the
         * bigger one.
         */
        if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
                ret = 0;
                goto done;
        }

        while (level < BTRFS_MAX_LEVEL) {
                if (!path->nodes[level]) {
                        ret = 1;
                        goto done;
                }

                slot = path->slots[level] + 1;
                c = path->nodes[level];
                if (slot >= btrfs_header_nritems(c)) {
                        level++;
                        if (level == BTRFS_MAX_LEVEL) {
                                ret = 1;
                                goto done;
                        }
                        continue;
                }

                /*
                 * Our current level is where we're going to start from, and to
                 * make sure lockdep doesn't complain we need to drop our locks
                 * and nodes from 0 to our current level.
                 */
                for (i = 0; i < level; i++) {
                        if (path->locks[level]) {
                                btrfs_tree_read_unlock(path->nodes[i]);
                                path->locks[i] = 0;
                        }
                        free_extent_buffer(path->nodes[i]);
                        path->nodes[i] = NULL;
                }

                next = c;
                ret = read_block_for_search(root, path, &next, level,
                                            slot, &key);
                if (ret == -EAGAIN && !path->nowait)
                        goto again;

                if (ret < 0) {
                        btrfs_release_path(path);
                        goto done;
                }

                if (!path->skip_locking) {
                        ret = btrfs_try_tree_read_lock(next);
                        if (!ret && path->nowait) {
                                ret = -EAGAIN;
                                goto done;
                        }
                        if (!ret && time_seq) {
                                /*
                                 * If we don't get the lock, we may be racing
                                 * with push_leaf_left, holding that lock while
                                 * itself waiting for the leaf we've currently
                                 * locked.  To solve this situation, we give up
                                 * on our lock and cycle.
                                 */
                                free_extent_buffer(next);
                                btrfs_release_path(path);
                                cond_resched();
                                goto again;
                        }
                        if (!ret)
                                btrfs_tree_read_lock(next);
                }
                break;
        }
        path->slots[level] = slot;
        while (1) {
                level--;
                path->nodes[level] = next;
                path->slots[level] = 0;
                if (!path->skip_locking)
                        path->locks[level] = BTRFS_READ_LOCK;
                if (!level)
                        break;

                ret = read_block_for_search(root, path, &next, level,
                                            0, &key);
                if (ret == -EAGAIN && !path->nowait)
                        goto again;

                if (ret < 0) {
                        btrfs_release_path(path);
                        goto done;
                }

                if (!path->skip_locking) {
                        if (path->nowait) {
                                if (!btrfs_try_tree_read_lock(next)) {
                                        ret = -EAGAIN;
                                        goto done;
                                }
                        } else {
                                btrfs_tree_read_lock(next);
                        }
                }
        }
        ret = 0;
done:
        unlock_up(path, 0, 1, 0, NULL);
        if (need_commit_sem) {
                int ret2;

                path->need_commit_sem = 1;
                ret2 = finish_need_commit_sem_search(path);
                up_read(&fs_info->commit_root_sem);
                if (ret2)
                        ret = ret2;
        }

        return ret;
}
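
/*
 * An iteration sketch, assuming a search has already positioned the path
 * and time_seq == 0 (the current-tree case; the btrfs_next_leaf() and
 * btrfs_next_item() helpers are the usual wrappers for that):
 *
 *      while (1) {
 *              btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *              ... consume the item ...
 *              ret = btrfs_next_old_item(root, path, 0);
 *              if (ret)
 *                      break;  ... 1 means end of tree, < 0 an error ...
 *      }
 */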

int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq)
{
        path->slots[0]++;
        if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
                return btrfs_next_old_leaf(root, path, time_seq);
        return 0;
}

/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
                        struct btrfs_path *path, u64 min_objectid,
                        int type)
{
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;

        while (1) {
                if (path->slots[0] == 0) {
                        ret = btrfs_prev_leaf(root, path);
                        if (ret != 0)
                                return ret;
                } else {
                        path->slots[0]--;
                }
                leaf = path->nodes[0];
                nritems = btrfs_header_nritems(leaf);
                if (nritems == 0)
                        return 1;
                if (path->slots[0] == nritems)
                        path->slots[0]--;

                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.objectid < min_objectid)
                        break;
                if (found_key.type == type)
                        return 0;
                if (found_key.objectid == min_objectid &&
                    found_key.type < type)
                        break;
        }
        return 1;
}
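
/*
 * A short usage sketch with hypothetical arguments: step backwards from
 * wherever the path currently points to the closest preceding item of the
 * wanted type:
 *
 *      ret = btrfs_previous_item(root, path, objectid, BTRFS_ROOT_ITEM_KEY);
 *      if (ret == 0)
 *              btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *                                    path->slots[0]);
 *
 * ret == 1 means the walk got past min_objectid without finding an item of
 * the wanted type; ret < 0 is an I/O or search error.
 */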

/*
 * search in extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
                               struct btrfs_path *path, u64 min_objectid)
{
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;

        while (1) {
                if (path->slots[0] == 0) {
                        ret = btrfs_prev_leaf(root, path);
                        if (ret != 0)
                                return ret;
                } else {
                        path->slots[0]--;
                }
                leaf = path->nodes[0];
                nritems = btrfs_header_nritems(leaf);
                if (nritems == 0)
                        return 1;
                if (path->slots[0] == nritems)
                        path->slots[0]--;

                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.objectid < min_objectid)
                        break;
                if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
                    found_key.type == BTRFS_METADATA_ITEM_KEY)
                        return 0;
                if (found_key.objectid == min_objectid &&
                    found_key.type < BTRFS_EXTENT_ITEM_KEY)
                        break;
        }
        return 1;
}

int __init btrfs_ctree_init(void)
{
        btrfs_path_cachep = kmem_cache_create("btrfs_path",
                                              sizeof(struct btrfs_path), 0,
                                              SLAB_MEM_SPREAD, NULL);
        if (!btrfs_path_cachep)
                return -ENOMEM;
        return 0;
}

void __cold btrfs_ctree_exit(void)
{
        kmem_cache_destroy(btrfs_path_cachep);
}