// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include <linux/error-injection.h>
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "tree-mod-log.h"
#include "tree-checker.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "file-item.h"

static struct kmem_cache *btrfs_path_cachep;

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
/*
 * The leaf data grows from end-to-front in the node.  This returns the address
 * of the start of the last item, which is the stop of the leaf data stack.
 */
static unsigned int leaf_data_end(const struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);

	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
	return btrfs_item_offset(leaf, nr - 1);
}

/*
 * Move data in a @leaf (using memmove, safe for overlapping ranges).
 *
 * @leaf:	leaf that we're doing a memmove on
 * @dst_offset:	item data offset we're moving to
 * @src_offset:	item data offset we're moving from
 * @len:	length of the data we're moving
 *
 * Wrapper around memmove_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void memmove_leaf_data(const struct extent_buffer *leaf,
				     unsigned long dst_offset,
				     unsigned long src_offset,
				     unsigned long len)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
			      btrfs_item_nr_offset(leaf, 0) + src_offset, len);
}

/*
 * Copy item data from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf that we're copying into
 * @src:	source leaf that we're copying from
 * @dst_offset:	item data offset we're copying to
 * @src_offset:	item data offset we're copying from
 * @len:	length of the data we're copying
 *
 * Wrapper around copy_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void copy_leaf_data(const struct extent_buffer *dst,
				  const struct extent_buffer *src,
				  unsigned long dst_offset,
				  unsigned long src_offset, unsigned long len)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
			   btrfs_item_nr_offset(src, 0) + src_offset, len);
}
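
/*
 * For reference, an illustrative sketch of the leaf layout these helpers
 * operate on (not a byte-accurate map):
 *
 *	[leaf header][item 0][item 1] ... [free space] ... [data 1][data 0]
 *
 * Item headers grow forward from the end of the leaf header while item data
 * grows backward from the end of the block, so the free space sits in the
 * middle and leaf_data_end() above marks where the data stack currently stops.
 */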
/*
 * Move items in a @leaf (using memmove).
 *
 * @dst:	destination leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around memmove_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void memmove_leaf_items(const struct extent_buffer *leaf,
				      int dst_item, int src_item, int nr_items)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
			      btrfs_item_nr_offset(leaf, src_item),
			      nr_items * sizeof(struct btrfs_item));
}

/*
 * Copy items from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf for the items
 * @src:	source leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around copy_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void copy_leaf_items(const struct extent_buffer *dst,
				   const struct extent_buffer *src,
				   int dst_item, int src_item, int nr_items)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
			   btrfs_item_nr_offset(src, src_item),
			   nr_items * sizeof(struct btrfs_item));
}

struct btrfs_path *btrfs_alloc_path(void)
{
	might_sleep();

	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * Safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here; we could free up the root node because
		 * it was COWed but we may not get the new root node yet, so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
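
/*
 * A minimal usage sketch for btrfs_root_node(): the caller owns the returned
 * reference and must drop it when done, e.g.
 *
 *	struct extent_buffer *eb = btrfs_root_node(root);
 *
 *	... inspect eb, remembering it may no longer be the root ...
 *	free_extent_buffer(eb);
 */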
/*
 * Cow-only roots (non-shareable trees, everything that is not a subvolume or
 * reloc root) just get put onto a simple dirty list.  The transaction walks
 * this list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (btrfs_root_id(root) == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * Used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;
	u64 reloc_src_root = 0;

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != btrfs_get_root_last_trans(root));

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		reloc_src_root = btrfs_header_owner(buf);
	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0,
				     reloc_src_root, BTRFS_NESTING_NEW_ROOT);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_mark_buffer_dirty(trans, cow);
	*cow_ret = cow;
	return 0;
}
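
/*
 * A hedged sketch of how a snapshot-creation path might use the helper above
 * (the surrounding variable names are illustrative, not code from this file):
 *
 *	struct extent_buffer *old = btrfs_lock_root_node(root);
 *	struct extent_buffer *new_root_eb;
 *	int ret;
 *
 *	ret = btrfs_copy_root(trans, root, old, &new_root_eb, objectid);
 *	// on success, new_root_eb is the dirty, still-locked copy of the root
 */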
/*
 * check if the tree block can be shared by multiple trees
 */
bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *buf)
{
	const u64 buf_gen = btrfs_header_generation(buf);

	/*
	 * Tree blocks not in shareable trees and tree roots are never shared.
	 * If a block was allocated after the last snapshot and the block was
	 * not allocated by tree relocation, we know the block is not shared.
	 */

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return false;

	if (buf == root->node)
		return false;

	if (buf_gen > btrfs_root_last_snapshot(&root->root_item) &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
		return false;

	if (buf != root->commit_root)
		return true;

	/*
	 * An extent buffer that used to be the commit root may still be shared
	 * because the tree height may have increased and it became a child of a
	 * higher level root.  This can happen when snapshotting a subvolume
	 * created in the current transaction.
	 */
	if (buf_gen == trans->transid)
		return true;

	return false;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in the tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in the tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(trans, root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags, NULL);
		if (ret)
			return ret;
		if (unlikely(refs == 0)) {
			btrfs_crit(fs_info,
		"found 0 references for tree block at bytenr %llu level %d root %llu",
				   buf->start, btrfs_header_level(buf),
				   btrfs_root_id(root));
			ret = -EUCLEAN;
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	if (unlikely(owner == BTRFS_TREE_RELOC_OBJECTID &&
		     !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))) {
		btrfs_crit(fs_info,
"found tree block at bytenr %llu level %d root %llu refs %llu flags %llx without full backref flag set",
			   buf->start, btrfs_header_level(buf),
			   btrfs_root_id(root), refs, flags);
		ret = -EUCLEAN;
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (refs > 1) {
		if ((owner == btrfs_root_id(root) ||
		     btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			ret = btrfs_set_disk_extent_flags(trans, buf,
						BTRFS_BLOCK_FLAG_FULL_BACKREF);
			if (ret)
				return ret;
		} else {

			if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clear_buffer_dirty(trans, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * Does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct extent_buffer *buf,
			  struct extent_buffer *parent, int parent_slot,
			  struct extent_buffer **cow_ret,
			  u64 search_start, u64 empty_size,
			  enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;
	u64 reloc_src_root = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_write_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != btrfs_get_root_last_trans(root));

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		reloc_src_root = btrfs_header_owner(buf);
	}
	cow = btrfs_alloc_tree_block(trans, root, parent_start,
				     btrfs_root_id(root), &disk_key, level,
				     search_start, empty_size, reloc_src_root, nest);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, btrfs_root_id(root));

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto error_unlock_cow;
	}

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_unlock_cow;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto error_unlock_cow;
		}
		atomic_inc(&cow->refs);
		rcu_assign_pointer(root->node, cow);

		ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
					    parent_start, last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto error_unlock_cow;
		}
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		ret = btrfs_tree_mod_log_insert_key(parent, parent_slot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_unlock_cow;
		}
		btrfs_set_node_blockptr(parent, parent_slot, cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(trans, parent);
		if (last_ref) {
			ret = btrfs_tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto error_unlock_cow;
			}
		}
		ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
					    parent_start, last_ref);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto error_unlock_cow;
		}
	}

	trace_btrfs_cow_block(root, buf, cow);
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(trans, cow);
	*cow_ret = cow;
	return 0;

error_unlock_cow:
	btrfs_tree_unlock(cow);
	free_extent_buffer(cow);
	return ret;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}
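
/*
 * For illustration, a sketch of how these two helpers are combined by a
 * caller walking down the tree (this mirrors the pattern used later in
 * btrfs_search_slot(), it is not additional API):
 *
 *	if (should_cow_block(trans, root, b)) {
 *		ret = btrfs_cow_block(trans, root, b, parent, slot, &b,
 *				      BTRFS_NESTING_COW);
 *		if (ret)
 *			return ret;
 *	}
 */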
/*
 * COWs a single block, see btrfs_force_cow_block() for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet.
 */
int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;

	if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
		   "attempt to COW block %llu on root %llu that is being deleted",
			   buf->start, btrfs_root_id(root));
		return -EUCLEAN;
	}

	/*
	 * COWing must happen through a running transaction, which always
	 * matches the current fs generation (it's a transaction with a state
	 * less than TRANS_STATE_UNBLOCKED).  If it doesn't, then turn the fs
	 * into error state to prevent the commit of any transaction.
	 */
	if (unlikely(trans->transaction != fs_info->running_transaction ||
		     trans->transid != fs_info->generation)) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
"unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu",
			   buf->start, btrfs_root_id(root), trans->transid,
			   fs_info->running_transaction->transid,
			   fs_info->generation);
		return -EUCLEAN;
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = round_down(buf->start, SZ_1G);

	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also we don't care about the error, as it's handled internally.
	 */
	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
	return btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
				     cow_ret, search_start, 0, nest);
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);

/*
 * Same as comp_keys, only with two btrfs_key's.
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
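
/*
 * Keys therefore sort by (objectid, type, offset), in that order.  An
 * illustrative example with two hand-built keys:
 *
 *	struct btrfs_key a = { .objectid = 256,
 *			       .type = BTRFS_INODE_ITEM_KEY, .offset = 0 };
 *	struct btrfs_key b = { .objectid = 256,
 *			       .type = BTRFS_DIR_ITEM_KEY, .offset = 123 };
 *
 * btrfs_comp_cpu_keys(&a, &b) returns -1 here: the objectids tie, so the
 * comparison falls through to the type field, and BTRFS_INODE_ITEM_KEY (1)
 * sorts before BTRFS_DIR_ITEM_KEY (84); the offsets are never consulted.
 */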
/*
 * Search for a key in the given extent_buffer.
 *
 * The lower boundary for the search is specified by the slot number @first_slot.
 * Use a value of 0 to search over the whole extent buffer.  Works for both
 * leaves and nodes.
 *
 * The slot in the extent buffer is returned via @slot.  If the key exists in the
 * extent buffer, then @slot will point to the slot where the key is, otherwise
 * it points to the slot where you would insert the key.
 *
 * Slot may point to the total number of items (i.e. one position beyond the last
 * key) if the key is bigger than the last key in the extent buffer.
 */
int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
		     const struct btrfs_key *key, int *slot)
{
	unsigned long p;
	int item_size;
	/*
	 * Use unsigned types for the low and high slots, so that we get a more
	 * efficient division in the search loop below.
	 */
	u32 low = first_slot;
	u32 high = btrfs_header_nritems(eb);
	int ret;
	const int key_size = sizeof(struct btrfs_disk_key);

	if (unlikely(low > high)) {
		btrfs_err(eb->fs_info,
		 "%s: low (%u) > high (%u) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	if (btrfs_header_level(eb) == 0) {
		p = offsetof(struct btrfs_leaf, items);
		item_size = sizeof(struct btrfs_item);
	} else {
		p = offsetof(struct btrfs_node, ptrs);
		item_size = sizeof(struct btrfs_key_ptr);
	}

	while (low < high) {
		const int unit_size = eb->folio_size;
		unsigned long oil;
		unsigned long offset;
		struct btrfs_disk_key *tmp;
		struct btrfs_disk_key unaligned;
		int mid;

		mid = (low + high) / 2;
		offset = p + mid * item_size;
		oil = get_eb_offset_in_folio(eb, offset);

		if (oil + key_size <= unit_size) {
			const unsigned long idx = get_eb_folio_index(eb, offset);
			char *kaddr = folio_address(eb->folios[idx]);

			oil = get_eb_offset_in_folio(eb, offset);
			tmp = (struct btrfs_disk_key *)(kaddr + oil);
		} else {
			read_extent_buffer(eb, &unaligned, offset, key_size);
			tmp = &unaligned;
		}

		ret = btrfs_comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
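
/*
 * A minimal usage sketch: callers treat the return value as a three-way
 * result, e.g.
 *
 *	int slot;
 *	int ret = btrfs_bin_search(eb, 0, &key, &slot);
 *
 *	if (ret < 0)		// corruption detected (-EINVAL)
 *		return ret;
 *	if (ret == 0)		// exact match lives at @slot
 *		...
 *	else			// ret == 1: @slot is the insertion position
 *		...
 */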
static void root_add_used_bytes(struct btrfs_root *root)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
		btrfs_root_used(&root->root_item) + root->fs_info->nodesize);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used_bytes(struct btrfs_root *root)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
		btrfs_root_used(&root->root_item) - root->fs_info->nodesize);
	spin_unlock(&root->accounting_lock);
}

/*
 * Given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot)
{
	int level = btrfs_header_level(parent);
	struct btrfs_tree_parent_check check = { 0 };
	struct extent_buffer *eb;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	ASSERT(level);

	check.level = level - 1;
	check.transid = btrfs_node_ptr_generation(parent, slot);
	check.owner_root = btrfs_header_owner(parent);
	check.has_first_key = true;
	btrfs_node_key_to_cpu(parent, &check.first_key, slot);

	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
			     &check);
	if (IS_ERR(eb))
		return eb;
	if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return ERR_PTR(-EIO);
	}

	return eb;
}
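
/*
 * Usage sketch: the caller owns the returned reference and must lock the
 * child itself if it needs to, e.g.
 *
 *	struct extent_buffer *child = btrfs_read_node_slot(parent, slot);
 *
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *	btrfs_tree_lock(child);
 *	...
 *	btrfs_tree_unlock(child);
 *	free_extent_buffer(child);
 */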
/*
 * Node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	ASSERT(level > 0);

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = btrfs_read_node_slot(mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			goto out;
		}

		btrfs_tree_lock(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto out;
		}

		ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
		if (ret < 0) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used_bytes(root);
		ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	if (pslot) {
		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left)) {
			ret = PTR_ERR(left);
			left = NULL;
			goto out;
		}

		btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left,
				       BTRFS_NESTING_LEFT_COW);
		if (wret) {
			ret = wret;
			goto out;
		}
	}

	if (pslot + 1 < btrfs_header_nritems(parent)) {
		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right)) {
			ret = PTR_ERR(right);
			right = NULL;
			goto out;
		}

		btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right,
				       BTRFS_NESTING_RIGHT_COW);
		if (wret) {
			ret = wret;
			goto out;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the rightmost buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			btrfs_clear_buffer_dirty(trans, right);
			btrfs_tree_unlock(right);
			ret = btrfs_del_ptr(trans, root, path, level + 1, pslot + 1);
			if (ret < 0) {
				free_extent_buffer_stale(right);
				right = NULL;
				goto out;
			}
			root_sub_used_bytes(root);
			ret = btrfs_free_tree_block(trans, btrfs_root_id(root),
						    right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(trans, parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * We're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right.
		 */
		if (unlikely(!left)) {
			btrfs_crit(fs_info,
"missing left child when middle child only has 1 item, parent bytenr %llu level %d mid bytenr %llu root %llu",
				   parent->start, btrfs_header_level(parent),
				   mid->start, btrfs_root_id(root));
			ret = -EUCLEAN;
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		wret = balance_node_right(trans, mid, left);
		if (wret < 0) {
			ret = wret;
			goto out;
		}
		if (wret == 1) {
			wret = push_node_left(trans, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		ret = btrfs_del_ptr(trans, root, path, level + 1, pslot);
		if (ret < 0) {
			free_extent_buffer_stale(mid);
			mid = NULL;
			goto out;
		}
		root_sub_used_bytes(root);
		ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		ret = btrfs_tree_mod_log_insert_key(parent, pslot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(trans, parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			atomic_inc(&left->refs);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
out:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/*
 * Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	/* first, try to make some room in the middle buffer */
	if (pslot) {
		u32 left_nr;

		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left))
			return PTR_ERR(left);

		btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left,
					      BTRFS_NESTING_LEFT_COW);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(trans, parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}

	/*
	 * then try to empty the rightmost buffer into the middle
	 */
	if (pslot + 1 < btrfs_header_nritems(parent)) {
		u32 right_nr;

		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right))
			return PTR_ERR(right);

		btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right, BTRFS_NESTING_RIGHT_COW);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(trans, parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * Readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering readahead on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 nread_max;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	/*
	 * Since the time between visiting leaves is much shorter than the time
	 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
	 * much IO at once (possibly random).
	 */
	if (path->reada == READA_FORWARD_ALWAYS) {
		if (level > 1)
			nread_max = node->fs_info->nodesize;
		else
			nread_max = SZ_128K;
	} else {
		nread_max = SZ_64K;
	}

	search = btrfs_node_blockptr(node, slot);
	blocksize = fs_info->nodesize;
	if (path->reada != READA_FORWARD_ALWAYS) {
		struct extent_buffer *eb;

		eb = find_extent_buffer(fs_info, search);
		if (eb) {
			free_extent_buffer(eb);
			return;
		}
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD ||
			   path->reada == READA_FORWARD_ALWAYS) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if (path->reada == READA_FORWARD_ALWAYS ||
		    (search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			btrfs_readahead_node_child(node, nr);
			nread += blocksize;
		}
		nscan++;
		if (nread > nread_max || nscan > 32)
			break;
	}
}

static noinline void reada_for_balance(struct btrfs_path *path, int level)
{
	struct extent_buffer *parent;
	int slot;
	int nritems;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0)
		btrfs_readahead_node_child(parent, slot - 1);
	if (slot + 1 < nritems)
		btrfs_readahead_node_child(parent, slot + 1);
}
/*
 * When we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * Callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
 * if lowest_unlock is 1, level 0 won't be unlocked.
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	bool check_skip = true;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;

		if (check_skip) {
			if (path->slots[i] == 0) {
				skip_level = i + 1;
				continue;
			}

			if (path->keep_locks) {
				u32 nritems;

				nritems = btrfs_header_nritems(path->nodes[i]);
				if (nritems < 1 || path->slots[i] >= nritems - 1) {
					skip_level = i + 1;
					continue;
				}
			}
		}

		if (i >= lowest_unlock && i > skip_level) {
			check_skip = false;
			btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}

/*
 * Helper function for btrfs_search_slot() and other functions that do a search
 * on a btree.  The goal is to find a tree block in the cache (the radix tree at
 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
 * its pages from disk.
 *
 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
 * whole btree search, starting again from the current root node.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int slot,
		      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_tree_parent_check check = { 0 };
	u64 blocknr;
	struct extent_buffer *tmp = NULL;
	int ret = 0;
	int parent_level;
	int err;
	bool read_tmp = false;
	bool tmp_locked = false;
	bool path_released = false;

	blocknr = btrfs_node_blockptr(*eb_ret, slot);
	parent_level = btrfs_header_level(*eb_ret);
	btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
	check.has_first_key = true;
	check.level = parent_level - 1;
	check.transid = btrfs_node_ptr_generation(*eb_ret, slot);
	check.owner_root = btrfs_root_id(root);

	/*
	 * If we need to read an extent buffer from disk and we are holding locks
	 * on upper level nodes, we unlock all the upper nodes before reading the
	 * extent buffer, and then return -EAGAIN to the caller as it needs to
	 * restart the search.  We don't release the lock on the current level
	 * because we need to walk this node to figure out which blocks to read.
	 */
	tmp = find_extent_buffer(fs_info, blocknr);
	if (tmp) {
		if (p->reada == READA_FORWARD_ALWAYS)
			reada_for_search(fs_info, p, parent_level, slot, key->objectid);

		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, check.transid, 1) > 0) {
			/*
			 * Do extra check for first_key, eb can be stale due to
			 * being cached, read from scrub, or have multiple
			 * parents (shared tree blocks).
			 */
			if (btrfs_verify_level_key(tmp, &check)) {
				ret = -EUCLEAN;
				goto out;
			}
			*eb_ret = tmp;
			tmp = NULL;
			ret = 0;
			goto out;
		}

		if (p->nowait) {
			ret = -EAGAIN;
			goto out;
		}

		if (!p->skip_locking) {
			btrfs_unlock_up_safe(p, parent_level + 1);
			tmp_locked = true;
			btrfs_tree_read_lock(tmp);
			btrfs_release_path(p);
			ret = -EAGAIN;
			path_released = true;
		}

		/* Now we're allowed to do a blocking uptodate check. */
		err = btrfs_read_extent_buffer(tmp, &check);
		if (err) {
			ret = err;
			goto out;
		}

		if (ret == 0) {
			ASSERT(!tmp_locked);
			*eb_ret = tmp;
			tmp = NULL;
		}
		goto out;
	} else if (p->nowait) {
		ret = -EAGAIN;
		goto out;
	}

	if (!p->skip_locking) {
		btrfs_unlock_up_safe(p, parent_level + 1);
		ret = -EAGAIN;
	}

	if (p->reada != READA_NONE)
		reada_for_search(fs_info, p, parent_level, slot, key->objectid);

	tmp = btrfs_find_create_tree_block(fs_info, blocknr, check.owner_root, check.level);
	if (IS_ERR(tmp)) {
		ret = PTR_ERR(tmp);
		tmp = NULL;
		goto out;
	}
	read_tmp = true;

	if (!p->skip_locking) {
		ASSERT(ret == -EAGAIN);
		tmp_locked = true;
		btrfs_tree_read_lock(tmp);
		btrfs_release_path(p);
		path_released = true;
	}

	/* Now we're allowed to do a blocking uptodate check. */
	err = btrfs_read_extent_buffer(tmp, &check);
	if (err) {
		ret = err;
		goto out;
	}

	/*
	 * If the read above didn't mark this buffer up to date,
	 * it will never end up being up to date.  Set ret to EIO now
	 * and give up so that our caller doesn't loop forever
	 * on our EAGAINs.
	 */
	if (!extent_buffer_uptodate(tmp)) {
		ret = -EIO;
		goto out;
	}

	if (ret == 0) {
		ASSERT(!tmp_locked);
		*eb_ret = tmp;
		tmp = NULL;
	}
out:
	if (tmp) {
		if (tmp_locked)
			btrfs_tree_read_unlock(tmp);
		if (read_tmp && ret && ret != -EAGAIN)
			free_extent_buffer_stale(tmp);
		else
			free_extent_buffer(tmp);
	}
	if (ret && !path_released)
		btrfs_release_path(p);

	return ret;
}

/*
 * Helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.
 * If we had to drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over.
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = split_node(trans, root, p, level);

		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = balance_level(trans, root, p, level);
		if (ret)
			return ret;

		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			return -EAGAIN;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return ret;
}

int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 iobjectid, u64 ioff, u8 key_type,
		    struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	ASSERT(path);
	ASSERT(found_key);

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
	    found_key->objectid != key.objectid)
		return 1;

	return 0;
}
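
/*
 * A hedged caller sketch for btrfs_find_item(): 0 means a key with the
 * requested objectid and type was found (its full key, including the offset,
 * is returned in @found_key), 1 means no such key, negative means error.
 * The key type shown is illustrative:
 *
 *	struct btrfs_key found;
 *	int ret = btrfs_find_item(root, path, ino, 0, BTRFS_INODE_REF_KEY,
 *				  &found);
 *
 *	if (ret == 0)
 *		... use found.offset, then btrfs_release_path(path) ...
 */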
static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
							struct btrfs_path *p,
							int write_lock_level)
{
	struct extent_buffer *b;
	int root_lock = 0;
	int level = 0;

	if (p->search_commit_root) {
		b = root->commit_root;
		atomic_inc(&b->refs);
		level = btrfs_header_level(b);
		/*
		 * Ensure that all callers have set skip_locking when
		 * p->search_commit_root = 1.
		 */
		ASSERT(p->skip_locking == 1);

		goto out;
	}

	if (p->skip_locking) {
		b = btrfs_root_node(root);
		level = btrfs_header_level(b);
		goto out;
	}

	/* We try very hard to do read locks on the root */
	root_lock = BTRFS_READ_LOCK;

	/*
	 * If the level is set to maximum, we can skip trying to get the read
	 * lock.
	 */
	if (write_lock_level < BTRFS_MAX_LEVEL) {
		/*
		 * We don't know the level of the root node until we actually
		 * have it read locked.
		 */
		if (p->nowait) {
			b = btrfs_try_read_lock_root_node(root);
			if (IS_ERR(b))
				return b;
		} else {
			b = btrfs_read_lock_root_node(root);
		}
		level = btrfs_header_level(b);
		if (level > write_lock_level)
			goto out;

		/* Whoops, must trade for write lock */
		btrfs_tree_read_unlock(b);
		free_extent_buffer(b);
	}

	b = btrfs_lock_root_node(root);
	root_lock = BTRFS_WRITE_LOCK;

	/* The level might have changed, check again */
	level = btrfs_header_level(b);

out:
	/*
	 * The root may have failed to write out at some point, and thus is no
	 * longer valid, return an error in this case.
	 */
	if (!extent_buffer_uptodate(b)) {
		if (root_lock)
			btrfs_tree_unlock_rw(b, root_lock);
		free_extent_buffer(b);
		return ERR_PTR(-EIO);
	}

	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;
	/*
	 * Callers are responsible for dropping b's references.
	 */
	return b;
}

/*
 * Replace the extent buffer at the lowest level of the path with a cloned
 * version.  The purpose is to be able to use it safely, after releasing the
 * commit root semaphore, even if relocation is happening in parallel, the
 * transaction used for relocation is committed and the extent buffer is
 * reallocated in the next transaction.
 *
 * This is used in a context where the caller does not prevent transaction
 * commits from happening, either by holding a transaction handle or holding
 * some lock, while it's doing searches through a commit root.
 * At the moment it's only used for send operations.
 */
static int finish_need_commit_sem_search(struct btrfs_path *path)
{
	const int i = path->lowest_level;
	const int slot = path->slots[i];
	struct extent_buffer *lowest = path->nodes[i];
	struct extent_buffer *clone;

	ASSERT(path->need_commit_sem);

	if (!lowest)
		return 0;

	lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);

	clone = btrfs_clone_extent_buffer(lowest);
	if (!clone)
		return -ENOMEM;

	btrfs_release_path(path);
	path->nodes[i] = clone;
	path->slots[i] = slot;

	return 0;
}
static inline int search_for_key_slot(struct extent_buffer *eb,
				      int search_low_slot,
				      const struct btrfs_key *key,
				      int prev_cmp,
				      int *slot)
{
	/*
	 * If a previous call to btrfs_bin_search() on a parent node returned an
	 * exact match (prev_cmp == 0), we can safely assume the target key will
	 * always be at slot 0 on lower levels, since each key pointer
	 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
	 * subtree it points to.  Thus we can skip searching lower levels.
	 */
	if (prev_cmp == 0) {
		*slot = 0;
		return 0;
	}

	return btrfs_bin_search(eb, search_low_slot, key, slot);
}

static int search_leaf(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       const struct btrfs_key *key,
		       struct btrfs_path *path,
		       int ins_len,
		       int prev_cmp)
{
	struct extent_buffer *leaf = path->nodes[0];
	int leaf_free_space = -1;
	int search_low_slot = 0;
	int ret;
	bool do_bin_search = true;

	/*
	 * If we are doing an insertion, the leaf has enough free space and the
	 * destination slot for the key is not slot 0, then we can unlock our
	 * write lock on the parent, and any other upper nodes, before doing the
	 * binary search on the leaf (with search_for_key_slot()), allowing other
	 * tasks to lock the parent and any other upper nodes.
	 */
	if (ins_len > 0) {
		/*
		 * Cache the leaf free space, since we will need it later and it
		 * will not change until then.
		 */
		leaf_free_space = btrfs_leaf_free_space(leaf);

		/*
		 * !path->locks[1] means we have a single node tree, the leaf is
		 * the root of the tree.
		 */
		if (path->locks[1] && leaf_free_space >= ins_len) {
			struct btrfs_disk_key first_key;

			ASSERT(btrfs_header_nritems(leaf) > 0);
			btrfs_item_key(leaf, &first_key, 0);

			/*
			 * Doing the extra comparison with the first key is cheap,
			 * taking into account that the first key is very likely
			 * already in a cache line because it immediately follows
			 * the extent buffer's header and we have recently accessed
			 * the header's level field.
			 */
			ret = btrfs_comp_keys(&first_key, key);
			if (ret < 0) {
				/*
				 * The first key is smaller than the key we want
				 * to insert, so we are safe to unlock all upper
				 * nodes and we have to do the binary search.
				 *
				 * We do use btrfs_unlock_up_safe() and not
				 * unlock_up() because the latter does not unlock
				 * nodes with a slot of 0 - we can safely unlock
				 * any node even if its slot is 0 since in this
				 * case the key does not end up at slot 0 of the
				 * leaf and there's no need to split the leaf.
				 */
				btrfs_unlock_up_safe(path, 1);
				search_low_slot = 1;
			} else {
				/*
				 * The first key is >= the key we want to
				 * insert, so we can skip the binary search as
				 * the target key will be at slot 0.
				 *
				 * We cannot unlock upper nodes when the key is
				 * less than the first key, because we will need
				 * to update the key at slot 0 of the parent node
				 * and possibly of other upper nodes too.
				 * If the key matches the first key, then we can
				 * unlock all the upper nodes, using
				 * btrfs_unlock_up_safe() instead of unlock_up()
				 * as stated above.
				 */
				if (ret == 0)
					btrfs_unlock_up_safe(path, 1);
				/*
				 * ret is already 0 or 1, matching the result of
				 * a btrfs_bin_search() call, so there is no need
				 * to adjust it.
				 */
				do_bin_search = false;
				path->slots[0] = 0;
			}
		}
	}

	if (do_bin_search) {
		ret = search_for_key_slot(leaf, search_low_slot, key,
					  prev_cmp, &path->slots[0]);
		if (ret < 0)
			return ret;
	}

	if (ins_len > 0) {
		/*
		 * Item key already exists.  In this case, if we are allowed to
		 * insert the item (for example, in dir_item case, item key
		 * collision is allowed), it will be merged with the original
		 * item.  Only the item size grows, no new btrfs item will be
		 * added.  If search_for_extension is not set, ins_len already
		 * accounts for the size of a btrfs_item, deduct it here so the
		 * leaf space check will be correct.
		 */
		if (ret == 0 && !path->search_for_extension) {
			ASSERT(ins_len >= sizeof(struct btrfs_item));
			ins_len -= sizeof(struct btrfs_item);
		}

		ASSERT(leaf_free_space >= 0);

		if (leaf_free_space < ins_len) {
			int err;

			err = split_leaf(trans, root, key, path, ins_len,
					 (ret == 0));
			ASSERT(err <= 0);
			if (WARN_ON(err > 0))
				err = -EUCLEAN;
			if (err)
				ret = err;
		}
	}

	return ret;
}
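
/*
 * A minimal caller sketch for btrfs_search_slot(), documented below (an
 * illustrative read-only pattern, with error handling trimmed):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key found;
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);
 *	btrfs_free_path(path);
 */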
In this case, if we are allowed to 1914 * insert the item (for example, in dir_item case, item key 1915 * collision is allowed), it will be merged with the original 1916 * item. Only the item size grows, no new btrfs item will be 1917 * added. If search_for_extension is not set, ins_len already 1918 * accounts the size btrfs_item, deduct it here so leaf space 1919 * check will be correct. 1920 */ 1921 if (ret == 0 && !path->search_for_extension) { 1922 ASSERT(ins_len >= sizeof(struct btrfs_item)); 1923 ins_len -= sizeof(struct btrfs_item); 1924 } 1925 1926 ASSERT(leaf_free_space >= 0); 1927 1928 if (leaf_free_space < ins_len) { 1929 int err; 1930 1931 err = split_leaf(trans, root, key, path, ins_len, 1932 (ret == 0)); 1933 ASSERT(err <= 0); 1934 if (WARN_ON(err > 0)) 1935 err = -EUCLEAN; 1936 if (err) 1937 ret = err; 1938 } 1939 } 1940 1941 return ret; 1942 } 1943 1944 /* 1945 * Look for a key in a tree and perform necessary modifications to preserve 1946 * tree invariants. 1947 * 1948 * @trans: Handle of transaction, used when modifying the tree 1949 * @p: Holds all btree nodes along the search path 1950 * @root: The root node of the tree 1951 * @key: The key we are looking for 1952 * @ins_len: Indicates purpose of search: 1953 * >0 for inserts it's size of item inserted (*) 1954 * <0 for deletions 1955 * 0 for plain searches, not modifying the tree 1956 * 1957 * (*) If size of item inserted doesn't include 1958 * sizeof(struct btrfs_item), then p->search_for_extension must 1959 * be set. 1960 * @cow: boolean should CoW operations be performed. Must always be 1 1961 * when modifying the tree. 1962 * 1963 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree. 1964 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible) 1965 * 1966 * If @key is found, 0 is returned and you can find the item in the leaf level 1967 * of the path (level 0) 1968 * 1969 * If @key isn't found, 1 is returned and the leaf level of the path (level 0) 1970 * points to the slot where it should be inserted 1971 * 1972 * If an error is encountered while searching the tree a negative error number 1973 * is returned 1974 */ 1975 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, 1976 const struct btrfs_key *key, struct btrfs_path *p, 1977 int ins_len, int cow) 1978 { 1979 struct btrfs_fs_info *fs_info; 1980 struct extent_buffer *b; 1981 int slot; 1982 int ret; 1983 int err; 1984 int level; 1985 int lowest_unlock = 1; 1986 /* everything at write_lock_level or lower must be write locked */ 1987 int write_lock_level = 0; 1988 u8 lowest_level = 0; 1989 int min_write_lock_level; 1990 int prev_cmp; 1991 1992 if (!root) 1993 return -EINVAL; 1994 1995 fs_info = root->fs_info; 1996 might_sleep(); 1997 1998 lowest_level = p->lowest_level; 1999 WARN_ON(lowest_level && ins_len > 0); 2000 WARN_ON(p->nodes[0] != NULL); 2001 BUG_ON(!cow && ins_len); 2002 2003 /* 2004 * For now only allow nowait for read only operations. There's no 2005 * strict reason why we can't, we just only need it for reads so it's 2006 * only implemented for reads. 
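 *
 * A hedged sketch of a conforming nowait caller (hypothetical code, not
 * part of this file): pass a NULL transaction handle and cow == 0, and be
 * prepared for -EAGAIN when a lock or the commit root semaphore is
 * contended:
 *
 *	path->nowait = 1;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == -EAGAIN)
 *		...retry from a context that is allowed to block...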
2007 */ 2008 ASSERT(!p->nowait || !cow); 2009 2010 if (ins_len < 0) { 2011 lowest_unlock = 2; 2012 2013 /* When we are removing items, we might have to go up to level 2014 * two as we update tree pointers. Make sure we keep write locks 2015 * for those levels as well. 2016 */ 2017 write_lock_level = 2; 2018 } else if (ins_len > 0) { 2019 /* 2020 * for inserting items, make sure we have a write lock on 2021 * level 1 so we can update keys 2022 */ 2023 write_lock_level = 1; 2024 } 2025 2026 if (!cow) 2027 write_lock_level = -1; 2028 2029 if (cow && (p->keep_locks || p->lowest_level)) 2030 write_lock_level = BTRFS_MAX_LEVEL; 2031 2032 min_write_lock_level = write_lock_level; 2033 2034 if (p->need_commit_sem) { 2035 ASSERT(p->search_commit_root); 2036 if (p->nowait) { 2037 if (!down_read_trylock(&fs_info->commit_root_sem)) 2038 return -EAGAIN; 2039 } else { 2040 down_read(&fs_info->commit_root_sem); 2041 } 2042 } 2043 2044 again: 2045 prev_cmp = -1; 2046 b = btrfs_search_slot_get_root(root, p, write_lock_level); 2047 if (IS_ERR(b)) { 2048 ret = PTR_ERR(b); 2049 goto done; 2050 } 2051 2052 while (b) { 2053 int dec = 0; 2054 2055 level = btrfs_header_level(b); 2056 2057 if (cow) { 2058 bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); 2059 2060 /* 2061 * if we don't really need to cow this block 2062 * then we don't want to set the path blocking, 2063 * so we test it here 2064 */ 2065 if (!should_cow_block(trans, root, b)) 2066 goto cow_done; 2067 2068 /* 2069 * must have write locks on this node and the 2070 * parent 2071 */ 2072 if (level > write_lock_level || 2073 (level + 1 > write_lock_level && 2074 level + 1 < BTRFS_MAX_LEVEL && 2075 p->nodes[level + 1])) { 2076 write_lock_level = level + 1; 2077 btrfs_release_path(p); 2078 goto again; 2079 } 2080 2081 if (last_level) 2082 err = btrfs_cow_block(trans, root, b, NULL, 0, 2083 &b, 2084 BTRFS_NESTING_COW); 2085 else 2086 err = btrfs_cow_block(trans, root, b, 2087 p->nodes[level + 1], 2088 p->slots[level + 1], &b, 2089 BTRFS_NESTING_COW); 2090 if (err) { 2091 ret = err; 2092 goto done; 2093 } 2094 } 2095 cow_done: 2096 p->nodes[level] = b; 2097 2098 /* 2099 * we have a lock on b and as long as we aren't changing 2100 * the tree, there is no way for the items in b to change. 2101 * It is safe to drop the lock on our parent before we 2102 * go through the expensive btree search on b. 2103 * 2104 * If we're inserting or deleting (ins_len != 0), then we might 2105 * be changing slot zero, which may require changing the parent. 2106 * So, we can't drop the lock until after we know which slot 2107 * we're operating on.
2108 */ 2109 if (!ins_len && !p->keep_locks) { 2110 int u = level + 1; 2111 2112 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 2113 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 2114 p->locks[u] = 0; 2115 } 2116 } 2117 2118 if (level == 0) { 2119 if (ins_len > 0) 2120 ASSERT(write_lock_level >= 1); 2121 2122 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp); 2123 if (!p->search_for_split) 2124 unlock_up(p, level, lowest_unlock, 2125 min_write_lock_level, NULL); 2126 goto done; 2127 } 2128 2129 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot); 2130 if (ret < 0) 2131 goto done; 2132 prev_cmp = ret; 2133 2134 if (ret && slot > 0) { 2135 dec = 1; 2136 slot--; 2137 } 2138 p->slots[level] = slot; 2139 err = setup_nodes_for_search(trans, root, p, b, level, ins_len, 2140 &write_lock_level); 2141 if (err == -EAGAIN) 2142 goto again; 2143 if (err) { 2144 ret = err; 2145 goto done; 2146 } 2147 b = p->nodes[level]; 2148 slot = p->slots[level]; 2149 2150 /* 2151 * Slot 0 is special, if we change the key we have to update 2152 * the parent pointer which means we must have a write lock on 2153 * the parent 2154 */ 2155 if (slot == 0 && ins_len && write_lock_level < level + 1) { 2156 write_lock_level = level + 1; 2157 btrfs_release_path(p); 2158 goto again; 2159 } 2160 2161 unlock_up(p, level, lowest_unlock, min_write_lock_level, 2162 &write_lock_level); 2163 2164 if (level == lowest_level) { 2165 if (dec) 2166 p->slots[level]++; 2167 goto done; 2168 } 2169 2170 err = read_block_for_search(root, p, &b, slot, key); 2171 if (err == -EAGAIN && !p->nowait) 2172 goto again; 2173 if (err) { 2174 ret = err; 2175 goto done; 2176 } 2177 2178 if (!p->skip_locking) { 2179 level = btrfs_header_level(b); 2180 2181 btrfs_maybe_reset_lockdep_class(root, b); 2182 2183 if (level <= write_lock_level) { 2184 btrfs_tree_lock(b); 2185 p->locks[level] = BTRFS_WRITE_LOCK; 2186 } else { 2187 if (p->nowait) { 2188 if (!btrfs_try_tree_read_lock(b)) { 2189 free_extent_buffer(b); 2190 ret = -EAGAIN; 2191 goto done; 2192 } 2193 } else { 2194 btrfs_tree_read_lock(b); 2195 } 2196 p->locks[level] = BTRFS_READ_LOCK; 2197 } 2198 p->nodes[level] = b; 2199 } 2200 } 2201 ret = 1; 2202 done: 2203 if (ret < 0 && !p->skip_release_on_error) 2204 btrfs_release_path(p); 2205 2206 if (p->need_commit_sem) { 2207 int ret2; 2208 2209 ret2 = finish_need_commit_sem_search(p); 2210 up_read(&fs_info->commit_root_sem); 2211 if (ret2) 2212 ret = ret2; 2213 } 2214 2215 return ret; 2216 } 2217 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO); 2218 2219 /* 2220 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 2221 * current state of the tree together with the operations recorded in the tree 2222 * modification log to search for the key in a previous version of this tree, as 2223 * denoted by the time_seq parameter. 2224 * 2225 * Naturally, there is no support for insert, delete or cow operations. 2226 * 2227 * The resulting path and return value will be set up as if we called 2228 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 
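 *
 * In other words (illustrative, hypothetical caller):
 *
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *
 * sets up the same path layout and return value that
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *
 * would have produced at the point in time denoted by @time_seq.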
2229 */ 2230 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 2231 struct btrfs_path *p, u64 time_seq) 2232 { 2233 struct btrfs_fs_info *fs_info = root->fs_info; 2234 struct extent_buffer *b; 2235 int slot; 2236 int ret; 2237 int err; 2238 int level; 2239 int lowest_unlock = 1; 2240 u8 lowest_level = 0; 2241 2242 lowest_level = p->lowest_level; 2243 WARN_ON(p->nodes[0] != NULL); 2244 ASSERT(!p->nowait); 2245 2246 if (p->search_commit_root) { 2247 BUG_ON(time_seq); 2248 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2249 } 2250 2251 again: 2252 b = btrfs_get_old_root(root, time_seq); 2253 if (!b) { 2254 ret = -EIO; 2255 goto done; 2256 } 2257 level = btrfs_header_level(b); 2258 p->locks[level] = BTRFS_READ_LOCK; 2259 2260 while (b) { 2261 int dec = 0; 2262 2263 level = btrfs_header_level(b); 2264 p->nodes[level] = b; 2265 2266 /* 2267 * we have a lock on b and as long as we aren't changing 2268 * the tree, there is no way for the items in b to change. 2269 * It is safe to drop the lock on our parent before we 2270 * go through the expensive btree search on b. 2271 */ 2272 btrfs_unlock_up_safe(p, level + 1); 2273 2274 ret = btrfs_bin_search(b, 0, key, &slot); 2275 if (ret < 0) 2276 goto done; 2277 2278 if (level == 0) { 2279 p->slots[level] = slot; 2280 unlock_up(p, level, lowest_unlock, 0, NULL); 2281 goto done; 2282 } 2283 2284 if (ret && slot > 0) { 2285 dec = 1; 2286 slot--; 2287 } 2288 p->slots[level] = slot; 2289 unlock_up(p, level, lowest_unlock, 0, NULL); 2290 2291 if (level == lowest_level) { 2292 if (dec) 2293 p->slots[level]++; 2294 goto done; 2295 } 2296 2297 err = read_block_for_search(root, p, &b, slot, key); 2298 if (err == -EAGAIN && !p->nowait) 2299 goto again; 2300 if (err) { 2301 ret = err; 2302 goto done; 2303 } 2304 2305 level = btrfs_header_level(b); 2306 btrfs_tree_read_lock(b); 2307 b = btrfs_tree_mod_log_rewind(fs_info, b, time_seq); 2308 if (!b) { 2309 ret = -ENOMEM; 2310 goto done; 2311 } 2312 p->locks[level] = BTRFS_READ_LOCK; 2313 p->nodes[level] = b; 2314 } 2315 ret = 1; 2316 done: 2317 if (ret < 0) 2318 btrfs_release_path(p); 2319 2320 return ret; 2321 } 2322 2323 /* 2324 * Search the tree again to find a leaf with smaller keys. 2325 * Returns 0 if it found something. 2326 * Returns 1 if there are no smaller keys. 2327 * Returns < 0 on error. 2328 * 2329 * This may release the path, and so you may lose any locks held at the 2330 * time you call it. 2331 */ 2332 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 2333 { 2334 struct btrfs_key key; 2335 struct btrfs_key orig_key; 2336 struct btrfs_disk_key found_key; 2337 int ret; 2338 2339 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 2340 orig_key = key; 2341 2342 if (key.offset > 0) { 2343 key.offset--; 2344 } else if (key.type > 0) { 2345 key.type--; 2346 key.offset = (u64)-1; 2347 } else if (key.objectid > 0) { 2348 key.objectid--; 2349 key.type = (u8)-1; 2350 key.offset = (u64)-1; 2351 } else { 2352 return 1; 2353 } 2354 2355 btrfs_release_path(path); 2356 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2357 if (ret <= 0) 2358 return ret; 2359 2360 /* 2361 * Previous key not found.
Even if we were at slot 0 of the leaf we had 2362 * before releasing the path and calling btrfs_search_slot(), we now may 2363 * be in a slot pointing to the same original key - this can happen if, 2364 * after we released the path, one or more items were moved from a 2365 * sibling leaf into the front of the leaf we had due to an insertion 2366 * (see push_leaf_right()). 2367 * If we hit this case and our slot is > 0, just decrement the slot 2368 * so that the caller does not process the same key again, which may or 2369 * may not break the caller, depending on its logic. 2370 */ 2371 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { 2372 btrfs_item_key(path->nodes[0], &found_key, path->slots[0]); 2373 ret = btrfs_comp_keys(&found_key, &orig_key); 2374 if (ret == 0) { 2375 if (path->slots[0] > 0) { 2376 path->slots[0]--; 2377 return 0; 2378 } 2379 /* 2380 * At slot 0, same key as before, it means orig_key is 2381 * the lowest, leftmost, key in the tree. We're done. 2382 */ 2383 return 1; 2384 } 2385 } 2386 2387 btrfs_item_key(path->nodes[0], &found_key, 0); 2388 ret = btrfs_comp_keys(&found_key, &key); 2389 /* 2390 * We might have had an item with the previous key in the tree right 2391 * before we released our path. And after we released our path, that 2392 * item might have been pushed to the first slot (0) of the leaf we 2393 * were holding due to a tree balance. Alternatively, an item with the 2394 * previous key can exist as the only element of a leaf (big fat item). 2395 * Therefore account for these 2 cases, so that our callers (like 2396 * btrfs_previous_item) don't miss an existing item with a key matching 2397 * the previous key we computed above. 2398 */ 2399 if (ret <= 0) 2400 return 0; 2401 return 1; 2402 } 2403 2404 /* 2405 * Helper to use instead of btrfs_search_slot() if no exact match is needed 2406 * but instead the next or previous item should be returned. 2407 * When find_higher is true, the next higher item is returned, the next lower 2408 * otherwise. 2409 * When return_any and find_higher are both true, and no higher item is found, 2410 * return the next lower instead. 2411 * When return_any is true and find_higher is false, and no lower item is found, 2412 * return the next higher instead. 2413 * It returns 0 if any item is found, 1 if none is found (tree empty), and 2414 * < 0 on error 2415 */ 2416 int btrfs_search_slot_for_read(struct btrfs_root *root, 2417 const struct btrfs_key *key, 2418 struct btrfs_path *p, int find_higher, 2419 int return_any) 2420 { 2421 int ret; 2422 struct extent_buffer *leaf; 2423 2424 again: 2425 ret = btrfs_search_slot(NULL, root, key, p, 0, 0); 2426 if (ret <= 0) 2427 return ret; 2428 /* 2429 * a return value of 1 means the path is at the position where the 2430 * item should be inserted. Normally this is the next bigger item, 2431 * but in case the previous item is the last in a leaf, path points 2432 * to the first free slot in the previous leaf, i.e. at an invalid 2433 * item.
2434 */ 2435 leaf = p->nodes[0]; 2436 2437 if (find_higher) { 2438 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2439 ret = btrfs_next_leaf(root, p); 2440 if (ret <= 0) 2441 return ret; 2442 if (!return_any) 2443 return 1; 2444 /* 2445 * no higher item found, return the next 2446 * lower instead 2447 */ 2448 return_any = 0; 2449 find_higher = 0; 2450 btrfs_release_path(p); 2451 goto again; 2452 } 2453 } else { 2454 if (p->slots[0] == 0) { 2455 ret = btrfs_prev_leaf(root, p); 2456 if (ret < 0) 2457 return ret; 2458 if (!ret) { 2459 leaf = p->nodes[0]; 2460 if (p->slots[0] == btrfs_header_nritems(leaf)) 2461 p->slots[0]--; 2462 return 0; 2463 } 2464 if (!return_any) 2465 return 1; 2466 /* 2467 * no lower item found, return the next 2468 * higher instead 2469 */ 2470 return_any = 0; 2471 find_higher = 1; 2472 btrfs_release_path(p); 2473 goto again; 2474 } else { 2475 --p->slots[0]; 2476 } 2477 } 2478 return 0; 2479 } 2480 2481 /* 2482 * Execute search and call btrfs_previous_item to traverse backwards if the item 2483 * was not found. 2484 * 2485 * Return 0 if found, 1 if not found and < 0 if error. 2486 */ 2487 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key, 2488 struct btrfs_path *path) 2489 { 2490 int ret; 2491 2492 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 2493 if (ret > 0) 2494 ret = btrfs_previous_item(root, path, key->objectid, key->type); 2495 2496 if (ret == 0) 2497 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2498 2499 return ret; 2500 } 2501 2502 /* 2503 * Search for a valid slot for the given path. 2504 * 2505 * @root: The root node of the tree. 2506 * @key: Will contain a valid item if found. 2507 * @path: The starting point to validate the slot. 2508 * 2509 * Return: 0 if the item is valid 2510 * 1 if not found 2511 * <0 if error. 2512 */ 2513 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, 2514 struct btrfs_path *path) 2515 { 2516 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 2517 int ret; 2518 2519 ret = btrfs_next_leaf(root, path); 2520 if (ret) 2521 return ret; 2522 } 2523 2524 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2525 return 0; 2526 } 2527 2528 /* 2529 * Adjust the pointers going up the tree, starting at @level, making sure 2530 * the right key of each node points to 'key'. 2531 * This is used after shifting pointers to the left, so it stops 2532 * fixing up pointers when a given leaf/node is not in slot 0 of the 2533 * higher levels. 2534 * 2535 */ 2536 static void fixup_low_keys(struct btrfs_trans_handle *trans, 2537 const struct btrfs_path *path, 2538 const struct btrfs_disk_key *key, int level) 2539 { 2540 int i; 2541 struct extent_buffer *t; 2542 int ret; 2543 2544 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2545 int tslot = path->slots[i]; 2546 2547 if (!path->nodes[i]) 2548 break; 2549 t = path->nodes[i]; 2550 ret = btrfs_tree_mod_log_insert_key(t, tslot, 2551 BTRFS_MOD_LOG_KEY_REPLACE); 2552 BUG_ON(ret < 0); 2553 btrfs_set_node_key(t, key, tslot); 2554 btrfs_mark_buffer_dirty(trans, path->nodes[i]); 2555 if (tslot != 0) 2556 break; 2557 } 2558 } 2559 2560 /* 2561 * update item key. 2562 * 2563 * This function isn't completely safe.
It's the caller's responsibility 2564 * to ensure that the new key won't break the key order. 2565 */ 2566 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, 2567 const struct btrfs_path *path, 2568 const struct btrfs_key *new_key) 2569 { 2570 struct btrfs_fs_info *fs_info = trans->fs_info; 2571 struct btrfs_disk_key disk_key; 2572 struct extent_buffer *eb; 2573 int slot; 2574 2575 eb = path->nodes[0]; 2576 slot = path->slots[0]; 2577 if (slot > 0) { 2578 btrfs_item_key(eb, &disk_key, slot - 1); 2579 if (unlikely(btrfs_comp_keys(&disk_key, new_key) >= 0)) { 2580 btrfs_print_leaf(eb); 2581 btrfs_crit(fs_info, 2582 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2583 slot, btrfs_disk_key_objectid(&disk_key), 2584 btrfs_disk_key_type(&disk_key), 2585 btrfs_disk_key_offset(&disk_key), 2586 new_key->objectid, new_key->type, 2587 new_key->offset); 2588 BUG(); 2589 } 2590 } 2591 if (slot < btrfs_header_nritems(eb) - 1) { 2592 btrfs_item_key(eb, &disk_key, slot + 1); 2593 if (unlikely(btrfs_comp_keys(&disk_key, new_key) <= 0)) { 2594 btrfs_print_leaf(eb); 2595 btrfs_crit(fs_info, 2596 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2597 slot, btrfs_disk_key_objectid(&disk_key), 2598 btrfs_disk_key_type(&disk_key), 2599 btrfs_disk_key_offset(&disk_key), 2600 new_key->objectid, new_key->type, 2601 new_key->offset); 2602 BUG(); 2603 } 2604 } 2605 2606 btrfs_cpu_key_to_disk(&disk_key, new_key); 2607 btrfs_set_item_key(eb, &disk_key, slot); 2608 btrfs_mark_buffer_dirty(trans, eb); 2609 if (slot == 0) 2610 fixup_low_keys(trans, path, &disk_key, 1); 2611 } 2612 2613 /* 2614 * Check key order of two sibling extent buffers. 2615 * 2616 * Return true if something is wrong. 2617 * Return false if everything is fine. 2618 * 2619 * Tree-checker only works inside one tree block, thus the following 2620 * corruption cannot be detected by tree-checker: 2621 * 2622 * Leaf @left | Leaf @right 2623 * -------------------------------------------------------------- 2624 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 | 2625 * 2626 * Key f6 in leaf @left itself is valid, but not valid when the next 2627 * key in leaf @right is 7. 2628 * This can only be checked at tree block merge time. 2629 * And since tree checker has ensured all key order in each tree block 2630 * is correct, we only need to check the last key of @left and the first 2631 * key of @right.
2632 */ 2633 static bool check_sibling_keys(const struct extent_buffer *left, 2634 const struct extent_buffer *right) 2635 { 2636 struct btrfs_key left_last; 2637 struct btrfs_key right_first; 2638 int level = btrfs_header_level(left); 2639 int nr_left = btrfs_header_nritems(left); 2640 int nr_right = btrfs_header_nritems(right); 2641 2642 /* No key to check in one of the tree blocks */ 2643 if (!nr_left || !nr_right) 2644 return false; 2645 2646 if (level) { 2647 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1); 2648 btrfs_node_key_to_cpu(right, &right_first, 0); 2649 } else { 2650 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1); 2651 btrfs_item_key_to_cpu(right, &right_first, 0); 2652 } 2653 2654 if (unlikely(btrfs_comp_cpu_keys(&left_last, &right_first) >= 0)) { 2655 btrfs_crit(left->fs_info, "left extent buffer:"); 2656 btrfs_print_tree(left, false); 2657 btrfs_crit(left->fs_info, "right extent buffer:"); 2658 btrfs_print_tree(right, false); 2659 btrfs_crit(left->fs_info, 2660 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)", 2661 left_last.objectid, left_last.type, 2662 left_last.offset, right_first.objectid, 2663 right_first.type, right_first.offset); 2664 return true; 2665 } 2666 return false; 2667 } 2668 2669 /* 2670 * try to push data from one node into the next node left in the 2671 * tree. 2672 * 2673 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2674 * error, and > 0 if there was no room in the left hand block. 2675 */ 2676 static int push_node_left(struct btrfs_trans_handle *trans, 2677 struct extent_buffer *dst, 2678 struct extent_buffer *src, int empty) 2679 { 2680 struct btrfs_fs_info *fs_info = trans->fs_info; 2681 int push_items = 0; 2682 int src_nritems; 2683 int dst_nritems; 2684 int ret = 0; 2685 2686 src_nritems = btrfs_header_nritems(src); 2687 dst_nritems = btrfs_header_nritems(dst); 2688 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2689 WARN_ON(btrfs_header_generation(src) != trans->transid); 2690 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2691 2692 if (!empty && src_nritems <= 8) 2693 return 1; 2694 2695 if (push_items <= 0) 2696 return 1; 2697 2698 if (empty) { 2699 push_items = min(src_nritems, push_items); 2700 if (push_items < src_nritems) { 2701 /* leave at least 8 pointers in the node if 2702 * we aren't going to empty it 2703 */ 2704 if (src_nritems - push_items < 8) { 2705 if (push_items <= 8) 2706 return 1; 2707 push_items -= 8; 2708 } 2709 } 2710 } else 2711 push_items = min(src_nritems - 8, push_items); 2712 2713 /* dst is the left eb, src is the middle eb */ 2714 if (check_sibling_keys(dst, src)) { 2715 ret = -EUCLEAN; 2716 btrfs_abort_transaction(trans, ret); 2717 return ret; 2718 } 2719 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items); 2720 if (ret) { 2721 btrfs_abort_transaction(trans, ret); 2722 return ret; 2723 } 2724 copy_extent_buffer(dst, src, 2725 btrfs_node_key_ptr_offset(dst, dst_nritems), 2726 btrfs_node_key_ptr_offset(src, 0), 2727 push_items * sizeof(struct btrfs_key_ptr)); 2728 2729 if (push_items < src_nritems) { 2730 /* 2731 * btrfs_tree_mod_log_eb_copy handles logging the move, so we 2732 * don't need to do an explicit tree mod log operation for it. 
2733 */ 2734 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0), 2735 btrfs_node_key_ptr_offset(src, push_items), 2736 (src_nritems - push_items) * 2737 sizeof(struct btrfs_key_ptr)); 2738 } 2739 btrfs_set_header_nritems(src, src_nritems - push_items); 2740 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2741 btrfs_mark_buffer_dirty(trans, src); 2742 btrfs_mark_buffer_dirty(trans, dst); 2743 2744 return ret; 2745 } 2746 2747 /* 2748 * try to push data from one node into the next node right in the 2749 * tree. 2750 * 2751 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 2752 * error, and > 0 if there was no room in the right hand block. 2753 * 2754 * this will only push up to 1/2 the contents of the left node over 2755 */ 2756 static int balance_node_right(struct btrfs_trans_handle *trans, 2757 struct extent_buffer *dst, 2758 struct extent_buffer *src) 2759 { 2760 struct btrfs_fs_info *fs_info = trans->fs_info; 2761 int push_items = 0; 2762 int max_push; 2763 int src_nritems; 2764 int dst_nritems; 2765 int ret = 0; 2766 2767 WARN_ON(btrfs_header_generation(src) != trans->transid); 2768 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2769 2770 src_nritems = btrfs_header_nritems(src); 2771 dst_nritems = btrfs_header_nritems(dst); 2772 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2773 if (push_items <= 0) 2774 return 1; 2775 2776 if (src_nritems < 4) 2777 return 1; 2778 2779 max_push = src_nritems / 2 + 1; 2780 /* don't try to empty the node */ 2781 if (max_push >= src_nritems) 2782 return 1; 2783 2784 if (max_push < push_items) 2785 push_items = max_push; 2786 2787 /* dst is the right eb, src is the middle eb */ 2788 if (check_sibling_keys(src, dst)) { 2789 ret = -EUCLEAN; 2790 btrfs_abort_transaction(trans, ret); 2791 return ret; 2792 } 2793 2794 /* 2795 * btrfs_tree_mod_log_eb_copy handles logging the move, so we don't 2796 * need to do an explicit tree mod log operation for it. 2797 */ 2798 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items), 2799 btrfs_node_key_ptr_offset(dst, 0), 2800 (dst_nritems) * 2801 sizeof(struct btrfs_key_ptr)); 2802 2803 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items, 2804 push_items); 2805 if (ret) { 2806 btrfs_abort_transaction(trans, ret); 2807 return ret; 2808 } 2809 copy_extent_buffer(dst, src, 2810 btrfs_node_key_ptr_offset(dst, 0), 2811 btrfs_node_key_ptr_offset(src, src_nritems - push_items), 2812 push_items * sizeof(struct btrfs_key_ptr)); 2813 2814 btrfs_set_header_nritems(src, src_nritems - push_items); 2815 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2816 2817 btrfs_mark_buffer_dirty(trans, src); 2818 btrfs_mark_buffer_dirty(trans, dst); 2819 2820 return ret; 2821 } 2822 2823 /* 2824 * helper function to insert a new root level in the tree. 2825 * A new node is allocated, and a single item is inserted to 2826 * point to the existing root 2827 * 2828 * returns zero on success or < 0 on failure. 
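 *
 * Illustrative before/after sketch (assuming the old root sat at
 * @level - 1):
 *
 *	before:  root->node = lower
 *	after:   root->node = c  (at @level),  c->ptr[0] -> lower
 *
 * The tree grows one level taller and the old root becomes the single
 * child of the new root, whose key 0 is the old root's first key.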
2829 */ 2830 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 2831 struct btrfs_root *root, 2832 struct btrfs_path *path, int level) 2833 { 2834 u64 lower_gen; 2835 struct extent_buffer *lower; 2836 struct extent_buffer *c; 2837 struct extent_buffer *old; 2838 struct btrfs_disk_key lower_key; 2839 int ret; 2840 2841 BUG_ON(path->nodes[level]); 2842 BUG_ON(path->nodes[level-1] != root->node); 2843 2844 lower = path->nodes[level-1]; 2845 if (level == 1) 2846 btrfs_item_key(lower, &lower_key, 0); 2847 else 2848 btrfs_node_key(lower, &lower_key, 0); 2849 2850 c = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root), 2851 &lower_key, level, root->node->start, 0, 2852 0, BTRFS_NESTING_NEW_ROOT); 2853 if (IS_ERR(c)) 2854 return PTR_ERR(c); 2855 2856 root_add_used_bytes(root); 2857 2858 btrfs_set_header_nritems(c, 1); 2859 btrfs_set_node_key(c, &lower_key, 0); 2860 btrfs_set_node_blockptr(c, 0, lower->start); 2861 lower_gen = btrfs_header_generation(lower); 2862 WARN_ON(lower_gen != trans->transid); 2863 2864 btrfs_set_node_ptr_generation(c, 0, lower_gen); 2865 2866 btrfs_mark_buffer_dirty(trans, c); 2867 2868 old = root->node; 2869 ret = btrfs_tree_mod_log_insert_root(root->node, c, false); 2870 if (ret < 0) { 2871 int ret2; 2872 2873 ret2 = btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1); 2874 if (ret2 < 0) 2875 btrfs_abort_transaction(trans, ret2); 2876 btrfs_tree_unlock(c); 2877 free_extent_buffer(c); 2878 return ret; 2879 } 2880 rcu_assign_pointer(root->node, c); 2881 2882 /* the super has an extra ref to root->node */ 2883 free_extent_buffer(old); 2884 2885 add_root_to_dirty_list(root); 2886 atomic_inc(&c->refs); 2887 path->nodes[level] = c; 2888 path->locks[level] = BTRFS_WRITE_LOCK; 2889 path->slots[level] = 0; 2890 return 0; 2891 } 2892 2893 /* 2894 * worker function to insert a single pointer in a node. 2895 * the node should have enough room for the pointer already 2896 * 2897 * slot and level indicate where you want the key to go, and 2898 * bytenr is the block the key points to.
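 *
 * E.g. (sketch only) inserting at slot 2 of a node holding [k0 k1 k2 k3]
 * shifts the tail right and yields [k0 k1 new k2 k3], bumping nritems
 * from 4 to 5; any slot <= nritems is valid.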
2899 */ 2900 static int insert_ptr(struct btrfs_trans_handle *trans, 2901 const struct btrfs_path *path, 2902 const struct btrfs_disk_key *key, u64 bytenr, 2903 int slot, int level) 2904 { 2905 struct extent_buffer *lower; 2906 int nritems; 2907 int ret; 2908 2909 BUG_ON(!path->nodes[level]); 2910 btrfs_assert_tree_write_locked(path->nodes[level]); 2911 lower = path->nodes[level]; 2912 nritems = btrfs_header_nritems(lower); 2913 BUG_ON(slot > nritems); 2914 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info)); 2915 if (slot != nritems) { 2916 if (level) { 2917 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1, 2918 slot, nritems - slot); 2919 if (ret < 0) { 2920 btrfs_abort_transaction(trans, ret); 2921 return ret; 2922 } 2923 } 2924 memmove_extent_buffer(lower, 2925 btrfs_node_key_ptr_offset(lower, slot + 1), 2926 btrfs_node_key_ptr_offset(lower, slot), 2927 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 2928 } 2929 if (level) { 2930 ret = btrfs_tree_mod_log_insert_key(lower, slot, 2931 BTRFS_MOD_LOG_KEY_ADD); 2932 if (ret < 0) { 2933 btrfs_abort_transaction(trans, ret); 2934 return ret; 2935 } 2936 } 2937 btrfs_set_node_key(lower, key, slot); 2938 btrfs_set_node_blockptr(lower, slot, bytenr); 2939 WARN_ON(trans->transid == 0); 2940 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 2941 btrfs_set_header_nritems(lower, nritems + 1); 2942 btrfs_mark_buffer_dirty(trans, lower); 2943 2944 return 0; 2945 } 2946 2947 /* 2948 * split the node at the specified level in path in two. 2949 * The path is corrected to point to the appropriate node after the split 2950 * 2951 * Before splitting this tries to make some room in the node by pushing 2952 * left and right, if either one works, it returns right away. 2953 * 2954 * returns 0 on success and < 0 on failure 2955 */ 2956 static noinline int split_node(struct btrfs_trans_handle *trans, 2957 struct btrfs_root *root, 2958 struct btrfs_path *path, int level) 2959 { 2960 struct btrfs_fs_info *fs_info = root->fs_info; 2961 struct extent_buffer *c; 2962 struct extent_buffer *split; 2963 struct btrfs_disk_key disk_key; 2964 int mid; 2965 int ret; 2966 u32 c_nritems; 2967 2968 c = path->nodes[level]; 2969 WARN_ON(btrfs_header_generation(c) != trans->transid); 2970 if (c == root->node) { 2971 /* 2972 * trying to split the root, let's make a new one 2973 * 2974 * tree mod log: We don't log the removal of the old root in 2975 * insert_new_root, because that root buffer will be kept as a 2976 * normal node. We are going to log removal of half of the 2977 * elements below with btrfs_tree_mod_log_eb_copy(). We're 2978 * holding a tree lock on the buffer, which is why we cannot 2979 * race with other tree_mod_log users.
2980 */ 2981 ret = insert_new_root(trans, root, path, level + 1); 2982 if (ret) 2983 return ret; 2984 } else { 2985 ret = push_nodes_for_insert(trans, root, path, level); 2986 c = path->nodes[level]; 2987 if (!ret && btrfs_header_nritems(c) < 2988 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) 2989 return 0; 2990 if (ret < 0) 2991 return ret; 2992 } 2993 2994 c_nritems = btrfs_header_nritems(c); 2995 mid = (c_nritems + 1) / 2; 2996 btrfs_node_key(c, &disk_key, mid); 2997 2998 split = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root), 2999 &disk_key, level, c->start, 0, 3000 0, BTRFS_NESTING_SPLIT); 3001 if (IS_ERR(split)) 3002 return PTR_ERR(split); 3003 3004 root_add_used_bytes(root); 3005 ASSERT(btrfs_header_level(c) == level); 3006 3007 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); 3008 if (ret) { 3009 btrfs_tree_unlock(split); 3010 free_extent_buffer(split); 3011 btrfs_abort_transaction(trans, ret); 3012 return ret; 3013 } 3014 copy_extent_buffer(split, c, 3015 btrfs_node_key_ptr_offset(split, 0), 3016 btrfs_node_key_ptr_offset(c, mid), 3017 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 3018 btrfs_set_header_nritems(split, c_nritems - mid); 3019 btrfs_set_header_nritems(c, mid); 3020 3021 btrfs_mark_buffer_dirty(trans, c); 3022 btrfs_mark_buffer_dirty(trans, split); 3023 3024 ret = insert_ptr(trans, path, &disk_key, split->start, 3025 path->slots[level + 1] + 1, level + 1); 3026 if (ret < 0) { 3027 btrfs_tree_unlock(split); 3028 free_extent_buffer(split); 3029 return ret; 3030 } 3031 3032 if (path->slots[level] >= mid) { 3033 path->slots[level] -= mid; 3034 btrfs_tree_unlock(c); 3035 free_extent_buffer(c); 3036 path->nodes[level] = split; 3037 path->slots[level + 1] += 1; 3038 } else { 3039 btrfs_tree_unlock(split); 3040 free_extent_buffer(split); 3041 } 3042 return 0; 3043 } 3044 3045 /* 3046 * how many bytes are required to store the items in a leaf. start 3047 * and nr indicate which items in the leaf to check. This totals up the 3048 * space used both by the item structs and the item data 3049 */ 3050 static int leaf_space_used(const struct extent_buffer *l, int start, int nr) 3051 { 3052 int data_len; 3053 int nritems = btrfs_header_nritems(l); 3054 int end = min(nritems, start + nr) - 1; 3055 3056 if (!nr) 3057 return 0; 3058 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start); 3059 data_len = data_len - btrfs_item_offset(l, end); 3060 data_len += sizeof(struct btrfs_item) * nr; 3061 WARN_ON(data_len < 0); 3062 return data_len; 3063 } 3064 3065 /* 3066 * The space between the end of the leaf items and 3067 * the start of the leaf data. IOW, how much room 3068 * the leaf has left for both items and data 3069 */ 3070 int btrfs_leaf_free_space(const struct extent_buffer *leaf) 3071 { 3072 struct btrfs_fs_info *fs_info = leaf->fs_info; 3073 int nritems = btrfs_header_nritems(leaf); 3074 int ret; 3075 3076 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); 3077 if (ret < 0) { 3078 btrfs_crit(fs_info, 3079 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 3080 ret, 3081 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info), 3082 leaf_space_used(leaf, 0, nritems), nritems); 3083 } 3084 return ret; 3085 } 3086 3087 /* 3088 * min slot controls the lowest index we're willing to push to the 3089 * right. 
We'll push up to and including min_slot, but no lower 3090 */ 3091 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, 3092 struct btrfs_path *path, 3093 int data_size, int empty, 3094 struct extent_buffer *right, 3095 int free_space, u32 left_nritems, 3096 u32 min_slot) 3097 { 3098 struct btrfs_fs_info *fs_info = right->fs_info; 3099 struct extent_buffer *left = path->nodes[0]; 3100 struct extent_buffer *upper = path->nodes[1]; 3101 struct btrfs_map_token token; 3102 struct btrfs_disk_key disk_key; 3103 int slot; 3104 u32 i; 3105 int push_space = 0; 3106 int push_items = 0; 3107 u32 nr; 3108 u32 right_nritems; 3109 u32 data_end; 3110 u32 this_item_size; 3111 3112 if (empty) 3113 nr = 0; 3114 else 3115 nr = max_t(u32, 1, min_slot); 3116 3117 if (path->slots[0] >= left_nritems) 3118 push_space += data_size; 3119 3120 slot = path->slots[1]; 3121 i = left_nritems - 1; 3122 while (i >= nr) { 3123 if (!empty && push_items > 0) { 3124 if (path->slots[0] > i) 3125 break; 3126 if (path->slots[0] == i) { 3127 int space = btrfs_leaf_free_space(left); 3128 3129 if (space + push_space * 2 > free_space) 3130 break; 3131 } 3132 } 3133 3134 if (path->slots[0] == i) 3135 push_space += data_size; 3136 3137 this_item_size = btrfs_item_size(left, i); 3138 if (this_item_size + sizeof(struct btrfs_item) + 3139 push_space > free_space) 3140 break; 3141 3142 push_items++; 3143 push_space += this_item_size + sizeof(struct btrfs_item); 3144 if (i == 0) 3145 break; 3146 i--; 3147 } 3148 3149 if (push_items == 0) 3150 goto out_unlock; 3151 3152 WARN_ON(!empty && push_items == left_nritems); 3153 3154 /* push left to right */ 3155 right_nritems = btrfs_header_nritems(right); 3156 3157 push_space = btrfs_item_data_end(left, left_nritems - push_items); 3158 push_space -= leaf_data_end(left); 3159 3160 /* make room in the right data area */ 3161 data_end = leaf_data_end(right); 3162 memmove_leaf_data(right, data_end - push_space, data_end, 3163 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end); 3164 3165 /* copy from the left data area */ 3166 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3167 leaf_data_end(left), push_space); 3168 3169 memmove_leaf_items(right, push_items, 0, right_nritems); 3170 3171 /* copy the items from left to right */ 3172 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items); 3173 3174 /* update the item pointers */ 3175 btrfs_init_map_token(&token, right); 3176 right_nritems += push_items; 3177 btrfs_set_header_nritems(right, right_nritems); 3178 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3179 for (i = 0; i < right_nritems; i++) { 3180 push_space -= btrfs_token_item_size(&token, i); 3181 btrfs_set_token_item_offset(&token, i, push_space); 3182 } 3183 3184 left_nritems -= push_items; 3185 btrfs_set_header_nritems(left, left_nritems); 3186 3187 if (left_nritems) 3188 btrfs_mark_buffer_dirty(trans, left); 3189 else 3190 btrfs_clear_buffer_dirty(trans, left); 3191 3192 btrfs_mark_buffer_dirty(trans, right); 3193 3194 btrfs_item_key(right, &disk_key, 0); 3195 btrfs_set_node_key(upper, &disk_key, slot + 1); 3196 btrfs_mark_buffer_dirty(trans, upper); 3197 3198 /* then fixup the leaf pointer in the path */ 3199 if (path->slots[0] >= left_nritems) { 3200 path->slots[0] -= left_nritems; 3201 if (btrfs_header_nritems(path->nodes[0]) == 0) 3202 btrfs_clear_buffer_dirty(trans, path->nodes[0]); 3203 btrfs_tree_unlock(path->nodes[0]); 3204 free_extent_buffer(path->nodes[0]); 3205 path->nodes[0] = right; 3206 path->slots[1] += 1; 3207 } else { 3208 
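/* Path still points into the left leaf, just drop our lock and extra reference on right. */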
btrfs_tree_unlock(right); 3209 free_extent_buffer(right); 3210 } 3211 return 0; 3212 3213 out_unlock: 3214 btrfs_tree_unlock(right); 3215 free_extent_buffer(right); 3216 return 1; 3217 } 3218 3219 /* 3220 * push some data in the path leaf to the right, trying to free up at 3221 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3222 * 3223 * returns 1 if the push failed because the other node didn't have enough 3224 * room, 0 if everything worked out and < 0 if there were major errors. 3225 * 3226 * this will push starting from min_slot to the end of the leaf. It won't 3227 * push any slot lower than min_slot 3228 */ 3229 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3230 *root, struct btrfs_path *path, 3231 int min_data_size, int data_size, 3232 int empty, u32 min_slot) 3233 { 3234 struct extent_buffer *left = path->nodes[0]; 3235 struct extent_buffer *right; 3236 struct extent_buffer *upper; 3237 int slot; 3238 int free_space; 3239 u32 left_nritems; 3240 int ret; 3241 3242 if (!path->nodes[1]) 3243 return 1; 3244 3245 slot = path->slots[1]; 3246 upper = path->nodes[1]; 3247 if (slot >= btrfs_header_nritems(upper) - 1) 3248 return 1; 3249 3250 btrfs_assert_tree_write_locked(path->nodes[1]); 3251 3252 right = btrfs_read_node_slot(upper, slot + 1); 3253 if (IS_ERR(right)) 3254 return PTR_ERR(right); 3255 3256 btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT); 3257 3258 free_space = btrfs_leaf_free_space(right); 3259 if (free_space < data_size) 3260 goto out_unlock; 3261 3262 ret = btrfs_cow_block(trans, root, right, upper, 3263 slot + 1, &right, BTRFS_NESTING_RIGHT_COW); 3264 if (ret) 3265 goto out_unlock; 3266 3267 left_nritems = btrfs_header_nritems(left); 3268 if (left_nritems == 0) 3269 goto out_unlock; 3270 3271 if (check_sibling_keys(left, right)) { 3272 ret = -EUCLEAN; 3273 btrfs_abort_transaction(trans, ret); 3274 btrfs_tree_unlock(right); 3275 free_extent_buffer(right); 3276 return ret; 3277 } 3278 if (path->slots[0] == left_nritems && !empty) { 3279 /* Key greater than all keys in the leaf, right neighbor has 3280 * enough room for it and we're not emptying our leaf to delete 3281 * it, therefore use right neighbor to insert the new item and 3282 * no need to touch/dirty our left leaf. */ 3283 btrfs_tree_unlock(left); 3284 free_extent_buffer(left); 3285 path->nodes[0] = right; 3286 path->slots[0] = 0; 3287 path->slots[1]++; 3288 return 0; 3289 } 3290 3291 return __push_leaf_right(trans, path, min_data_size, empty, right, 3292 free_space, left_nritems, min_slot); 3293 out_unlock: 3294 btrfs_tree_unlock(right); 3295 free_extent_buffer(right); 3296 return 1; 3297 } 3298 3299 /* 3300 * push some data in the path leaf to the left, trying to free up at 3301 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3302 * 3303 * max_slot can put a limit on how far into the leaf we'll push items. The 3304 * item at 'max_slot' won't be touched. 
Use (u32)-1 to make us push all the 3305 * items 3306 */ 3307 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, 3308 struct btrfs_path *path, int data_size, 3309 int empty, struct extent_buffer *left, 3310 int free_space, u32 right_nritems, 3311 u32 max_slot) 3312 { 3313 struct btrfs_fs_info *fs_info = left->fs_info; 3314 struct btrfs_disk_key disk_key; 3315 struct extent_buffer *right = path->nodes[0]; 3316 int i; 3317 int push_space = 0; 3318 int push_items = 0; 3319 u32 old_left_nritems; 3320 u32 nr; 3321 int ret = 0; 3322 u32 this_item_size; 3323 u32 old_left_item_size; 3324 struct btrfs_map_token token; 3325 3326 if (empty) 3327 nr = min(right_nritems, max_slot); 3328 else 3329 nr = min(right_nritems - 1, max_slot); 3330 3331 for (i = 0; i < nr; i++) { 3332 if (!empty && push_items > 0) { 3333 if (path->slots[0] < i) 3334 break; 3335 if (path->slots[0] == i) { 3336 int space = btrfs_leaf_free_space(right); 3337 3338 if (space + push_space * 2 > free_space) 3339 break; 3340 } 3341 } 3342 3343 if (path->slots[0] == i) 3344 push_space += data_size; 3345 3346 this_item_size = btrfs_item_size(right, i); 3347 if (this_item_size + sizeof(struct btrfs_item) + push_space > 3348 free_space) 3349 break; 3350 3351 push_items++; 3352 push_space += this_item_size + sizeof(struct btrfs_item); 3353 } 3354 3355 if (push_items == 0) { 3356 ret = 1; 3357 goto out; 3358 } 3359 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 3360 3361 /* push data from right to left */ 3362 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items); 3363 3364 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) - 3365 btrfs_item_offset(right, push_items - 1); 3366 3367 copy_leaf_data(left, right, leaf_data_end(left) - push_space, 3368 btrfs_item_offset(right, push_items - 1), push_space); 3369 old_left_nritems = btrfs_header_nritems(left); 3370 BUG_ON(old_left_nritems <= 0); 3371 3372 btrfs_init_map_token(&token, left); 3373 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1); 3374 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3375 u32 ioff; 3376 3377 ioff = btrfs_token_item_offset(&token, i); 3378 btrfs_set_token_item_offset(&token, i, 3379 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 3380 } 3381 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3382 3383 /* fixup right node */ 3384 if (push_items > right_nritems) 3385 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3386 right_nritems); 3387 3388 if (push_items < right_nritems) { 3389 push_space = btrfs_item_offset(right, push_items - 1) - 3390 leaf_data_end(right); 3391 memmove_leaf_data(right, 3392 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3393 leaf_data_end(right), push_space); 3394 3395 memmove_leaf_items(right, 0, push_items, 3396 btrfs_header_nritems(right) - push_items); 3397 } 3398 3399 btrfs_init_map_token(&token, right); 3400 right_nritems -= push_items; 3401 btrfs_set_header_nritems(right, right_nritems); 3402 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3403 for (i = 0; i < right_nritems; i++) { 3404 push_space = push_space - btrfs_token_item_size(&token, i); 3405 btrfs_set_token_item_offset(&token, i, push_space); 3406 } 3407 3408 btrfs_mark_buffer_dirty(trans, left); 3409 if (right_nritems) 3410 btrfs_mark_buffer_dirty(trans, right); 3411 else 3412 btrfs_clear_buffer_dirty(trans, right); 3413 3414 btrfs_item_key(right, &disk_key, 0); 3415 fixup_low_keys(trans, path, &disk_key, 1); 3416 3417 /* then fixup the leaf pointer in the path */ 3418 if
(path->slots[0] < push_items) { 3419 path->slots[0] += old_left_nritems; 3420 btrfs_tree_unlock(path->nodes[0]); 3421 free_extent_buffer(path->nodes[0]); 3422 path->nodes[0] = left; 3423 path->slots[1] -= 1; 3424 } else { 3425 btrfs_tree_unlock(left); 3426 free_extent_buffer(left); 3427 path->slots[0] -= push_items; 3428 } 3429 BUG_ON(path->slots[0] < 0); 3430 return ret; 3431 out: 3432 btrfs_tree_unlock(left); 3433 free_extent_buffer(left); 3434 return ret; 3435 } 3436 3437 /* 3438 * push some data in the path leaf to the left, trying to free up at 3439 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3440 * 3441 * max_slot can put a limit on how far into the leaf we'll push items. The 3442 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3443 * items 3444 */ 3445 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3446 *root, struct btrfs_path *path, int min_data_size, 3447 int data_size, int empty, u32 max_slot) 3448 { 3449 struct extent_buffer *right = path->nodes[0]; 3450 struct extent_buffer *left; 3451 int slot; 3452 int free_space; 3453 u32 right_nritems; 3454 int ret = 0; 3455 3456 slot = path->slots[1]; 3457 if (slot == 0) 3458 return 1; 3459 if (!path->nodes[1]) 3460 return 1; 3461 3462 right_nritems = btrfs_header_nritems(right); 3463 if (right_nritems == 0) 3464 return 1; 3465 3466 btrfs_assert_tree_write_locked(path->nodes[1]); 3467 3468 left = btrfs_read_node_slot(path->nodes[1], slot - 1); 3469 if (IS_ERR(left)) 3470 return PTR_ERR(left); 3471 3472 btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT); 3473 3474 free_space = btrfs_leaf_free_space(left); 3475 if (free_space < data_size) { 3476 ret = 1; 3477 goto out; 3478 } 3479 3480 ret = btrfs_cow_block(trans, root, left, 3481 path->nodes[1], slot - 1, &left, 3482 BTRFS_NESTING_LEFT_COW); 3483 if (ret) { 3484 /* we hit -ENOSPC, but it isn't fatal here */ 3485 if (ret == -ENOSPC) 3486 ret = 1; 3487 goto out; 3488 } 3489 3490 if (check_sibling_keys(left, right)) { 3491 ret = -EUCLEAN; 3492 btrfs_abort_transaction(trans, ret); 3493 goto out; 3494 } 3495 return __push_leaf_left(trans, path, min_data_size, empty, left, 3496 free_space, right_nritems, max_slot); 3497 out: 3498 btrfs_tree_unlock(left); 3499 free_extent_buffer(left); 3500 return ret; 3501 } 3502 3503 /* 3504 * split the path's leaf in two, making sure there is at least data_size 3505 * available for the resulting leaf level of the path. 
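 *
 * Sketch of the helper below: with @nritems items and split point @mid,
 * items [mid, nritems) and their data are copied into the new leaf
 * @right, and each moved item offset is rebased by rt_data_off so it
 * points into @right's data area.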
3506 */ 3507 static noinline int copy_for_split(struct btrfs_trans_handle *trans, 3508 struct btrfs_path *path, 3509 struct extent_buffer *l, 3510 struct extent_buffer *right, 3511 int slot, int mid, int nritems) 3512 { 3513 struct btrfs_fs_info *fs_info = trans->fs_info; 3514 int data_copy_size; 3515 int rt_data_off; 3516 int i; 3517 int ret; 3518 struct btrfs_disk_key disk_key; 3519 struct btrfs_map_token token; 3520 3521 nritems = nritems - mid; 3522 btrfs_set_header_nritems(right, nritems); 3523 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l); 3524 3525 copy_leaf_items(right, l, 0, mid, nritems); 3526 3527 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size, 3528 leaf_data_end(l), data_copy_size); 3529 3530 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid); 3531 3532 btrfs_init_map_token(&token, right); 3533 for (i = 0; i < nritems; i++) { 3534 u32 ioff; 3535 3536 ioff = btrfs_token_item_offset(&token, i); 3537 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off); 3538 } 3539 3540 btrfs_set_header_nritems(l, mid); 3541 btrfs_item_key(right, &disk_key, 0); 3542 ret = insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); 3543 if (ret < 0) 3544 return ret; 3545 3546 btrfs_mark_buffer_dirty(trans, right); 3547 btrfs_mark_buffer_dirty(trans, l); 3548 BUG_ON(path->slots[0] != slot); 3549 3550 if (mid <= slot) { 3551 btrfs_tree_unlock(path->nodes[0]); 3552 free_extent_buffer(path->nodes[0]); 3553 path->nodes[0] = right; 3554 path->slots[0] -= mid; 3555 path->slots[1] += 1; 3556 } else { 3557 btrfs_tree_unlock(right); 3558 free_extent_buffer(right); 3559 } 3560 3561 BUG_ON(path->slots[0] < 0); 3562 3563 return 0; 3564 } 3565 3566 /* 3567 * double splits happen when we need to insert a big item in the middle 3568 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3569 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3570 * A B C 3571 * 3572 * We avoid this by trying to push the items on either side of our target 3573 * into the adjacent leaves. If all goes well we can avoid the double split 3574 * completely. 3575 */ 3576 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3577 struct btrfs_root *root, 3578 struct btrfs_path *path, 3579 int data_size) 3580 { 3581 int ret; 3582 int progress = 0; 3583 int slot; 3584 u32 nritems; 3585 int space_needed = data_size; 3586 3587 slot = path->slots[0]; 3588 if (slot < btrfs_header_nritems(path->nodes[0])) 3589 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3590 3591 /* 3592 * try to push all the items after our slot into the 3593 * right leaf 3594 */ 3595 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 3596 if (ret < 0) 3597 return ret; 3598 3599 if (ret == 0) 3600 progress++; 3601 3602 nritems = btrfs_header_nritems(path->nodes[0]); 3603 /* 3604 * our goal is to get our slot at the start or end of a leaf. 
If 3605 * we've done so we're done 3606 */ 3607 if (path->slots[0] == 0 || path->slots[0] == nritems) 3608 return 0; 3609 3610 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3611 return 0; 3612 3613 /* try to push all the items before our slot into the next leaf */ 3614 slot = path->slots[0]; 3615 space_needed = data_size; 3616 if (slot > 0) 3617 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3618 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 3619 if (ret < 0) 3620 return ret; 3621 3622 if (ret == 0) 3623 progress++; 3624 3625 if (progress) 3626 return 0; 3627 return 1; 3628 } 3629 3630 /* 3631 * split the path's leaf in two, making sure there is at least data_size 3632 * available for the resulting leaf level of the path. 3633 * 3634 * returns 0 if all went well and < 0 on failure. 3635 */ 3636 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3637 struct btrfs_root *root, 3638 const struct btrfs_key *ins_key, 3639 struct btrfs_path *path, int data_size, 3640 int extend) 3641 { 3642 struct btrfs_disk_key disk_key; 3643 struct extent_buffer *l; 3644 u32 nritems; 3645 int mid; 3646 int slot; 3647 struct extent_buffer *right; 3648 struct btrfs_fs_info *fs_info = root->fs_info; 3649 int ret = 0; 3650 int wret; 3651 int split; 3652 int num_doubles = 0; 3653 int tried_avoid_double = 0; 3654 3655 l = path->nodes[0]; 3656 slot = path->slots[0]; 3657 if (extend && data_size + btrfs_item_size(l, slot) + 3658 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info)) 3659 return -EOVERFLOW; 3660 3661 /* first try to make some room by pushing left and right */ 3662 if (data_size && path->nodes[1]) { 3663 int space_needed = data_size; 3664 3665 if (slot < btrfs_header_nritems(l)) 3666 space_needed -= btrfs_leaf_free_space(l); 3667 3668 wret = push_leaf_right(trans, root, path, space_needed, 3669 space_needed, 0, 0); 3670 if (wret < 0) 3671 return wret; 3672 if (wret) { 3673 space_needed = data_size; 3674 if (slot > 0) 3675 space_needed -= btrfs_leaf_free_space(l); 3676 wret = push_leaf_left(trans, root, path, space_needed, 3677 space_needed, 0, (u32)-1); 3678 if (wret < 0) 3679 return wret; 3680 } 3681 l = path->nodes[0]; 3682 3683 /* did the pushes work? 
*/ 3684 if (btrfs_leaf_free_space(l) >= data_size) 3685 return 0; 3686 } 3687 3688 if (!path->nodes[1]) { 3689 ret = insert_new_root(trans, root, path, 1); 3690 if (ret) 3691 return ret; 3692 } 3693 again: 3694 split = 1; 3695 l = path->nodes[0]; 3696 slot = path->slots[0]; 3697 nritems = btrfs_header_nritems(l); 3698 mid = (nritems + 1) / 2; 3699 3700 if (mid <= slot) { 3701 if (nritems == 1 || 3702 leaf_space_used(l, mid, nritems - mid) + data_size > 3703 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3704 if (slot >= nritems) { 3705 split = 0; 3706 } else { 3707 mid = slot; 3708 if (mid != nritems && 3709 leaf_space_used(l, mid, nritems - mid) + 3710 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3711 if (data_size && !tried_avoid_double) 3712 goto push_for_double; 3713 split = 2; 3714 } 3715 } 3716 } 3717 } else { 3718 if (leaf_space_used(l, 0, mid) + data_size > 3719 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3720 if (!extend && data_size && slot == 0) { 3721 split = 0; 3722 } else if ((extend || !data_size) && slot == 0) { 3723 mid = 1; 3724 } else { 3725 mid = slot; 3726 if (mid != nritems && 3727 leaf_space_used(l, mid, nritems - mid) + 3728 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3729 if (data_size && !tried_avoid_double) 3730 goto push_for_double; 3731 split = 2; 3732 } 3733 } 3734 } 3735 } 3736 3737 if (split == 0) 3738 btrfs_cpu_key_to_disk(&disk_key, ins_key); 3739 else 3740 btrfs_item_key(l, &disk_key, mid); 3741 3742 /* 3743 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double 3744 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES 3745 * subclasses, which is 8 at the time of this patch, and we've maxed it 3746 * out. In the future we could add a 3747 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just 3748 * use BTRFS_NESTING_NEW_ROOT. 3749 */ 3750 right = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root), 3751 &disk_key, 0, l->start, 0, 0, 3752 num_doubles ? BTRFS_NESTING_NEW_ROOT : 3753 BTRFS_NESTING_SPLIT); 3754 if (IS_ERR(right)) 3755 return PTR_ERR(right); 3756 3757 root_add_used_bytes(root); 3758 3759 if (split == 0) { 3760 if (mid <= slot) { 3761 btrfs_set_header_nritems(right, 0); 3762 ret = insert_ptr(trans, path, &disk_key, 3763 right->start, path->slots[1] + 1, 1); 3764 if (ret < 0) { 3765 btrfs_tree_unlock(right); 3766 free_extent_buffer(right); 3767 return ret; 3768 } 3769 btrfs_tree_unlock(path->nodes[0]); 3770 free_extent_buffer(path->nodes[0]); 3771 path->nodes[0] = right; 3772 path->slots[0] = 0; 3773 path->slots[1] += 1; 3774 } else { 3775 btrfs_set_header_nritems(right, 0); 3776 ret = insert_ptr(trans, path, &disk_key, 3777 right->start, path->slots[1], 1); 3778 if (ret < 0) { 3779 btrfs_tree_unlock(right); 3780 free_extent_buffer(right); 3781 return ret; 3782 } 3783 btrfs_tree_unlock(path->nodes[0]); 3784 free_extent_buffer(path->nodes[0]); 3785 path->nodes[0] = right; 3786 path->slots[0] = 0; 3787 if (path->slots[1] == 0) 3788 fixup_low_keys(trans, path, &disk_key, 1); 3789 } 3790 /* 3791 * We create a new leaf 'right' for the required ins_len and 3792 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying the new item's content into 'right'.
3794 */ 3795 return ret; 3796 } 3797 3798 ret = copy_for_split(trans, path, l, right, slot, mid, nritems); 3799 if (ret < 0) { 3800 btrfs_tree_unlock(right); 3801 free_extent_buffer(right); 3802 return ret; 3803 } 3804 3805 if (split == 2) { 3806 BUG_ON(num_doubles != 0); 3807 num_doubles++; 3808 goto again; 3809 } 3810 3811 return 0; 3812 3813 push_for_double: 3814 push_for_double_split(trans, root, path, data_size); 3815 tried_avoid_double = 1; 3816 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3817 return 0; 3818 goto again; 3819 } 3820 3821 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 3822 struct btrfs_root *root, 3823 struct btrfs_path *path, int ins_len) 3824 { 3825 struct btrfs_key key; 3826 struct extent_buffer *leaf; 3827 struct btrfs_file_extent_item *fi; 3828 u64 extent_len = 0; 3829 u32 item_size; 3830 int ret; 3831 3832 leaf = path->nodes[0]; 3833 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3834 3835 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 3836 key.type != BTRFS_RAID_STRIPE_KEY && 3837 key.type != BTRFS_EXTENT_CSUM_KEY); 3838 3839 if (btrfs_leaf_free_space(leaf) >= ins_len) 3840 return 0; 3841 3842 item_size = btrfs_item_size(leaf, path->slots[0]); 3843 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3844 fi = btrfs_item_ptr(leaf, path->slots[0], 3845 struct btrfs_file_extent_item); 3846 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3847 } 3848 btrfs_release_path(path); 3849 3850 path->keep_locks = 1; 3851 path->search_for_split = 1; 3852 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3853 path->search_for_split = 0; 3854 if (ret > 0) 3855 ret = -EAGAIN; 3856 if (ret < 0) 3857 goto err; 3858 3859 ret = -EAGAIN; 3860 leaf = path->nodes[0]; 3861 /* if our item isn't there, return now */ 3862 if (item_size != btrfs_item_size(leaf, path->slots[0])) 3863 goto err; 3864 3865 /* the leaf has changed, it now has room. return now */ 3866 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) 3867 goto err; 3868 3869 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3870 fi = btrfs_item_ptr(leaf, path->slots[0], 3871 struct btrfs_file_extent_item); 3872 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 3873 goto err; 3874 } 3875 3876 ret = split_leaf(trans, root, &key, path, ins_len, 1); 3877 if (ret) 3878 goto err; 3879 3880 path->keep_locks = 0; 3881 btrfs_unlock_up_safe(path, 1); 3882 return 0; 3883 err: 3884 path->keep_locks = 0; 3885 return ret; 3886 } 3887 3888 static noinline int split_item(struct btrfs_trans_handle *trans, 3889 struct btrfs_path *path, 3890 const struct btrfs_key *new_key, 3891 unsigned long split_offset) 3892 { 3893 struct extent_buffer *leaf; 3894 int orig_slot, slot; 3895 char *buf; 3896 u32 nritems; 3897 u32 item_size; 3898 u32 orig_offset; 3899 struct btrfs_disk_key disk_key; 3900 3901 leaf = path->nodes[0]; 3902 /* 3903 * Shouldn't happen because the caller must have previously called 3904 * setup_leaf_for_split() to make room for the new item in the leaf. 
	if (WARN_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)))
		return -ENOSPC;

	orig_slot = path->slots[0];
	orig_offset = btrfs_item_offset(leaf, path->slots[0]);
	item_size = btrfs_item_size(leaf, path->slots[0]);

	buf = kmalloc(item_size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			   path->slots[0]), item_size);

	slot = path->slots[0] + 1;
	nritems = btrfs_header_nritems(leaf);
	if (slot != nritems) {
		/* shift the items */
		memmove_leaf_items(leaf, slot + 1, slot, nritems - slot);
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	btrfs_set_item_offset(leaf, slot, orig_offset);
	btrfs_set_item_size(leaf, slot, item_size - split_offset);

	btrfs_set_item_offset(leaf, orig_slot,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, orig_slot, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(trans, leaf);

	BUG_ON(btrfs_leaf_free_space(leaf) < 0);
	kfree(buf);
	return 0;
}

/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation. After
 * the split, the path is pointing to the old item. The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;
	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;

	ret = split_item(trans, path, new_key, split_offset);
	return ret;
}
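
/*
 * A minimal usage sketch for btrfs_split_item() (an illustration, not a
 * caller taken from this file; the "example_" name and the key choice are
 * assumptions). We split off the tail of the current item, starting at
 * byte 'split_offset' within it, and give that tail a new key offset.
 */
static int __maybe_unused example_split_item_tail(struct btrfs_trans_handle *trans,
						  struct btrfs_root *root,
						  struct btrfs_path *path,
						  u64 new_offset,
						  unsigned long split_offset)
{
	struct btrfs_key new_key;

	/* Keep objectid/type, give the tail half its own key offset. */
	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;

	/*
	 * May release and re-search the path internally; on success the
	 * path still points at the (now shortened) original item.
	 */
	return btrfs_split_item(trans, root, path, &new_key, split_offset);
}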

/*
 * make the item pointed to by the path smaller. new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
			 const struct btrfs_path *path, u32 new_size, int from_end)
{
	int slot;
	struct extent_buffer *leaf;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;
	struct btrfs_map_token token;

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size(leaf, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(leaf);

	old_data_start = btrfs_item_offset(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	btrfs_init_map_token(&token, leaf);
	for (i = slot; i < nritems; i++) {
		u32 ioff;

		ioff = btrfs_token_item_offset(&token, i);
		btrfs_set_token_item_offset(&token, i, ioff + size_diff);
	}

	/* shift the data */
	if (from_end) {
		memmove_leaf_data(leaf, data_end + size_diff, data_end,
				  old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
						      (unsigned long)fi,
						      BTRFS_FILE_EXTENT_INLINE_DATA_START);
			}
		}

		memmove_leaf_data(leaf, data_end + size_diff, data_end,
				  old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(trans, path, &disk_key, 1);
	}

	btrfs_set_item_size(leaf, slot, new_size);
	btrfs_mark_buffer_dirty(trans, leaf);

	if (btrfs_leaf_free_space(leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}
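
/*
 * Sketch of the usual btrfs_truncate_item() pattern (illustrative only,
 * with assumed offsets; the "example_" name is not part of this file):
 * to drop a sub-record from the middle of an item, slide the item's tail
 * over the record with memmove_extent_buffer() and then chop the now
 * unused bytes off the end (from_end == 1).
 */
static void __maybe_unused example_remove_sub_record(struct btrfs_trans_handle *trans,
						     struct btrfs_path *path,
						     unsigned long rec_start,
						     u32 rec_len)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 item_size = btrfs_item_size(leaf, path->slots[0]);
	unsigned long ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);

	/* Slide everything after the record over it ... */
	memmove_extent_buffer(leaf, ptr + rec_start,
			      ptr + rec_start + rec_len,
			      item_size - rec_start - rec_len);
	/* ... and shrink the item from the end. */
	btrfs_truncate_item(trans, path, item_size - rec_len, 1);
}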

/*
 * make the item pointed to by the path bigger, data_size is the added size.
 */
void btrfs_extend_item(struct btrfs_trans_handle *trans,
		       const struct btrfs_path *path, u32 data_size)
{
	int slot;
	struct extent_buffer *leaf;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;
	struct btrfs_map_token token;

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(leaf);

	if (btrfs_leaf_free_space(leaf) < data_size) {
		btrfs_print_leaf(leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_data_end(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(leaf);
		btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
			   slot, nritems);
		BUG();
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	btrfs_init_map_token(&token, leaf);
	for (i = slot; i < nritems; i++) {
		u32 ioff;

		ioff = btrfs_token_item_offset(&token, i);
		btrfs_set_token_item_offset(&token, i, ioff - data_size);
	}

	/* shift the data */
	memmove_leaf_data(leaf, data_end - data_size, data_end,
			  old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size(leaf, slot);
	btrfs_set_item_size(leaf, slot, old_size + data_size);
	btrfs_mark_buffer_dirty(trans, leaf);

	if (btrfs_leaf_free_space(leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}
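
/*
 * The usual btrfs_extend_item() pattern, as a sketch (the "example_" name
 * and the 'rec' payload are assumptions): grow the item by 'add_len' bytes
 * and write the new record into the added tail. The caller must have
 * searched with ins_len >= add_len so the leaf is guaranteed to have room.
 */
static void __maybe_unused example_append_to_item(struct btrfs_trans_handle *trans,
						  struct btrfs_path *path,
						  const void *rec, u32 add_len)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size(leaf, path->slots[0]);
	unsigned long ptr;

	btrfs_extend_item(trans, path, add_len);

	/*
	 * The item data moved towards the front of the leaf; the new bytes
	 * sit at the old end of the item, at offset 'old_size' within it.
	 */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, rec, ptr + old_size, add_len);
}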

/*
 * Make space in the node before inserting one or more items.
 *
 * @trans:	transaction handle
 * @root:	root we are inserting items to
 * @path:	points to the leaf/slot where we are going to insert new items
 * @batch:	information about the batch of items to insert
 *
 * Main purpose is to save stack depth by doing the bulk of the work in a
 * function that doesn't call btrfs_search_slot
 */
static void setup_items_for_insert(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct btrfs_path *path,
				   const struct btrfs_item_batch *batch)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	int slot;
	struct btrfs_map_token token;
	u32 total_size;

	/*
	 * Before anything else, update keys in the parent and other ancestors
	 * if needed, then release the write locks on them, so that other tasks
	 * can use them while we modify the leaf.
	 */
	if (path->slots[0] == 0) {
		btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
		fixup_low_keys(trans, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(leaf);
	total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));

	if (btrfs_leaf_free_space(leaf) < total_size) {
		btrfs_print_leaf(leaf);
		btrfs_crit(fs_info, "not enough freespace need %u have %d",
			   total_size, btrfs_leaf_free_space(leaf));
		BUG();
	}

	btrfs_init_map_token(&token, leaf);
	if (slot != nritems) {
		unsigned int old_data = btrfs_item_data_end(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(leaf);
			btrfs_crit(fs_info,
		"item at slot %d with data offset %u beyond data end of leaf %u",
				   slot, old_data, data_end);
			BUG();
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			ioff = btrfs_token_item_offset(&token, i);
			btrfs_set_token_item_offset(&token, i,
						    ioff - batch->total_data_size);
		}
		/* shift the items */
		memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot);

		/* shift the data */
		memmove_leaf_data(leaf, data_end - batch->total_data_size,
				  data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < batch->nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		data_end -= batch->data_sizes[i];
		btrfs_set_token_item_offset(&token, slot + i, data_end);
		btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]);
	}

	btrfs_set_header_nritems(leaf, nritems + batch->nr);
	btrfs_mark_buffer_dirty(trans, leaf);

	if (btrfs_leaf_free_space(leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}

/*
 * Insert a new item into a leaf.
 *
 * @trans:     Transaction handle.
 * @root:      The root of the btree.
 * @path:      A path pointing to the target leaf and slot.
 * @key:       The key of the new item.
 * @data_size: The size of the data associated with the new key.
 */
void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 const struct btrfs_key *key,
				 u32 data_size)
{
	struct btrfs_item_batch batch;

	batch.keys = key;
	batch.data_sizes = &data_size;
	batch.total_data_size = data_size;
	batch.nr = 1;

	setup_items_for_insert(trans, root, path, &batch);
}

/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 *
 * Returns: 0        on success
 *          -EEXIST  if the first key already exists
 *          < 0      on other errors
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_item_batch *batch)
{
	int ret = 0;
	int slot;
	u32 total_size;

	total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(trans, root, path, batch);
	return 0;
}
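
/*
 * Batch-insertion sketch for btrfs_insert_empty_items() (hedged: the
 * two-item layout and the "example_" name are illustrative). One call
 * reserves all items contiguously, which is cheaper than repeated single
 * inserts; the caller then fills the still-uninitialized item data.
 */
static int __maybe_unused example_insert_two_items(struct btrfs_trans_handle *trans,
						   struct btrfs_root *root,
						   struct btrfs_path *path,
						   const struct btrfs_key keys[2],
						   const u32 sizes[2])
{
	struct btrfs_item_batch batch;
	int ret;

	batch.keys = keys;
	batch.data_sizes = sizes;
	batch.total_data_size = sizes[0] + sizes[1];
	batch.nr = 2;

	ret = btrfs_insert_empty_items(trans, root, path, &batch);
	if (ret)
		return ret;

	/* path->slots[0] points at the first new item; zero-fill both. */
	memzero_extent_buffer(path->nodes[0],
			      btrfs_item_ptr_offset(path->nodes[0], path->slots[0]),
			      sizes[0]);
	memzero_extent_buffer(path->nodes[0],
			      btrfs_item_ptr_offset(path->nodes[0], path->slots[0] + 1),
			      sizes[1]);
	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
	return 0;
}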

/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *cpu_key, void *data,
		      u32 data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(trans, leaf);
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item is
 * contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the leaf
 * the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	path->slots[0]++;
	btrfs_setup_item_for_insert(trans, root, path, new_key, item_size);
	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}
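
/*
 * Sketch of btrfs_duplicate_item() usage when splitting a file extent in
 * place (modeled on typical callers, not copied from one; the key update
 * shown is an assumption). After the call the path points at the copy,
 * which the caller then adjusts to describe the second half of the range.
 */
static int __maybe_unused example_dup_file_extent(struct btrfs_trans_handle *trans,
						  struct btrfs_root *root,
						  struct btrfs_path *path,
						  u64 split_pos)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	/* The duplicate covers the file range starting at split_pos. */
	new_key.offset = split_pos;

	return btrfs_duplicate_item(trans, root, path, &new_key);
}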
4417 * 4418 * This deletes the pointer in path->nodes[1] and frees the leaf 4419 * block extent. zero is returned if it all worked out, < 0 otherwise. 4420 * 4421 * The path must have already been setup for deleting the leaf, including 4422 * all the proper balancing. path->nodes[1] must be locked. 4423 */ 4424 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans, 4425 struct btrfs_root *root, 4426 struct btrfs_path *path, 4427 struct extent_buffer *leaf) 4428 { 4429 int ret; 4430 4431 WARN_ON(btrfs_header_generation(leaf) != trans->transid); 4432 ret = btrfs_del_ptr(trans, root, path, 1, path->slots[1]); 4433 if (ret < 0) 4434 return ret; 4435 4436 /* 4437 * btrfs_free_extent is expensive, we want to make sure we 4438 * aren't holding any locks when we call it 4439 */ 4440 btrfs_unlock_up_safe(path, 0); 4441 4442 root_sub_used_bytes(root); 4443 4444 atomic_inc(&leaf->refs); 4445 ret = btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1); 4446 free_extent_buffer_stale(leaf); 4447 if (ret < 0) 4448 btrfs_abort_transaction(trans, ret); 4449 4450 return ret; 4451 } 4452 /* 4453 * delete the item at the leaf level in path. If that empties 4454 * the leaf, remove it from the tree 4455 */ 4456 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4457 struct btrfs_path *path, int slot, int nr) 4458 { 4459 struct btrfs_fs_info *fs_info = root->fs_info; 4460 struct extent_buffer *leaf; 4461 int ret = 0; 4462 int wret; 4463 u32 nritems; 4464 4465 leaf = path->nodes[0]; 4466 nritems = btrfs_header_nritems(leaf); 4467 4468 if (slot + nr != nritems) { 4469 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1); 4470 const int data_end = leaf_data_end(leaf); 4471 struct btrfs_map_token token; 4472 u32 dsize = 0; 4473 int i; 4474 4475 for (i = 0; i < nr; i++) 4476 dsize += btrfs_item_size(leaf, slot + i); 4477 4478 memmove_leaf_data(leaf, data_end + dsize, data_end, 4479 last_off - data_end); 4480 4481 btrfs_init_map_token(&token, leaf); 4482 for (i = slot + nr; i < nritems; i++) { 4483 u32 ioff; 4484 4485 ioff = btrfs_token_item_offset(&token, i); 4486 btrfs_set_token_item_offset(&token, i, ioff + dsize); 4487 } 4488 4489 memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr); 4490 } 4491 btrfs_set_header_nritems(leaf, nritems - nr); 4492 nritems -= nr; 4493 4494 /* delete the leaf if we've emptied it */ 4495 if (nritems == 0) { 4496 if (leaf == root->node) { 4497 btrfs_set_header_level(leaf, 0); 4498 } else { 4499 btrfs_clear_buffer_dirty(trans, leaf); 4500 ret = btrfs_del_leaf(trans, root, path, leaf); 4501 if (ret < 0) 4502 return ret; 4503 } 4504 } else { 4505 int used = leaf_space_used(leaf, 0, nritems); 4506 if (slot == 0) { 4507 struct btrfs_disk_key disk_key; 4508 4509 btrfs_item_key(leaf, &disk_key, 0); 4510 fixup_low_keys(trans, path, &disk_key, 1); 4511 } 4512 4513 /* 4514 * Try to delete the leaf if it is mostly empty. We do this by 4515 * trying to move all its items into its left and right neighbours. 4516 * If we can't move all the items, then we don't delete it - it's 4517 * not ideal, but future insertions might fill the leaf with more 4518 * items, or items from other leaves might be moved later into our 4519 * leaf due to deletions on those leaves. 4520 */ 4521 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) { 4522 u32 min_push_space; 4523 4524 /* push_leaf_left fixes the path. 
4525 * make sure the path still points to our leaf 4526 * for possible call to btrfs_del_ptr below 4527 */ 4528 slot = path->slots[1]; 4529 atomic_inc(&leaf->refs); 4530 /* 4531 * We want to be able to at least push one item to the 4532 * left neighbour leaf, and that's the first item. 4533 */ 4534 min_push_space = sizeof(struct btrfs_item) + 4535 btrfs_item_size(leaf, 0); 4536 wret = push_leaf_left(trans, root, path, 0, 4537 min_push_space, 1, (u32)-1); 4538 if (wret < 0 && wret != -ENOSPC) 4539 ret = wret; 4540 4541 if (path->nodes[0] == leaf && 4542 btrfs_header_nritems(leaf)) { 4543 /* 4544 * If we were not able to push all items from our 4545 * leaf to its left neighbour, then attempt to 4546 * either push all the remaining items to the 4547 * right neighbour or none. There's no advantage 4548 * in pushing only some items, instead of all, as 4549 * it's pointless to end up with a leaf having 4550 * too few items while the neighbours can be full 4551 * or nearly full. 4552 */ 4553 nritems = btrfs_header_nritems(leaf); 4554 min_push_space = leaf_space_used(leaf, 0, nritems); 4555 wret = push_leaf_right(trans, root, path, 0, 4556 min_push_space, 1, 0); 4557 if (wret < 0 && wret != -ENOSPC) 4558 ret = wret; 4559 } 4560 4561 if (btrfs_header_nritems(leaf) == 0) { 4562 path->slots[1] = slot; 4563 ret = btrfs_del_leaf(trans, root, path, leaf); 4564 if (ret < 0) 4565 return ret; 4566 free_extent_buffer(leaf); 4567 ret = 0; 4568 } else { 4569 /* if we're still in the path, make sure 4570 * we're dirty. Otherwise, one of the 4571 * push_leaf functions must have already 4572 * dirtied this buffer 4573 */ 4574 if (path->nodes[0] == leaf) 4575 btrfs_mark_buffer_dirty(trans, leaf); 4576 free_extent_buffer(leaf); 4577 } 4578 } else { 4579 btrfs_mark_buffer_dirty(trans, leaf); 4580 } 4581 } 4582 return ret; 4583 } 4584 4585 /* 4586 * A helper function to walk down the tree starting at min_key, and looking 4587 * for nodes or leaves that are have a minimum transaction id. 4588 * This is used by the btree defrag code, and tree logging 4589 * 4590 * This does not cow, but it does stuff the starting key it finds back 4591 * into min_key, so you can call btrfs_search_slot with cow=1 on the 4592 * key and get a writable path. 4593 * 4594 * This honors path->lowest_level to prevent descent past a given level 4595 * of the tree. 4596 * 4597 * min_trans indicates the oldest transaction that you are interested 4598 * in walking through. Any nodes or leaves older than min_trans are 4599 * skipped over (without reading them). 4600 * 4601 * returns zero if something useful was found, < 0 on error and 1 if there 4602 * was nothing in the tree that matched the search criteria. 

/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through. Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;
	int keep_locks = path->keep_locks;

	ASSERT(!path->nowait);
	path->keep_locks = 1;
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = btrfs_bin_search(cur, 0, min_key, &slot);
		if (sret < 0) {
			ret = sret;
			goto out;
		}

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * Check this node pointer against the min_trans parameter.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			sret = btrfs_find_next_key(root, path, min_key, level,
						   min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			goto out;
		}
		cur = btrfs_read_node_slot(cur, slot);
		if (IS_ERR(cur)) {
			ret = PTR_ERR(cur);
			goto out;
		}

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
	}
out:
	path->keep_locks = keep_locks;
	if (ret == 0) {
		btrfs_unlock_up_safe(path, path->lowest_level + 1);
		memcpy(min_key, &found_key, sizeof(found_key));
	}
	return ret;
}
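
/*
 * btrfs_search_forward() loop sketch, in the style of the defrag and
 * tree-log walkers (the key-stepping logic is an assumption modeled on
 * them; "process the item" is left to the caller). Visits only items in
 * blocks newer than 'min_trans'.
 */
static int __maybe_unused example_scan_newer_than(struct btrfs_root *root,
						  u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key = { 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret) {
			/* 1 means nothing new enough was left. */
			if (ret > 0)
				ret = 0;
			break;
		}

		/* min_key now holds the found key; process the item here. */

		btrfs_release_path(path);

		/* Resume strictly after the key we just processed. */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	btrfs_free_path(path);
	return ret;
}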

/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path. It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks && !path->skip_locking);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1] || path->skip_locking) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	bool need_commit_sem = false;
	u32 nritems;
	int ret;
	int i;

	/*
	 * The nowait semantics are used only for write paths, where we don't
	 * use the tree mod log and sequence numbers.
	 */
	if (time_seq)
		ASSERT(!path->nowait);

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	btrfs_release_path(path);

	path->keep_locks = 1;

	if (time_seq) {
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	} else {
		if (path->need_commit_sem) {
			path->need_commit_sem = 0;
			need_commit_sem = true;
			if (path->nowait) {
				if (!down_read_trylock(&fs_info->commit_root_sem)) {
					ret = -EAGAIN;
					goto done;
				}
			} else {
				down_read(&fs_info->commit_root_sem);
			}
		}
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	}
	path->keep_locks = 0;

	if (ret < 0)
		goto done;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks. A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block. So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * So the above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leafs
	 *   gets another one with bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later (esp. in __btrfs_drop_extents()).
	 *
	 * And a bit more explanation about this check:
	 * with ret > 0, the key isn't found, the path points to the slot
	 * where it should be inserted, so the path->slots[0] item must be the
	 * bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		/*
		 * Our current level is where we're going to start from, and to
		 * make sure lockdep doesn't complain we need to drop our locks
		 * and nodes from 0 to our current level.
		 */
		for (i = 0; i < level; i++) {
			if (path->locks[i]) {
				btrfs_tree_read_unlock(path->nodes[i]);
				path->locks[i] = 0;
			}
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}

		next = c;
		ret = read_block_for_search(root, path, &next, slot, &key);
		if (ret == -EAGAIN && !path->nowait)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && path->nowait) {
				ret = -EAGAIN;
				goto done;
			}
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret)
				btrfs_tree_read_lock(next);
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = BTRFS_READ_LOCK;
		if (!level)
			break;

		ret = read_block_for_search(root, path, &next, 0, &key);
		if (ret == -EAGAIN && !path->nowait)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			if (path->nowait) {
				if (!btrfs_try_tree_read_lock(next)) {
					ret = -EAGAIN;
					goto done;
				}
			} else {
				btrfs_tree_read_lock(next);
			}
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	if (need_commit_sem) {
		int ret2;

		path->need_commit_sem = 1;
		ret2 = finish_need_commit_sem_search(path);
		up_read(&fs_info->commit_root_sem);
		if (ret2)
			ret = ret2;
	}

	return ret;
}

int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq)
{
	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		return btrfs_next_old_leaf(root, path, time_seq);
	return 0;
}

/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
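
/*
 * Forward-iteration sketch: visit every item of one objectid by searching
 * once and then stepping with slot increments and btrfs_next_old_leaf()
 * (time_seq == 0 means "current tree"). The "example_" name and the bare
 * key layout are illustrative assumptions.
 */
static int __maybe_unused example_walk_objectid(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = 0;
	key.offset = 0;

	/* Read-only search: no transaction, ins_len == 0, cow == 0. */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_old_leaf(root, path, 0);
			if (ret)
				break;
			continue;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != objectid)
			break;

		/* Process the item at path->slots[0] here. */

		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}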

/*
 * search in extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
			break;
	}
	return 1;
}

int __init btrfs_ctree_init(void)
{
	btrfs_path_cachep = KMEM_CACHE(btrfs_path, 0);
	if (!btrfs_path_cachep)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_ctree_exit(void)
{
	kmem_cache_destroy(btrfs_path_cachep);
}