// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include <linux/error-injection.h>
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "tree-mod-log.h"
#include "tree-checker.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "file-item.h"

static struct kmem_cache *btrfs_path_cachep;

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);

static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
				     .driver = "blake2b-256" },
};

/*
 * The leaf data grows from end-to-front in the node. This returns the address
 * of the start of the last item, which is where the leaf data stack ends.
 */
static unsigned int leaf_data_end(const struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);

	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
	return btrfs_item_offset(leaf, nr - 1);
}
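
/*
 * Illustrative sketch, not part of the original file: the layout that
 * leaf_data_end() describes. Items grow forward from the start of the
 * item area (right after the header), while item data grows backward
 * from the end of the block, so the free space is the gap in between:
 *
 *	[header][item 0 .. item N-1] ... free ... [data N-1 .. data 0]
 *	                            ^            ^
 *	                            items end    leaf_data_end()
 *
 * Under that assumption, the free space of a leaf is roughly the data
 * end minus the end of the item array (modulo the exact header
 * accounting done by the real btrfs_leaf_free_space()):
 *
 *	u32 nritems = btrfs_header_nritems(leaf);
 *	u32 items_end = nritems * sizeof(struct btrfs_item);
 *	u32 free_space = leaf_data_end(leaf) - items_end;
 */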

/*
 * Move data in a @leaf (using memmove, safe for overlapping ranges).
 *
 * @leaf:	leaf that we're doing a memmove on
 * @dst_offset:	item data offset we're moving to
 * @src_offset:	item data offset we're moving from
 * @len:	length of the data we're moving
 *
 * Wrapper around memmove_extent_buffer() that takes into account the header on
 * the leaf. The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf. This
 * handles that math to simplify the callers.
 */
static inline void memmove_leaf_data(const struct extent_buffer *leaf,
				     unsigned long dst_offset,
				     unsigned long src_offset,
				     unsigned long len)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
			      btrfs_item_nr_offset(leaf, 0) + src_offset, len);
}

/*
 * Copy item data from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf that we're copying into
 * @src:	source leaf that we're copying from
 * @dst_offset:	item data offset we're copying to
 * @src_offset:	item data offset we're copying from
 * @len:	length of the data we're copying
 *
 * Wrapper around copy_extent_buffer() that takes into account the header on
 * the leaf. The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf. This
 * handles that math to simplify the callers.
 */
static inline void copy_leaf_data(const struct extent_buffer *dst,
				  const struct extent_buffer *src,
				  unsigned long dst_offset,
				  unsigned long src_offset, unsigned long len)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
			   btrfs_item_nr_offset(src, 0) + src_offset, len);
}

/*
 * Move items in a @leaf (using memmove).
 *
 * @leaf:	leaf that we're moving the items in
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around memmove_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void memmove_leaf_items(const struct extent_buffer *leaf,
				      int dst_item, int src_item, int nr_items)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
			      btrfs_item_nr_offset(leaf, src_item),
			      nr_items * sizeof(struct btrfs_item));
}

/*
 * Copy items from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf for the items
 * @src:	source leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around copy_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void copy_leaf_items(const struct extent_buffer *dst,
				   const struct extent_buffer *src,
				   int dst_item, int src_item, int nr_items)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
			   btrfs_item_nr_offset(src, src_item),
			   nr_items * sizeof(struct btrfs_item));
}

/* This exists for btrfs-progs usages. */
u16 btrfs_csum_type_size(u16 type)
{
	return btrfs_csums[type].size;
}

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);

	/* csum type is validated at mount time */
	return btrfs_csum_type_size(t);
}

const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].name;
}

/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 * name.
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].driver[0] ?
		btrfs_csums[csum_type].driver :
		btrfs_csums[csum_type].name;
}

size_t __attribute_const__ btrfs_get_num_csums(void)
{
	return ARRAY_SIZE(btrfs_csums);
}
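
/*
 * Illustrative only, not part of the original file: how the checksum
 * table and helpers above tie together for one type. BLAKE2 is the one
 * entry with an explicit .driver ("blake2b-256") differing from its
 * display name ("blake2b"), which is why btrfs_super_csum_driver()
 * falls back to .name only when .driver is empty.
 *
 *	u16 type = BTRFS_CSUM_TYPE_CRC32;
 *
 *	btrfs_csum_type_size(type);	// 4 (bytes per checksum)
 *	btrfs_super_csum_name(type);	// "crc32c"
 *	btrfs_super_csum_driver(type);	// "crc32c" (no explicit driver)
 */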

struct btrfs_path *btrfs_alloc_path(void)
{
	might_sleep();

	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
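
/*
 * Illustrative sketch, not part of the original file: the usual
 * allocate/search/free cycle built on the helpers above. The function
 * name is hypothetical, the @root and @key values are placeholders and
 * error handling is minimal.
 */
static inline int example_lookup_item(struct btrfs_root *root,
				      const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* Read-only search: NULL transaction handle and cow == 0. */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0) {
		/* Key found at path->nodes[0] / path->slots[0]. */
	}
	/* Dropping the path releases its locks and extent buffer refs. */
	btrfs_free_path(path);
	return ret;
}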

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * We want the transaction abort to print stack trace only for errors where the
 * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
 * caused by external factors.
 */
bool __cold abort_should_print_stack(int error)
{
	switch (error) {
	case -EIO:
	case -EROFS:
	case -ENOMEM:
		return false;
	}
	return true;
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
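
/*
 * Illustrative only, not part of the original file: the reference
 * discipline btrfs_root_node() implies for its callers. The returned
 * buffer is pinned but unlocked, so it may stop being the root at any
 * moment; the caller just has to drop its reference when done.
 *
 *	struct extent_buffer *eb;
 *
 *	eb = btrfs_root_node(root);
 *	// ... inspect eb; it cannot be freed underneath us, but a
 *	// concurrent COW may already have installed a new root ...
 *	free_extent_buffer(eb);
 */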

/*
 * Cow-only roots (not-shareable trees, everything not a subvolume or reloc
 * root) just get put onto a simple dirty list. The transaction walks this
 * list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (btrfs_root_id(root) == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;
	u64 reloc_src_root = 0;

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		reloc_src_root = btrfs_header_owner(buf);
	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0,
				     reloc_src_root, BTRFS_NESTING_NEW_ROOT);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_mark_buffer_dirty(trans, cow);
	*cow_ret = cow;
	return 0;
}

/*
 * check if the tree block can be shared by multiple trees
 */
bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *buf)
{
	const u64 buf_gen = btrfs_header_generation(buf);

	/*
	 * Tree blocks not in shareable trees and tree roots are never shared.
	 * If a block was allocated after the last snapshot and the block was
	 * not allocated by tree relocation, we know the block is not shared.
	 */

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return false;

	if (buf == root->node)
		return false;

	if (buf_gen > btrfs_root_last_snapshot(&root->root_item) &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
		return false;

	if (buf != root->commit_root)
		return true;

	/*
	 * An extent buffer that used to be the commit root may still be shared
	 * because the tree height may have increased and it became a child of a
	 * higher level root. This can happen when snapshotting a subvolume
	 * created in the current transaction.
	 */
	if (buf_gen == trans->transid)
		return true;

	return false;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in the tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in the tree block.
	 * The reason for this is that some operations (such as drop
	 * tree) are only allowed for blocks using full backrefs.
	 */

	if (btrfs_block_can_be_shared(trans, root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags, NULL);
		if (ret)
			return ret;
		if (unlikely(refs == 0)) {
			btrfs_crit(fs_info,
		"found 0 references for tree block at bytenr %llu level %d root %llu",
				   buf->start, btrfs_header_level(buf),
				   btrfs_root_id(root));
			ret = -EUCLEAN;
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == btrfs_root_id(root) ||
		     btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, buf, new_flags);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clear_buffer_dirty(trans, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct extent_buffer *buf,
			  struct extent_buffer *parent, int parent_slot,
			  struct extent_buffer **cow_ret,
			  u64 search_start, u64 empty_size,
			  enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;
	u64 reloc_src_root = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_write_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		reloc_src_root = btrfs_header_owner(buf);
	}
	cow = btrfs_alloc_tree_block(trans, root, parent_start,
				     btrfs_root_id(root), &disk_key, level,
				     search_start, empty_size, reloc_src_root, nest);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, btrfs_root_id(root));

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
		if (ret < 0) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
		atomic_inc(&cow->refs);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
				      parent_start, last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		ret = btrfs_tree_mod_log_insert_key(parent, parent_slot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(trans, parent);
		if (last_ref) {
			ret = btrfs_tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_tree_unlock(cow);
				free_extent_buffer(cow);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
				      parent_start, last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(trans, cow);
	*cow_ret = cow;
	return 0;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * COWs a single block, see btrfs_force_cow_block() for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet.
 */
int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
		   "attempt to COW block %llu on root %llu that is being deleted",
			   buf->start, btrfs_root_id(root));
		return -EUCLEAN;
	}

	/*
	 * COWing must happen through a running transaction, which always
	 * matches the current fs generation (it's a transaction with a state
	 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
	 * into error state to prevent the commit of any transaction.
	 */
	if (unlikely(trans->transaction != fs_info->running_transaction ||
		     trans->transid != fs_info->generation)) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
"unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu",
			   buf->start, btrfs_root_id(root), trans->transid,
			   fs_info->running_transaction->transid,
			   fs_info->generation);
		return -EUCLEAN;
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = round_down(buf->start, SZ_1G);

	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also we don't care about the error, as it's handled internally.
	 */
	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
	ret = btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
				    cow_ret, search_start, 0, nest);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
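
/*
 * Illustrative only, not part of the original file: the calling
 * convention btrfs_cow_block() expects. The caller holds a write lock
 * on the buffer (and on its parent) inside a running transaction; on
 * success it continues with the possibly-new buffer returned via
 * @cow_ret, which may simply be the original buffer again when no COW
 * was needed.
 *
 *	struct extent_buffer *eb = path->nodes[level];
 *
 *	ret = btrfs_cow_block(trans, root, eb, path->nodes[level + 1],
 *			      path->slots[level + 1], &eb, BTRFS_NESTING_COW);
 *	if (ret)
 *		return ret;	// errors have already aborted the transaction
 *	// modify eb, then btrfs_mark_buffer_dirty(trans, eb) ...
 */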

/*
 * same as comp_keys only with two btrfs_key's
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
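
/*
 * Illustrative only, not part of the original file: keys order by
 * objectid first, then type, then offset, which is what gives every
 * btree its total ordering. For example (values are placeholders):
 *
 *	struct btrfs_key a = { .objectid = 256, .type = BTRFS_INODE_ITEM_KEY,
 *			       .offset = 0 };
 *	struct btrfs_key b = { .objectid = 256, .type = BTRFS_INODE_REF_KEY,
 *			       .offset = 256 };
 *
 *	// Same objectid, so the comparison falls through to the type:
 *	// btrfs_comp_cpu_keys(&a, &b) returns -1.
 */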

/*
 * Search for a key in the given extent_buffer.
 *
 * The lower boundary for the search is specified by the slot number @first_slot.
 * Use a value of 0 to search over the whole extent buffer. Works for both
 * leaves and nodes.
 *
 * The slot in the extent buffer is returned via @slot. If the key exists in the
 * extent buffer, then @slot will point to the slot where the key is, otherwise
 * it points to the slot where you would insert the key.
 *
 * Slot may point to the total number of items (i.e. one position beyond the last
 * key) if the key is bigger than the last key in the extent buffer.
 */
int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
		     const struct btrfs_key *key, int *slot)
{
	unsigned long p;
	int item_size;
	/*
	 * Use unsigned types for the low and high slots, so that we get a more
	 * efficient division in the search loop below.
	 */
	u32 low = first_slot;
	u32 high = btrfs_header_nritems(eb);
	int ret;
	const int key_size = sizeof(struct btrfs_disk_key);

	if (unlikely(low > high)) {
		btrfs_err(eb->fs_info,
		 "%s: low (%u) > high (%u) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	if (btrfs_header_level(eb) == 0) {
		p = offsetof(struct btrfs_leaf, items);
		item_size = sizeof(struct btrfs_item);
	} else {
		p = offsetof(struct btrfs_node, ptrs);
		item_size = sizeof(struct btrfs_key_ptr);
	}

	while (low < high) {
		const int unit_size = eb->folio_size;
		unsigned long oil;
		unsigned long offset;
		struct btrfs_disk_key *tmp;
		struct btrfs_disk_key unaligned;
		int mid;

		mid = (low + high) / 2;
		offset = p + mid * item_size;
		oil = get_eb_offset_in_folio(eb, offset);

		if (oil + key_size <= unit_size) {
			const unsigned long idx = get_eb_folio_index(eb, offset);
			char *kaddr = folio_address(eb->folios[idx]);

			oil = get_eb_offset_in_folio(eb, offset);
			tmp = (struct btrfs_disk_key *)(kaddr + oil);
		} else {
			read_extent_buffer(eb, &unaligned, offset, key_size);
			tmp = &unaligned;
		}

		ret = btrfs_comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
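
/*
 * Illustrative only, not part of the original file: interpreting the
 * result of btrfs_bin_search(). A return of 0 means an exact match at
 * *slot, 1 means not found with *slot at the insertion position, and a
 * negative value is an error.
 *
 *	int slot;
 *	int ret = btrfs_bin_search(eb, 0, &key, &slot);
 *
 *	if (ret < 0)
 *		return ret;	// e.g. corrupted slot bounds
 *	if (ret == 0)
 *		// the key sits exactly at @slot
 *	else
 *		// key absent; inserting it would go at @slot, which may
 *		// equal btrfs_header_nritems(eb) (past the last key)
 */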

static void root_add_used_bytes(struct btrfs_root *root)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
		btrfs_root_used(&root->root_item) + root->fs_info->nodesize);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used_bytes(struct btrfs_root *root)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
		btrfs_root_used(&root->root_item) - root->fs_info->nodesize);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot)
{
	int level = btrfs_header_level(parent);
	struct btrfs_tree_parent_check check = { 0 };
	struct extent_buffer *eb;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	ASSERT(level);

	check.level = level - 1;
	check.transid = btrfs_node_ptr_generation(parent, slot);
	check.owner_root = btrfs_header_owner(parent);
	check.has_first_key = true;
	btrfs_node_key_to_cpu(parent, &check.first_key, slot);

	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
			     &check);
	if (IS_ERR(eb))
		return eb;
	if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return ERR_PTR(-EIO);
	}

	return eb;
}
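
/*
 * Illustrative only, not part of the original file: the typical
 * pattern for walking to a child with btrfs_read_node_slot(). The
 * returned buffer is referenced but unlocked, so callers that modify
 * it lock it first and always drop the reference when done.
 *
 *	struct extent_buffer *child;
 *
 *	child = btrfs_read_node_slot(parent, slot);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *	btrfs_tree_lock(child);
 *	// ... use child ...
 *	btrfs_tree_unlock(child);
 *	free_extent_buffer(child);
 */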

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	ASSERT(level > 0);

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = btrfs_read_node_slot(mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			goto out;
		}

		btrfs_tree_lock(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto out;
		}

		ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
		if (ret < 0) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used_bytes(root);
		btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	if (pslot) {
		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left)) {
			ret = PTR_ERR(left);
			left = NULL;
			goto out;
		}

		btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left,
				       BTRFS_NESTING_LEFT_COW);
		if (wret) {
			ret = wret;
			goto out;
		}
	}

	if (pslot + 1 < btrfs_header_nritems(parent)) {
		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right)) {
			ret = PTR_ERR(right);
			right = NULL;
			goto out;
		}

		btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right,
				       BTRFS_NESTING_RIGHT_COW);
		if (wret) {
			ret = wret;
			goto out;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the rightmost buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			btrfs_clear_buffer_dirty(trans, right);
			btrfs_tree_unlock(right);
			ret = btrfs_del_ptr(trans, root, path, level + 1, pslot + 1);
			if (ret < 0) {
				free_extent_buffer_stale(right);
				right = NULL;
				goto out;
			}
			root_sub_used_bytes(root);
			btrfs_free_tree_block(trans, btrfs_root_id(root), right,
					      0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;

			btrfs_node_key(right, &right_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(trans, parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (unlikely(!left)) {
			btrfs_crit(fs_info,
"missing left child when middle child only has 1 item, parent bytenr %llu level %d mid bytenr %llu root %llu",
				   parent->start, btrfs_header_level(parent),
				   mid->start, btrfs_root_id(root));
			ret = -EUCLEAN;
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		wret = balance_node_right(trans, mid, left);
		if (wret < 0) {
			ret = wret;
			goto out;
		}
		if (wret == 1) {
			wret = push_node_left(trans, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		ret = btrfs_del_ptr(trans, root, path, level + 1, pslot);
		if (ret < 0) {
			free_extent_buffer_stale(mid);
			mid = NULL;
			goto out;
		}
		root_sub_used_bytes(root);
		btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;

		btrfs_node_key(mid, &mid_key, 0);
		ret = btrfs_tree_mod_log_insert_key(parent, pslot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(trans, parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			atomic_inc(&left->refs);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
out:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}

/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	/* first, try to make some room in the middle buffer */
	if (pslot) {
		u32 left_nr;

		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left))
			return PTR_ERR(left);

		btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left,
					      BTRFS_NESTING_LEFT_COW);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(trans, parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}

	/*
	 * then try to empty the rightmost buffer into the middle
	 */
	if (pslot + 1 < btrfs_header_nritems(parent)) {
		u32 right_nr;

		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right))
			return PTR_ERR(right);

		btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right, BTRFS_NESTING_RIGHT_COW);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(trans, parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}

/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 nread_max;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	/*
	 * Since the time between visiting leaves is much shorter than the time
	 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
	 * much IO at once (possibly random).
	 */
	if (path->reada == READA_FORWARD_ALWAYS) {
		if (level > 1)
			nread_max = node->fs_info->nodesize;
		else
			nread_max = SZ_128K;
	} else {
		nread_max = SZ_64K;
	}

	search = btrfs_node_blockptr(node, slot);
	blocksize = fs_info->nodesize;
	if (path->reada != READA_FORWARD_ALWAYS) {
		struct extent_buffer *eb;

		eb = find_extent_buffer(fs_info, search);
		if (eb) {
			free_extent_buffer(eb);
			return;
		}
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD ||
			   path->reada == READA_FORWARD_ALWAYS) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if (path->reada == READA_FORWARD_ALWAYS ||
		    (search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			btrfs_readahead_node_child(node, nr);
			nread += blocksize;
		}
		nscan++;
		if (nread > nread_max || nscan > 32)
			break;
	}
}

static noinline void reada_for_balance(struct btrfs_path *path, int level)
{
	struct extent_buffer *parent;
	int slot;
	int nritems;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0)
		btrfs_readahead_node_child(parent, slot - 1);
	if (slot + 1 < nritems)
		btrfs_readahead_node_child(parent, slot + 1);
}

/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
 * if lowest_unlock is 1, level 0 won't be unlocked.
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	bool check_skip = true;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;

		if (check_skip) {
			if (path->slots[i] == 0) {
				skip_level = i + 1;
				continue;
			}

			if (path->keep_locks) {
				u32 nritems;

				nritems = btrfs_header_nritems(path->nodes[i]);
				if (nritems < 1 || path->slots[i] >= nritems - 1) {
					skip_level = i + 1;
					continue;
				}
			}
		}

		if (i >= lowest_unlock && i > skip_level) {
			check_skip = false;
			btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}

/*
 * Helper function for btrfs_search_slot() and other functions that do a search
 * on a btree. The goal is to find a tree block in the cache (the radix tree at
 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
 * its pages from disk.
 *
 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
 * whole btree search, starting again from the current root node.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_tree_parent_check check = { 0 };
	u64 blocknr;
	u64 gen;
	struct extent_buffer *tmp;
	int ret;
	int parent_level;
	bool unlock_up;

	unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]);
	blocknr = btrfs_node_blockptr(*eb_ret, slot);
	gen = btrfs_node_ptr_generation(*eb_ret, slot);
	parent_level = btrfs_header_level(*eb_ret);
	btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
	check.has_first_key = true;
	check.level = parent_level - 1;
	check.transid = gen;
	check.owner_root = btrfs_root_id(root);

	/*
	 * If we need to read an extent buffer from disk and we are holding locks
	 * on upper level nodes, we unlock all the upper nodes before reading the
	 * extent buffer, and then return -EAGAIN to the caller as it needs to
	 * restart the search. We don't release the lock on the current level
	 * because we need to walk this node to figure out which blocks to read.
	 */
	tmp = find_extent_buffer(fs_info, blocknr);
	if (tmp) {
		if (p->reada == READA_FORWARD_ALWAYS)
			reada_for_search(fs_info, p, level, slot, key->objectid);

		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
			/*
			 * Do extra check for first_key, eb can be stale due to
			 * being cached, read from scrub, or have multiple
			 * parents (shared tree blocks).
			 */
			if (btrfs_verify_level_key(tmp,
					parent_level - 1, &check.first_key, gen)) {
				free_extent_buffer(tmp);
				return -EUCLEAN;
			}
			*eb_ret = tmp;
			return 0;
		}

		if (p->nowait) {
			free_extent_buffer(tmp);
			return -EAGAIN;
		}

		if (unlock_up)
			btrfs_unlock_up_safe(p, level + 1);

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_extent_buffer(tmp, &check);
		if (ret) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
		if (btrfs_check_eb_owner(tmp, btrfs_root_id(root))) {
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EUCLEAN;
		}

		if (unlock_up)
			ret = -EAGAIN;

		goto out;
	} else if (p->nowait) {
		return -EAGAIN;
	}

	if (unlock_up) {
		btrfs_unlock_up_safe(p, level + 1);
		ret = -EAGAIN;
	} else {
		ret = 0;
	}

	if (p->reada != READA_NONE)
		reada_for_search(fs_info, p, level, slot, key->objectid);

	tmp = read_tree_block(fs_info, blocknr, &check);
	if (IS_ERR(tmp)) {
		btrfs_release_path(p);
		return PTR_ERR(tmp);
	}
	/*
	 * If the read above didn't mark this buffer up to date,
	 * it will never end up being up to date.  Set ret to EIO now
	 * and give up so that our caller doesn't loop forever
	 * on our EAGAINs.
	 */
	if (!extent_buffer_uptodate(tmp))
		ret = -EIO;

out:
	if (ret == 0) {
		*eb_ret = tmp;
	} else {
		free_extent_buffer(tmp);
		btrfs_release_path(p);
	}

	return ret;
}
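
/*
 * Illustrative only, not part of the original file: how callers are
 * expected to react to -EAGAIN from read_block_for_search(). Since the
 * path has been unlocked, the only safe response is to restart the
 * descent from the root, roughly:
 *
 *	again:
 *		b = btrfs_search_slot_get_root(root, p, write_lock_level);
 *		...
 *		err = read_block_for_search(root, p, &b, level, slot, key);
 *		if (err == -EAGAIN)
 *			goto again;
 */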

/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = split_node(trans, root, p, level);

		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = balance_level(trans, root, p, level);
		if (ret)
			return ret;

		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			return -EAGAIN;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return ret;
}

int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 iobjectid, u64 ioff, u8 key_type,
		    struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	ASSERT(path);
	ASSERT(found_key);

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
	    found_key->objectid != key.objectid)
		return 1;

	return 0;
}

static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
							struct btrfs_path *p,
							int write_lock_level)
{
	struct extent_buffer *b;
	int root_lock = 0;
	int level = 0;

	if (p->search_commit_root) {
		b = root->commit_root;
		atomic_inc(&b->refs);
		level = btrfs_header_level(b);
		/*
		 * Ensure that all callers have set skip_locking when
		 * p->search_commit_root = 1.
		 */
		ASSERT(p->skip_locking == 1);

		goto out;
	}

	if (p->skip_locking) {
		b = btrfs_root_node(root);
		level = btrfs_header_level(b);
		goto out;
	}

	/* We try very hard to do read locks on the root */
	root_lock = BTRFS_READ_LOCK;

	/*
	 * If the level is set to maximum, we can skip trying to get the read
	 * lock.
	 */
	if (write_lock_level < BTRFS_MAX_LEVEL) {
		/*
		 * We don't know the level of the root node until we actually
		 * have it read locked
		 */
		if (p->nowait) {
			b = btrfs_try_read_lock_root_node(root);
			if (IS_ERR(b))
				return b;
		} else {
			b = btrfs_read_lock_root_node(root);
		}
		level = btrfs_header_level(b);
		if (level > write_lock_level)
			goto out;

		/* Whoops, must trade for write lock */
		btrfs_tree_read_unlock(b);
		free_extent_buffer(b);
	}

	b = btrfs_lock_root_node(root);
	root_lock = BTRFS_WRITE_LOCK;

	/* The level might have changed, check again */
	level = btrfs_header_level(b);

out:
	/*
	 * The root may have failed to write out at some point, and thus is no
	 * longer valid, return an error in this case.
	 */
	if (!extent_buffer_uptodate(b)) {
		if (root_lock)
			btrfs_tree_unlock_rw(b, root_lock);
		free_extent_buffer(b);
		return ERR_PTR(-EIO);
	}

	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;
	/*
	 * Callers are responsible for dropping b's references.
	 */
	return b;
}

/*
 * Replace the extent buffer at the lowest level of the path with a cloned
 * version. The purpose is to be able to use it safely, after releasing the
 * commit root semaphore, even if relocation is happening in parallel, the
 * transaction used for relocation is committed and the extent buffer is
 * reallocated in the next transaction.
 *
 * This is used in a context where the caller does not prevent transaction
 * commits from happening, either by holding a transaction handle or holding
 * some lock, while it's doing searches through a commit root.
 * At the moment it's only used for send operations.
 */
static int finish_need_commit_sem_search(struct btrfs_path *path)
{
	const int i = path->lowest_level;
	const int slot = path->slots[i];
	struct extent_buffer *lowest = path->nodes[i];
	struct extent_buffer *clone;

	ASSERT(path->need_commit_sem);

	if (!lowest)
		return 0;

	lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);

	clone = btrfs_clone_extent_buffer(lowest);
	if (!clone)
		return -ENOMEM;

	btrfs_release_path(path);
	path->nodes[i] = clone;
	path->slots[i] = slot;

	return 0;
}

static inline int search_for_key_slot(struct extent_buffer *eb,
				      int search_low_slot,
				      const struct btrfs_key *key,
				      int prev_cmp,
				      int *slot)
{
	/*
	 * If a previous call to btrfs_bin_search() on a parent node returned an
	 * exact match (prev_cmp == 0), we can safely assume the target key will
	 * always be at slot 0 on lower levels, since each key pointer
	 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
	 * subtree it points to. Thus we can skip searching lower levels.
	 */
	if (prev_cmp == 0) {
		*slot = 0;
		return 0;
	}

	return btrfs_bin_search(eb, search_low_slot, key, slot);
}

static int search_leaf(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       const struct btrfs_key *key,
		       struct btrfs_path *path,
		       int ins_len,
		       int prev_cmp)
{
	struct extent_buffer *leaf = path->nodes[0];
	int leaf_free_space = -1;
	int search_low_slot = 0;
	int ret;
	bool do_bin_search = true;

	/*
	 * If we are doing an insertion, the leaf has enough free space and the
	 * destination slot for the key is not slot 0, then we can unlock our
	 * write lock on the parent, and any other upper nodes, before doing the
	 * binary search on the leaf (with search_for_key_slot()), allowing other
	 * tasks to lock the parent and any other upper nodes.
	 */
	if (ins_len > 0) {
		/*
		 * Cache the leaf free space, since we will need it later and it
		 * will not change until then.
		 */
		leaf_free_space = btrfs_leaf_free_space(leaf);

		/*
		 * !path->locks[1] means we have a single node tree, the leaf is
		 * the root of the tree.
		 */
		if (path->locks[1] && leaf_free_space >= ins_len) {
			struct btrfs_disk_key first_key;

			ASSERT(btrfs_header_nritems(leaf) > 0);
			btrfs_item_key(leaf, &first_key, 0);

			/*
			 * Doing the extra comparison with the first key is cheap,
			 * taking into account that the first key is very likely
			 * already in a cache line because it immediately follows
			 * the extent buffer's header and we have recently accessed
			 * the header's level field.
			 */
			ret = btrfs_comp_keys(&first_key, key);
			if (ret < 0) {
				/*
				 * The first key is smaller than the key we want
				 * to insert, so we are safe to unlock all upper
				 * nodes and we have to do the binary search.
				 *
				 * We do use btrfs_unlock_up_safe() and not
				 * unlock_up() because the latter does not unlock
				 * nodes with a slot of 0 - we can safely unlock
				 * any node even if its slot is 0 since in this
				 * case the key does not end up at slot 0 of the
				 * leaf and there's no need to split the leaf.
				 */
				btrfs_unlock_up_safe(path, 1);
				search_low_slot = 1;
			} else {
				/*
				 * The first key is >= the key we want to
				 * insert, so we can skip the binary search as
				 * the target key will be at slot 0.
				 *
				 * We cannot unlock upper nodes when the key is
				 * less than the first key, because we will need
				 * to update the key at slot 0 of the parent node
				 * and possibly of other upper nodes too.
				 * If the key matches the first key, then we can
				 * unlock all the upper nodes, using
				 * btrfs_unlock_up_safe() instead of unlock_up()
				 * as stated above.
				 */
				if (ret == 0)
					btrfs_unlock_up_safe(path, 1);
				/*
				 * ret is already 0 or 1, matching the result of
				 * a btrfs_bin_search() call, so there is no need
				 * to adjust it.
				 */
				do_bin_search = false;
				path->slots[0] = 0;
			}
		}
	}

	if (do_bin_search) {
		ret = search_for_key_slot(leaf, search_low_slot, key,
					  prev_cmp, &path->slots[0]);
		if (ret < 0)
			return ret;
	}

	if (ins_len > 0) {
		/*
		 * Item key already exists. In this case, if we are allowed to
		 * insert the item (for example, in dir_item case, item key
		 * collision is allowed), it will be merged with the original
		 * item. Only the item size grows, no new btrfs item will be
		 * added. If search_for_extension is not set, ins_len already
		 * accounts for the size of the btrfs_item, so deduct it here
		 * so the leaf space check will be correct.
		 */
		if (ret == 0 && !path->search_for_extension) {
			ASSERT(ins_len >= sizeof(struct btrfs_item));
			ins_len -= sizeof(struct btrfs_item);
		}

		ASSERT(leaf_free_space >= 0);

		if (leaf_free_space < ins_len) {
			int err;

			err = split_leaf(trans, root, key, path, ins_len,
					 (ret == 0));
			ASSERT(err <= 0);
			if (WARN_ON(err > 0))
				err = -EUCLEAN;
			if (err)
				ret = err;
		}
	}

	return ret;
}
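
/*
 * Illustrative only, not part of the original file: what ins_len looks
 * like from the caller's side. For insertion of a new item it covers
 * both the item header and the data, e.g. for 100 bytes of item data
 * (the sizes here are placeholders):
 *
 *	u32 data_size = 100;
 *	int ins_len = sizeof(struct btrfs_item) + data_size;
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, ins_len, 1);
 *
 * When only extending an existing item, ins_len is the extra data size
 * alone and path->search_for_extension is set, which is why the code
 * above deducts sizeof(struct btrfs_item) on an exact key match only
 * when that flag is unset.
 */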
In this case, if we are allowed to 1933 * insert the item (for example, in the dir_item case, item key 1934 * collision is allowed), it will be merged with the original 1935 * item. Only the item size grows, no new btrfs item will be 1936 * added. If search_for_extension is not set, ins_len already 1937 * accounts for the size of struct btrfs_item, so deduct it here 1938 * to keep the leaf space check correct. 1939 */ 1940 if (ret == 0 && !path->search_for_extension) { 1941 ASSERT(ins_len >= sizeof(struct btrfs_item)); 1942 ins_len -= sizeof(struct btrfs_item); 1943 } 1944 1945 ASSERT(leaf_free_space >= 0); 1946 1947 if (leaf_free_space < ins_len) { 1948 int err; 1949 1950 err = split_leaf(trans, root, key, path, ins_len, 1951 (ret == 0)); 1952 ASSERT(err <= 0); 1953 if (WARN_ON(err > 0)) 1954 err = -EUCLEAN; 1955 if (err) 1956 ret = err; 1957 } 1958 } 1959 1960 return ret; 1961 } 1962 1963 /* 1964 * Look for a key in a tree and perform necessary modifications to preserve 1965 * tree invariants. 1966 * 1967 * @trans: Handle of transaction, used when modifying the tree 1968 * @p: Holds all btree nodes along the search path 1969 * @root: The root node of the tree 1970 * @key: The key we are looking for 1971 * @ins_len: Indicates purpose of search: 1972 * >0 for inserts, it's the size of the item inserted (*) 1973 * <0 for deletions 1974 * 0 for plain searches, not modifying the tree 1975 * 1976 * (*) If the size of the item inserted doesn't include 1977 * sizeof(struct btrfs_item), then p->search_for_extension must 1978 * be set. 1979 * @cow: boolean indicating whether CoW operations should be performed. 1980 * Must always be 1 when modifying the tree. 1981 * 1982 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree. 1983 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible). 1984 * 1985 * If @key is found, 0 is returned and you can find the item in the leaf level 1986 * of the path (level 0). 1987 * 1988 * If @key isn't found, 1 is returned and the leaf level of the path (level 0) 1989 * points to the slot where it should be inserted. 1990 * 1991 * If an error is encountered while searching the tree, a negative error number 1992 * is returned. 1993 */ 1994 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, 1995 const struct btrfs_key *key, struct btrfs_path *p, 1996 int ins_len, int cow) 1997 { 1998 struct btrfs_fs_info *fs_info = root->fs_info; 1999 struct extent_buffer *b; 2000 int slot; 2001 int ret; 2002 int err; 2003 int level; 2004 int lowest_unlock = 1; 2005 /* everything at write_lock_level or lower must be write locked */ 2006 int write_lock_level = 0; 2007 u8 lowest_level = 0; 2008 int min_write_lock_level; 2009 int prev_cmp; 2010 2011 might_sleep(); 2012 2013 lowest_level = p->lowest_level; 2014 WARN_ON(lowest_level && ins_len > 0); 2015 WARN_ON(p->nodes[0] != NULL); 2016 BUG_ON(!cow && ins_len); 2017 2018 /* 2019 * For now only allow nowait for read only operations. There's no 2020 * strict reason why we couldn't allow it for writes, we just only need 2021 * it for reads so it's only implemented for reads.
2022 */ 2023 ASSERT(!p->nowait || !cow); 2024 2025 if (ins_len < 0) { 2026 lowest_unlock = 2; 2027 2028 /* when we are removing items, we might have to go up to level 2029 * two as we update tree pointers. Make sure we keep write locks 2030 * on those levels as well 2031 */ 2032 write_lock_level = 2; 2033 } else if (ins_len > 0) { 2034 /* 2035 * for inserting items, make sure we have a write lock on 2036 * level 1 so we can update keys 2037 */ 2038 write_lock_level = 1; 2039 } 2040 2041 if (!cow) 2042 write_lock_level = -1; 2043 2044 if (cow && (p->keep_locks || p->lowest_level)) 2045 write_lock_level = BTRFS_MAX_LEVEL; 2046 2047 min_write_lock_level = write_lock_level; 2048 2049 if (p->need_commit_sem) { 2050 ASSERT(p->search_commit_root); 2051 if (p->nowait) { 2052 if (!down_read_trylock(&fs_info->commit_root_sem)) 2053 return -EAGAIN; 2054 } else { 2055 down_read(&fs_info->commit_root_sem); 2056 } 2057 } 2058 2059 again: 2060 prev_cmp = -1; 2061 b = btrfs_search_slot_get_root(root, p, write_lock_level); 2062 if (IS_ERR(b)) { 2063 ret = PTR_ERR(b); 2064 goto done; 2065 } 2066 2067 while (b) { 2068 int dec = 0; 2069 2070 level = btrfs_header_level(b); 2071 2072 if (cow) { 2073 bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); 2074 2075 /* 2076 * if we don't really need to cow this block 2077 * then we don't want to set the path blocking, 2078 * so we test it here 2079 */ 2080 if (!should_cow_block(trans, root, b)) 2081 goto cow_done; 2082 2083 /* 2084 * must have write locks on this node and the 2085 * parent 2086 */ 2087 if (level > write_lock_level || 2088 (level + 1 > write_lock_level && 2089 level + 1 < BTRFS_MAX_LEVEL && 2090 p->nodes[level + 1])) { 2091 write_lock_level = level + 1; 2092 btrfs_release_path(p); 2093 goto again; 2094 } 2095 2096 if (last_level) 2097 err = btrfs_cow_block(trans, root, b, NULL, 0, 2098 &b, 2099 BTRFS_NESTING_COW); 2100 else 2101 err = btrfs_cow_block(trans, root, b, 2102 p->nodes[level + 1], 2103 p->slots[level + 1], &b, 2104 BTRFS_NESTING_COW); 2105 if (err) { 2106 ret = err; 2107 goto done; 2108 } 2109 } 2110 cow_done: 2111 p->nodes[level] = b; 2112 2113 /* 2114 * we have a lock on b and as long as we aren't changing 2115 * the tree, there is no way for the items in b to change. 2116 * It is safe to drop the lock on our parent before we 2117 * go through the expensive btree search on b. 2118 * 2119 * If we're inserting or deleting (ins_len != 0), then we might 2120 * be changing slot zero, which may require changing the parent. 2121 * So, we can't drop the lock until after we know which slot 2122 * we're operating on.
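 *
 * For example (added note): an insertion that ends up at slot 0 of the leaf changes the leaf's first key, and that key must be propagated into the parent's key pointer (see fixup_low_keys()), which requires holding a write lock on the parent.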
2123 */ 2124 if (!ins_len && !p->keep_locks) { 2125 int u = level + 1; 2126 2127 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 2128 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 2129 p->locks[u] = 0; 2130 } 2131 } 2132 2133 if (level == 0) { 2134 if (ins_len > 0) 2135 ASSERT(write_lock_level >= 1); 2136 2137 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp); 2138 if (!p->search_for_split) 2139 unlock_up(p, level, lowest_unlock, 2140 min_write_lock_level, NULL); 2141 goto done; 2142 } 2143 2144 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot); 2145 if (ret < 0) 2146 goto done; 2147 prev_cmp = ret; 2148 2149 if (ret && slot > 0) { 2150 dec = 1; 2151 slot--; 2152 } 2153 p->slots[level] = slot; 2154 err = setup_nodes_for_search(trans, root, p, b, level, ins_len, 2155 &write_lock_level); 2156 if (err == -EAGAIN) 2157 goto again; 2158 if (err) { 2159 ret = err; 2160 goto done; 2161 } 2162 b = p->nodes[level]; 2163 slot = p->slots[level]; 2164 2165 /* 2166 * Slot 0 is special, if we change the key we have to update 2167 * the parent pointer which means we must have a write lock on 2168 * the parent 2169 */ 2170 if (slot == 0 && ins_len && write_lock_level < level + 1) { 2171 write_lock_level = level + 1; 2172 btrfs_release_path(p); 2173 goto again; 2174 } 2175 2176 unlock_up(p, level, lowest_unlock, min_write_lock_level, 2177 &write_lock_level); 2178 2179 if (level == lowest_level) { 2180 if (dec) 2181 p->slots[level]++; 2182 goto done; 2183 } 2184 2185 err = read_block_for_search(root, p, &b, level, slot, key); 2186 if (err == -EAGAIN) 2187 goto again; 2188 if (err) { 2189 ret = err; 2190 goto done; 2191 } 2192 2193 if (!p->skip_locking) { 2194 level = btrfs_header_level(b); 2195 2196 btrfs_maybe_reset_lockdep_class(root, b); 2197 2198 if (level <= write_lock_level) { 2199 btrfs_tree_lock(b); 2200 p->locks[level] = BTRFS_WRITE_LOCK; 2201 } else { 2202 if (p->nowait) { 2203 if (!btrfs_try_tree_read_lock(b)) { 2204 free_extent_buffer(b); 2205 ret = -EAGAIN; 2206 goto done; 2207 } 2208 } else { 2209 btrfs_tree_read_lock(b); 2210 } 2211 p->locks[level] = BTRFS_READ_LOCK; 2212 } 2213 p->nodes[level] = b; 2214 } 2215 } 2216 ret = 1; 2217 done: 2218 if (ret < 0 && !p->skip_release_on_error) 2219 btrfs_release_path(p); 2220 2221 if (p->need_commit_sem) { 2222 int ret2; 2223 2224 ret2 = finish_need_commit_sem_search(p); 2225 up_read(&fs_info->commit_root_sem); 2226 if (ret2) 2227 ret = ret2; 2228 } 2229 2230 return ret; 2231 } 2232 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO); 2233 2234 /* 2235 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 2236 * current state of the tree together with the operations recorded in the tree 2237 * modification log to search for the key in a previous version of this tree, as 2238 * denoted by the time_seq parameter. 2239 * 2240 * Naturally, there is no support for insert, delete or cow operations. 2241 * 2242 * The resulting path and return value will be set up as if we called 2243 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 
2244 */ 2245 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 2246 struct btrfs_path *p, u64 time_seq) 2247 { 2248 struct btrfs_fs_info *fs_info = root->fs_info; 2249 struct extent_buffer *b; 2250 int slot; 2251 int ret; 2252 int err; 2253 int level; 2254 int lowest_unlock = 1; 2255 u8 lowest_level = 0; 2256 2257 lowest_level = p->lowest_level; 2258 WARN_ON(p->nodes[0] != NULL); 2259 ASSERT(!p->nowait); 2260 2261 if (p->search_commit_root) { 2262 BUG_ON(time_seq); 2263 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2264 } 2265 2266 again: 2267 b = btrfs_get_old_root(root, time_seq); 2268 if (!b) { 2269 ret = -EIO; 2270 goto done; 2271 } 2272 level = btrfs_header_level(b); 2273 p->locks[level] = BTRFS_READ_LOCK; 2274 2275 while (b) { 2276 int dec = 0; 2277 2278 level = btrfs_header_level(b); 2279 p->nodes[level] = b; 2280 2281 /* 2282 * we have a lock on b and as long as we aren't changing 2283 * the tree, there is no way for the items in b to change. 2284 * It is safe to drop the lock on our parent before we 2285 * go through the expensive btree search on b. 2286 */ 2287 btrfs_unlock_up_safe(p, level + 1); 2288 2289 ret = btrfs_bin_search(b, 0, key, &slot); 2290 if (ret < 0) 2291 goto done; 2292 2293 if (level == 0) { 2294 p->slots[level] = slot; 2295 unlock_up(p, level, lowest_unlock, 0, NULL); 2296 goto done; 2297 } 2298 2299 if (ret && slot > 0) { 2300 dec = 1; 2301 slot--; 2302 } 2303 p->slots[level] = slot; 2304 unlock_up(p, level, lowest_unlock, 0, NULL); 2305 2306 if (level == lowest_level) { 2307 if (dec) 2308 p->slots[level]++; 2309 goto done; 2310 } 2311 2312 err = read_block_for_search(root, p, &b, level, slot, key); 2313 if (err == -EAGAIN) 2314 goto again; 2315 if (err) { 2316 ret = err; 2317 goto done; 2318 } 2319 2320 level = btrfs_header_level(b); 2321 btrfs_tree_read_lock(b); 2322 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq); 2323 if (!b) { 2324 ret = -ENOMEM; 2325 goto done; 2326 } 2327 p->locks[level] = BTRFS_READ_LOCK; 2328 p->nodes[level] = b; 2329 } 2330 ret = 1; 2331 done: 2332 if (ret < 0) 2333 btrfs_release_path(p); 2334 2335 return ret; 2336 } 2337 2338 /* 2339 * Search the tree again to find a leaf with smaller keys. 2340 * Returns 0 if it found something. 2341 * Returns 1 if there are no smaller keys. 2342 * Returns < 0 on error. 2343 * 2344 * This may release the path, and so you may lose any locks held at the 2345 * time you call it. 2346 */ 2347 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 2348 { 2349 struct btrfs_key key; 2350 struct btrfs_key orig_key; 2351 struct btrfs_disk_key found_key; 2352 int ret; 2353 2354 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 2355 orig_key = key; 2356 2357 if (key.offset > 0) { 2358 key.offset--; 2359 } else if (key.type > 0) { 2360 key.type--; 2361 key.offset = (u64)-1; 2362 } else if (key.objectid > 0) { 2363 key.objectid--; 2364 key.type = (u8)-1; 2365 key.offset = (u64)-1; 2366 } else { 2367 return 1; 2368 } 2369 2370 btrfs_release_path(path); 2371 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2372 if (ret <= 0) 2373 return ret; 2374 2375 /* 2376 * Previous key not found.
Even if we were at slot 0 of the leaf we had 2377 * before releasing the path and calling btrfs_search_slot(), we now may 2378 * be in a slot pointing to the same original key - this can happen if 2379 * after we released the path, one or more items were moved from a 2380 * sibling leaf into the front of the leaf we had due to an insertion 2381 * (see push_leaf_right()). 2382 * If we hit this case and our slot is > 0, just decrement the slot 2383 * so that the caller does not process the same key again, which may or 2384 * may not break the caller, depending on its logic. 2385 */ 2386 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { 2387 btrfs_item_key(path->nodes[0], &found_key, path->slots[0]); 2388 ret = btrfs_comp_keys(&found_key, &orig_key); 2389 if (ret == 0) { 2390 if (path->slots[0] > 0) { 2391 path->slots[0]--; 2392 return 0; 2393 } 2394 /* 2395 * At slot 0, same key as before, it means orig_key is 2396 * the lowest, leftmost, key in the tree. We're done. 2397 */ 2398 return 1; 2399 } 2400 } 2401 2402 btrfs_item_key(path->nodes[0], &found_key, 0); 2403 ret = btrfs_comp_keys(&found_key, &key); 2404 /* 2405 * We might have had an item with the previous key in the tree right 2406 * before we released our path. And after we released our path, that 2407 * item might have been pushed to the first slot (0) of the leaf we 2408 * were holding due to a tree balance. Alternatively, an item with the 2409 * previous key can exist as the only element of a leaf (big fat item). 2410 * Therefore account for these 2 cases, so that our callers (like 2411 * btrfs_previous_item) don't miss an existing item with a key matching 2412 * the previous key we computed above. 2413 */ 2414 if (ret <= 0) 2415 return 0; 2416 return 1; 2417 } 2418 2419 /* 2420 * Helper to use instead of btrfs_search_slot() if no exact match is needed 2421 * but instead the next or previous item should be returned. 2422 * When find_higher is true, the next higher item is returned, the next lower 2423 * otherwise. 2424 * When return_any and find_higher are both true, and no higher item is found, 2425 * return the next lower instead. 2426 * When return_any is true and find_higher is false, and no lower item is found, 2427 * return the next higher instead. 2428 * It returns 0 if any item is found, 1 if none is found (tree empty), and 2429 * < 0 on error. 2430 */ 2431 int btrfs_search_slot_for_read(struct btrfs_root *root, 2432 const struct btrfs_key *key, 2433 struct btrfs_path *p, int find_higher, 2434 int return_any) 2435 { 2436 int ret; 2437 struct extent_buffer *leaf; 2438 2439 again: 2440 ret = btrfs_search_slot(NULL, root, key, p, 0, 0); 2441 if (ret <= 0) 2442 return ret; 2443 /* 2444 * a return value of 1 means the path is at the position where the 2445 * item should be inserted. Normally this is the next bigger item, 2446 * but in case the previous item is the last in a leaf, path points 2447 * to the first free slot in the previous leaf, i.e. at an invalid 2448 * item.
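 *
 * Worked example (added note): if the last leaf holds keys [A, B] and we search for a key greater than B, the return value is 1 and path->slots[0] == 2 == nritems, i.e. one slot past the last valid item, so the slot must be validated before it is dereferenced.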
2449 */ 2450 leaf = p->nodes[0]; 2451 2452 if (find_higher) { 2453 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2454 ret = btrfs_next_leaf(root, p); 2455 if (ret <= 0) 2456 return ret; 2457 if (!return_any) 2458 return 1; 2459 /* 2460 * no higher item found, return the next 2461 * lower instead 2462 */ 2463 return_any = 0; 2464 find_higher = 0; 2465 btrfs_release_path(p); 2466 goto again; 2467 } 2468 } else { 2469 if (p->slots[0] == 0) { 2470 ret = btrfs_prev_leaf(root, p); 2471 if (ret < 0) 2472 return ret; 2473 if (!ret) { 2474 leaf = p->nodes[0]; 2475 if (p->slots[0] == btrfs_header_nritems(leaf)) 2476 p->slots[0]--; 2477 return 0; 2478 } 2479 if (!return_any) 2480 return 1; 2481 /* 2482 * no lower item found, return the next 2483 * higher instead 2484 */ 2485 return_any = 0; 2486 find_higher = 1; 2487 btrfs_release_path(p); 2488 goto again; 2489 } else { 2490 --p->slots[0]; 2491 } 2492 } 2493 return 0; 2494 } 2495 2496 /* 2497 * Execute search and call btrfs_previous_item to traverse backwards if the item 2498 * was not found. 2499 * 2500 * Return 0 if found, 1 if not found and < 0 if error. 2501 */ 2502 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key, 2503 struct btrfs_path *path) 2504 { 2505 int ret; 2506 2507 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 2508 if (ret > 0) 2509 ret = btrfs_previous_item(root, path, key->objectid, key->type); 2510 2511 if (ret == 0) 2512 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2513 2514 return ret; 2515 } 2516 2517 /* 2518 * Search for a valid slot for the given path. 2519 * 2520 * @root: The root node of the tree. 2521 * @key: Will contain a valid item if found. 2522 * @path: The starting point to validate the slot. 2523 * 2524 * Return: 0 if the item is valid 2525 * 1 if not found 2526 * <0 if error. 2527 */ 2528 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, 2529 struct btrfs_path *path) 2530 { 2531 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 2532 int ret; 2533 2534 ret = btrfs_next_leaf(root, path); 2535 if (ret) 2536 return ret; 2537 } 2538 2539 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2540 return 0; 2541 } 2542 2543 /* 2544 * adjust the pointers going up the tree, starting at level, 2545 * making sure the right key of each node points to 'key'. 2546 * This is used after shifting pointers to the left, so it stops 2547 * fixing up pointers when a given leaf/node is not in slot 0 of the 2548 * higher levels 2549 * 2550 */ 2551 static void fixup_low_keys(struct btrfs_trans_handle *trans, 2552 struct btrfs_path *path, 2553 struct btrfs_disk_key *key, int level) 2554 { 2555 int i; 2556 struct extent_buffer *t; 2557 int ret; 2558 2559 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2560 int tslot = path->slots[i]; 2561 2562 if (!path->nodes[i]) 2563 break; 2564 t = path->nodes[i]; 2565 ret = btrfs_tree_mod_log_insert_key(t, tslot, 2566 BTRFS_MOD_LOG_KEY_REPLACE); 2567 BUG_ON(ret < 0); 2568 btrfs_set_node_key(t, key, tslot); 2569 btrfs_mark_buffer_dirty(trans, path->nodes[i]); 2570 if (tslot != 0) 2571 break; 2572 } 2573 } 2574 2575 /* 2576 * update item key. 2577 * 2578 * This function isn't completely safe.
It's the caller's responsibility 2579 * to ensure that the new key won't break the ordering 2580 */ 2581 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, 2582 struct btrfs_path *path, 2583 const struct btrfs_key *new_key) 2584 { 2585 struct btrfs_fs_info *fs_info = trans->fs_info; 2586 struct btrfs_disk_key disk_key; 2587 struct extent_buffer *eb; 2588 int slot; 2589 2590 eb = path->nodes[0]; 2591 slot = path->slots[0]; 2592 if (slot > 0) { 2593 btrfs_item_key(eb, &disk_key, slot - 1); 2594 if (unlikely(btrfs_comp_keys(&disk_key, new_key) >= 0)) { 2595 btrfs_print_leaf(eb); 2596 btrfs_crit(fs_info, 2597 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2598 slot, btrfs_disk_key_objectid(&disk_key), 2599 btrfs_disk_key_type(&disk_key), 2600 btrfs_disk_key_offset(&disk_key), 2601 new_key->objectid, new_key->type, 2602 new_key->offset); 2603 BUG(); 2604 } 2605 } 2606 if (slot < btrfs_header_nritems(eb) - 1) { 2607 btrfs_item_key(eb, &disk_key, slot + 1); 2608 if (unlikely(btrfs_comp_keys(&disk_key, new_key) <= 0)) { 2609 btrfs_print_leaf(eb); 2610 btrfs_crit(fs_info, 2611 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2612 slot, btrfs_disk_key_objectid(&disk_key), 2613 btrfs_disk_key_type(&disk_key), 2614 btrfs_disk_key_offset(&disk_key), 2615 new_key->objectid, new_key->type, 2616 new_key->offset); 2617 BUG(); 2618 } 2619 } 2620 2621 btrfs_cpu_key_to_disk(&disk_key, new_key); 2622 btrfs_set_item_key(eb, &disk_key, slot); 2623 btrfs_mark_buffer_dirty(trans, eb); 2624 if (slot == 0) 2625 fixup_low_keys(trans, path, &disk_key, 1); 2626 } 2627 2628 /* 2629 * Check key order of two sibling extent buffers. 2630 * 2631 * Return true if something is wrong. 2632 * Return false if everything is fine. 2633 * 2634 * Tree-checker only works inside one tree block, thus the following 2635 * corruption cannot be detected by tree-checker: 2636 * 2637 * Leaf @left | Leaf @right 2638 * -------------------------------------------------------------- 2639 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 | 2640 * 2641 * Key f6 in leaf @left itself is valid, but not valid when the next 2642 * key in leaf @right is 7. 2643 * This can only be checked at tree block merge time. 2644 * And since tree checker has ensured all key order in each tree block 2645 * is correct, we only need to check the last key of @left and the first 2646 * key of @right.
2647 */ 2648 static bool check_sibling_keys(struct extent_buffer *left, 2649 struct extent_buffer *right) 2650 { 2651 struct btrfs_key left_last; 2652 struct btrfs_key right_first; 2653 int level = btrfs_header_level(left); 2654 int nr_left = btrfs_header_nritems(left); 2655 int nr_right = btrfs_header_nritems(right); 2656 2657 /* No key to check in one of the tree blocks */ 2658 if (!nr_left || !nr_right) 2659 return false; 2660 2661 if (level) { 2662 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1); 2663 btrfs_node_key_to_cpu(right, &right_first, 0); 2664 } else { 2665 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1); 2666 btrfs_item_key_to_cpu(right, &right_first, 0); 2667 } 2668 2669 if (unlikely(btrfs_comp_cpu_keys(&left_last, &right_first) >= 0)) { 2670 btrfs_crit(left->fs_info, "left extent buffer:"); 2671 btrfs_print_tree(left, false); 2672 btrfs_crit(left->fs_info, "right extent buffer:"); 2673 btrfs_print_tree(right, false); 2674 btrfs_crit(left->fs_info, 2675 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)", 2676 left_last.objectid, left_last.type, 2677 left_last.offset, right_first.objectid, 2678 right_first.type, right_first.offset); 2679 return true; 2680 } 2681 return false; 2682 } 2683 2684 /* 2685 * try to push data from one node into the next node left in the 2686 * tree. 2687 * 2688 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2689 * error, and > 0 if there was no room in the left hand block. 2690 */ 2691 static int push_node_left(struct btrfs_trans_handle *trans, 2692 struct extent_buffer *dst, 2693 struct extent_buffer *src, int empty) 2694 { 2695 struct btrfs_fs_info *fs_info = trans->fs_info; 2696 int push_items = 0; 2697 int src_nritems; 2698 int dst_nritems; 2699 int ret = 0; 2700 2701 src_nritems = btrfs_header_nritems(src); 2702 dst_nritems = btrfs_header_nritems(dst); 2703 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2704 WARN_ON(btrfs_header_generation(src) != trans->transid); 2705 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2706 2707 if (!empty && src_nritems <= 8) 2708 return 1; 2709 2710 if (push_items <= 0) 2711 return 1; 2712 2713 if (empty) { 2714 push_items = min(src_nritems, push_items); 2715 if (push_items < src_nritems) { 2716 /* leave at least 8 pointers in the node if 2717 * we aren't going to empty it 2718 */ 2719 if (src_nritems - push_items < 8) { 2720 if (push_items <= 8) 2721 return 1; 2722 push_items -= 8; 2723 } 2724 } 2725 } else 2726 push_items = min(src_nritems - 8, push_items); 2727 2728 /* dst is the left eb, src is the middle eb */ 2729 if (check_sibling_keys(dst, src)) { 2730 ret = -EUCLEAN; 2731 btrfs_abort_transaction(trans, ret); 2732 return ret; 2733 } 2734 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items); 2735 if (ret) { 2736 btrfs_abort_transaction(trans, ret); 2737 return ret; 2738 } 2739 copy_extent_buffer(dst, src, 2740 btrfs_node_key_ptr_offset(dst, dst_nritems), 2741 btrfs_node_key_ptr_offset(src, 0), 2742 push_items * sizeof(struct btrfs_key_ptr)); 2743 2744 if (push_items < src_nritems) { 2745 /* 2746 * btrfs_tree_mod_log_eb_copy handles logging the move, so we 2747 * don't need to do an explicit tree mod log operation for it. 
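 *
 * Added clarification: the memmove below slides the remaining src_nritems - push_items key pointers to the front of @src, now that the first push_items pointers have been copied into @dst above.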
2748 */ 2749 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0), 2750 btrfs_node_key_ptr_offset(src, push_items), 2751 (src_nritems - push_items) * 2752 sizeof(struct btrfs_key_ptr)); 2753 } 2754 btrfs_set_header_nritems(src, src_nritems - push_items); 2755 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2756 btrfs_mark_buffer_dirty(trans, src); 2757 btrfs_mark_buffer_dirty(trans, dst); 2758 2759 return ret; 2760 } 2761 2762 /* 2763 * try to push data from one node into the next node right in the 2764 * tree. 2765 * 2766 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 2767 * error, and > 0 if there was no room in the right hand block. 2768 * 2769 * this will only push up to 1/2 the contents of the left node over 2770 */ 2771 static int balance_node_right(struct btrfs_trans_handle *trans, 2772 struct extent_buffer *dst, 2773 struct extent_buffer *src) 2774 { 2775 struct btrfs_fs_info *fs_info = trans->fs_info; 2776 int push_items = 0; 2777 int max_push; 2778 int src_nritems; 2779 int dst_nritems; 2780 int ret = 0; 2781 2782 WARN_ON(btrfs_header_generation(src) != trans->transid); 2783 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2784 2785 src_nritems = btrfs_header_nritems(src); 2786 dst_nritems = btrfs_header_nritems(dst); 2787 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2788 if (push_items <= 0) 2789 return 1; 2790 2791 if (src_nritems < 4) 2792 return 1; 2793 2794 max_push = src_nritems / 2 + 1; 2795 /* don't try to empty the node */ 2796 if (max_push >= src_nritems) 2797 return 1; 2798 2799 if (max_push < push_items) 2800 push_items = max_push; 2801 2802 /* dst is the right eb, src is the middle eb */ 2803 if (check_sibling_keys(src, dst)) { 2804 ret = -EUCLEAN; 2805 btrfs_abort_transaction(trans, ret); 2806 return ret; 2807 } 2808 2809 /* 2810 * btrfs_tree_mod_log_eb_copy handles logging the move, so we don't 2811 * need to do an explicit tree mod log operation for it. 2812 */ 2813 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items), 2814 btrfs_node_key_ptr_offset(dst, 0), 2815 (dst_nritems) * 2816 sizeof(struct btrfs_key_ptr)); 2817 2818 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items, 2819 push_items); 2820 if (ret) { 2821 btrfs_abort_transaction(trans, ret); 2822 return ret; 2823 } 2824 copy_extent_buffer(dst, src, 2825 btrfs_node_key_ptr_offset(dst, 0), 2826 btrfs_node_key_ptr_offset(src, src_nritems - push_items), 2827 push_items * sizeof(struct btrfs_key_ptr)); 2828 2829 btrfs_set_header_nritems(src, src_nritems - push_items); 2830 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2831 2832 btrfs_mark_buffer_dirty(trans, src); 2833 btrfs_mark_buffer_dirty(trans, dst); 2834 2835 return ret; 2836 } 2837 2838 /* 2839 * helper function to insert a new root level in the tree. 2840 * A new node is allocated, and a single item is inserted to 2841 * point to the existing root 2842 * 2843 * returns zero on success or < 0 on failure. 
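 *
 * For example (added note): when a tree consisting of a single leaf (level 0) needs to grow, a new level 1 node is allocated, its single key pointer is set to the first key of the old leaf, and root->node is switched over to the new node, increasing the tree height by one.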
2844 */ 2845 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 2846 struct btrfs_root *root, 2847 struct btrfs_path *path, int level) 2848 { 2849 u64 lower_gen; 2850 struct extent_buffer *lower; 2851 struct extent_buffer *c; 2852 struct extent_buffer *old; 2853 struct btrfs_disk_key lower_key; 2854 int ret; 2855 2856 BUG_ON(path->nodes[level]); 2857 BUG_ON(path->nodes[level-1] != root->node); 2858 2859 lower = path->nodes[level-1]; 2860 if (level == 1) 2861 btrfs_item_key(lower, &lower_key, 0); 2862 else 2863 btrfs_node_key(lower, &lower_key, 0); 2864 2865 c = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root), 2866 &lower_key, level, root->node->start, 0, 2867 0, BTRFS_NESTING_NEW_ROOT); 2868 if (IS_ERR(c)) 2869 return PTR_ERR(c); 2870 2871 root_add_used_bytes(root); 2872 2873 btrfs_set_header_nritems(c, 1); 2874 btrfs_set_node_key(c, &lower_key, 0); 2875 btrfs_set_node_blockptr(c, 0, lower->start); 2876 lower_gen = btrfs_header_generation(lower); 2877 WARN_ON(lower_gen != trans->transid); 2878 2879 btrfs_set_node_ptr_generation(c, 0, lower_gen); 2880 2881 btrfs_mark_buffer_dirty(trans, c); 2882 2883 old = root->node; 2884 ret = btrfs_tree_mod_log_insert_root(root->node, c, false); 2885 if (ret < 0) { 2886 btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1); 2887 btrfs_tree_unlock(c); 2888 free_extent_buffer(c); 2889 return ret; 2890 } 2891 rcu_assign_pointer(root->node, c); 2892 2893 /* the super has an extra ref to root->node */ 2894 free_extent_buffer(old); 2895 2896 add_root_to_dirty_list(root); 2897 atomic_inc(&c->refs); 2898 path->nodes[level] = c; 2899 path->locks[level] = BTRFS_WRITE_LOCK; 2900 path->slots[level] = 0; 2901 return 0; 2902 } 2903 2904 /* 2905 * worker function to insert a single pointer in a node. 2906 * the node should have enough room for the pointer already 2907 * 2908 * slot and level indicate where you want the key to go, and 2909 * blocknr is the block the key points to. 2910 */ 2911 static int insert_ptr(struct btrfs_trans_handle *trans, 2912 struct btrfs_path *path, 2913 struct btrfs_disk_key *key, u64 bytenr, 2914 int slot, int level) 2915 { 2916 struct extent_buffer *lower; 2917 int nritems; 2918 int ret; 2919 2920 BUG_ON(!path->nodes[level]); 2921 btrfs_assert_tree_write_locked(path->nodes[level]); 2922 lower = path->nodes[level]; 2923 nritems = btrfs_header_nritems(lower); 2924 BUG_ON(slot > nritems); 2925 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info)); 2926 if (slot != nritems) { 2927 if (level) { 2928 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1, 2929 slot, nritems - slot); 2930 if (ret < 0) { 2931 btrfs_abort_transaction(trans, ret); 2932 return ret; 2933 } 2934 } 2935 memmove_extent_buffer(lower, 2936 btrfs_node_key_ptr_offset(lower, slot + 1), 2937 btrfs_node_key_ptr_offset(lower, slot), 2938 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 2939 } 2940 if (level) { 2941 ret = btrfs_tree_mod_log_insert_key(lower, slot, 2942 BTRFS_MOD_LOG_KEY_ADD); 2943 if (ret < 0) { 2944 btrfs_abort_transaction(trans, ret); 2945 return ret; 2946 } 2947 } 2948 btrfs_set_node_key(lower, key, slot); 2949 btrfs_set_node_blockptr(lower, slot, bytenr); 2950 WARN_ON(trans->transid == 0); 2951 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 2952 btrfs_set_header_nritems(lower, nritems + 1); 2953 btrfs_mark_buffer_dirty(trans, lower); 2954 2955 return 0; 2956 } 2957 2958 /* 2959 * split the node at the specified level in path in two. 
2960 * The path is corrected to point to the appropriate node after the split 2961 * 2962 * Before splitting this tries to make some room in the node by pushing 2963 * left and right; if either one works, it returns right away. 2964 * 2965 * returns 0 on success and < 0 on failure 2966 */ 2967 static noinline int split_node(struct btrfs_trans_handle *trans, 2968 struct btrfs_root *root, 2969 struct btrfs_path *path, int level) 2970 { 2971 struct btrfs_fs_info *fs_info = root->fs_info; 2972 struct extent_buffer *c; 2973 struct extent_buffer *split; 2974 struct btrfs_disk_key disk_key; 2975 int mid; 2976 int ret; 2977 u32 c_nritems; 2978 2979 c = path->nodes[level]; 2980 WARN_ON(btrfs_header_generation(c) != trans->transid); 2981 if (c == root->node) { 2982 /* 2983 * trying to split the root, let's make a new one 2984 * 2985 * tree mod log: We don't log the removal of the old root in 2986 * insert_new_root(), because that root buffer will be kept as a 2987 * normal node. We are going to log removal of half of the 2988 * elements below with btrfs_tree_mod_log_eb_copy(). We're 2989 * holding a tree lock on the buffer, which is why we cannot 2990 * race with other tree_mod_log users. 2991 */ 2992 ret = insert_new_root(trans, root, path, level + 1); 2993 if (ret) 2994 return ret; 2995 } else { 2996 ret = push_nodes_for_insert(trans, root, path, level); 2997 c = path->nodes[level]; 2998 if (!ret && btrfs_header_nritems(c) < 2999 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) 3000 return 0; 3001 if (ret < 0) 3002 return ret; 3003 } 3004 3005 c_nritems = btrfs_header_nritems(c); 3006 mid = (c_nritems + 1) / 2; 3007 btrfs_node_key(c, &disk_key, mid); 3008 3009 split = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root), 3010 &disk_key, level, c->start, 0, 3011 0, BTRFS_NESTING_SPLIT); 3012 if (IS_ERR(split)) 3013 return PTR_ERR(split); 3014 3015 root_add_used_bytes(root); 3016 ASSERT(btrfs_header_level(c) == level); 3017 3018 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); 3019 if (ret) { 3020 btrfs_tree_unlock(split); 3021 free_extent_buffer(split); 3022 btrfs_abort_transaction(trans, ret); 3023 return ret; 3024 } 3025 copy_extent_buffer(split, c, 3026 btrfs_node_key_ptr_offset(split, 0), 3027 btrfs_node_key_ptr_offset(c, mid), 3028 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 3029 btrfs_set_header_nritems(split, c_nritems - mid); 3030 btrfs_set_header_nritems(c, mid); 3031 3032 btrfs_mark_buffer_dirty(trans, c); 3033 btrfs_mark_buffer_dirty(trans, split); 3034 3035 ret = insert_ptr(trans, path, &disk_key, split->start, 3036 path->slots[level + 1] + 1, level + 1); 3037 if (ret < 0) { 3038 btrfs_tree_unlock(split); 3039 free_extent_buffer(split); 3040 return ret; 3041 } 3042 3043 if (path->slots[level] >= mid) { 3044 path->slots[level] -= mid; 3045 btrfs_tree_unlock(c); 3046 free_extent_buffer(c); 3047 path->nodes[level] = split; 3048 path->slots[level + 1] += 1; 3049 } else { 3050 btrfs_tree_unlock(split); 3051 free_extent_buffer(split); 3052 } 3053 return 0; 3054 } 3055 3056 /* 3057 * how many bytes are required to store the items in a leaf. start 3058 * and nr indicate which items in the leaf to check.
This totals up the 3059 * space used both by the item structs and the item data 3060 */ 3061 static int leaf_space_used(const struct extent_buffer *l, int start, int nr) 3062 { 3063 int data_len; 3064 int nritems = btrfs_header_nritems(l); 3065 int end = min(nritems, start + nr) - 1; 3066 3067 if (!nr) 3068 return 0; 3069 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start); 3070 data_len = data_len - btrfs_item_offset(l, end); 3071 data_len += sizeof(struct btrfs_item) * nr; 3072 WARN_ON(data_len < 0); 3073 return data_len; 3074 } 3075 3076 /* 3077 * The space between the end of the leaf items and 3078 * the start of the leaf data. IOW, how much room 3079 * the leaf has left for both items and data 3080 */ 3081 int btrfs_leaf_free_space(const struct extent_buffer *leaf) 3082 { 3083 struct btrfs_fs_info *fs_info = leaf->fs_info; 3084 int nritems = btrfs_header_nritems(leaf); 3085 int ret; 3086 3087 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); 3088 if (ret < 0) { 3089 btrfs_crit(fs_info, 3090 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 3091 ret, 3092 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info), 3093 leaf_space_used(leaf, 0, nritems), nritems); 3094 } 3095 return ret; 3096 } 3097 3098 /* 3099 * min slot controls the lowest index we're willing to push to the 3100 * right. We'll push up to and including min_slot, but no lower 3101 */ 3102 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, 3103 struct btrfs_path *path, 3104 int data_size, int empty, 3105 struct extent_buffer *right, 3106 int free_space, u32 left_nritems, 3107 u32 min_slot) 3108 { 3109 struct btrfs_fs_info *fs_info = right->fs_info; 3110 struct extent_buffer *left = path->nodes[0]; 3111 struct extent_buffer *upper = path->nodes[1]; 3112 struct btrfs_map_token token; 3113 struct btrfs_disk_key disk_key; 3114 int slot; 3115 u32 i; 3116 int push_space = 0; 3117 int push_items = 0; 3118 u32 nr; 3119 u32 right_nritems; 3120 u32 data_end; 3121 u32 this_item_size; 3122 3123 if (empty) 3124 nr = 0; 3125 else 3126 nr = max_t(u32, 1, min_slot); 3127 3128 if (path->slots[0] >= left_nritems) 3129 push_space += data_size; 3130 3131 slot = path->slots[1]; 3132 i = left_nritems - 1; 3133 while (i >= nr) { 3134 if (!empty && push_items > 0) { 3135 if (path->slots[0] > i) 3136 break; 3137 if (path->slots[0] == i) { 3138 int space = btrfs_leaf_free_space(left); 3139 3140 if (space + push_space * 2 > free_space) 3141 break; 3142 } 3143 } 3144 3145 if (path->slots[0] == i) 3146 push_space += data_size; 3147 3148 this_item_size = btrfs_item_size(left, i); 3149 if (this_item_size + sizeof(struct btrfs_item) + 3150 push_space > free_space) 3151 break; 3152 3153 push_items++; 3154 push_space += this_item_size + sizeof(struct btrfs_item); 3155 if (i == 0) 3156 break; 3157 i--; 3158 } 3159 3160 if (push_items == 0) 3161 goto out_unlock; 3162 3163 WARN_ON(!empty && push_items == left_nritems); 3164 3165 /* push left to right */ 3166 right_nritems = btrfs_header_nritems(right); 3167 3168 push_space = btrfs_item_data_end(left, left_nritems - push_items); 3169 push_space -= leaf_data_end(left); 3170 3171 /* make room in the right data area */ 3172 data_end = leaf_data_end(right); 3173 memmove_leaf_data(right, data_end - push_space, data_end, 3174 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end); 3175 3176 /* copy from the left data area */ 3177 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3178 leaf_data_end(left), push_space); 3179 3180 
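/* (Added clarification): the item data has been copied above; now shift the existing struct btrfs_item headers in @right from slot 0 to slot push_items, making room at the front for the item headers copied from @left just below. */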
memmove_leaf_items(right, push_items, 0, right_nritems); 3181 3182 /* copy the items from left to right */ 3183 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items); 3184 3185 /* update the item pointers */ 3186 btrfs_init_map_token(&token, right); 3187 right_nritems += push_items; 3188 btrfs_set_header_nritems(right, right_nritems); 3189 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3190 for (i = 0; i < right_nritems; i++) { 3191 push_space -= btrfs_token_item_size(&token, i); 3192 btrfs_set_token_item_offset(&token, i, push_space); 3193 } 3194 3195 left_nritems -= push_items; 3196 btrfs_set_header_nritems(left, left_nritems); 3197 3198 if (left_nritems) 3199 btrfs_mark_buffer_dirty(trans, left); 3200 else 3201 btrfs_clear_buffer_dirty(trans, left); 3202 3203 btrfs_mark_buffer_dirty(trans, right); 3204 3205 btrfs_item_key(right, &disk_key, 0); 3206 btrfs_set_node_key(upper, &disk_key, slot + 1); 3207 btrfs_mark_buffer_dirty(trans, upper); 3208 3209 /* then fixup the leaf pointer in the path */ 3210 if (path->slots[0] >= left_nritems) { 3211 path->slots[0] -= left_nritems; 3212 if (btrfs_header_nritems(path->nodes[0]) == 0) 3213 btrfs_clear_buffer_dirty(trans, path->nodes[0]); 3214 btrfs_tree_unlock(path->nodes[0]); 3215 free_extent_buffer(path->nodes[0]); 3216 path->nodes[0] = right; 3217 path->slots[1] += 1; 3218 } else { 3219 btrfs_tree_unlock(right); 3220 free_extent_buffer(right); 3221 } 3222 return 0; 3223 3224 out_unlock: 3225 btrfs_tree_unlock(right); 3226 free_extent_buffer(right); 3227 return 1; 3228 } 3229 3230 /* 3231 * push some data in the path leaf to the right, trying to free up at 3232 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3233 * 3234 * returns 1 if the push failed because the other node didn't have enough 3235 * room, 0 if everything worked out and < 0 if there were major errors. 3236 * 3237 * this will push starting from min_slot to the end of the leaf. 
It won't 3238 * push any slot lower than min_slot 3239 */ 3240 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3241 *root, struct btrfs_path *path, 3242 int min_data_size, int data_size, 3243 int empty, u32 min_slot) 3244 { 3245 struct extent_buffer *left = path->nodes[0]; 3246 struct extent_buffer *right; 3247 struct extent_buffer *upper; 3248 int slot; 3249 int free_space; 3250 u32 left_nritems; 3251 int ret; 3252 3253 if (!path->nodes[1]) 3254 return 1; 3255 3256 slot = path->slots[1]; 3257 upper = path->nodes[1]; 3258 if (slot >= btrfs_header_nritems(upper) - 1) 3259 return 1; 3260 3261 btrfs_assert_tree_write_locked(path->nodes[1]); 3262 3263 right = btrfs_read_node_slot(upper, slot + 1); 3264 if (IS_ERR(right)) 3265 return PTR_ERR(right); 3266 3267 btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT); 3268 3269 free_space = btrfs_leaf_free_space(right); 3270 if (free_space < data_size) 3271 goto out_unlock; 3272 3273 ret = btrfs_cow_block(trans, root, right, upper, 3274 slot + 1, &right, BTRFS_NESTING_RIGHT_COW); 3275 if (ret) 3276 goto out_unlock; 3277 3278 left_nritems = btrfs_header_nritems(left); 3279 if (left_nritems == 0) 3280 goto out_unlock; 3281 3282 if (check_sibling_keys(left, right)) { 3283 ret = -EUCLEAN; 3284 btrfs_abort_transaction(trans, ret); 3285 btrfs_tree_unlock(right); 3286 free_extent_buffer(right); 3287 return ret; 3288 } 3289 if (path->slots[0] == left_nritems && !empty) { 3290 /* Key greater than all keys in the leaf, right neighbor has 3291 * enough room for it and we're not emptying our leaf to delete 3292 * it, therefore use right neighbor to insert the new item and 3293 * no need to touch/dirty our left leaf. */ 3294 btrfs_tree_unlock(left); 3295 free_extent_buffer(left); 3296 path->nodes[0] = right; 3297 path->slots[0] = 0; 3298 path->slots[1]++; 3299 return 0; 3300 } 3301 3302 return __push_leaf_right(trans, path, min_data_size, empty, right, 3303 free_space, left_nritems, min_slot); 3304 out_unlock: 3305 btrfs_tree_unlock(right); 3306 free_extent_buffer(right); 3307 return 1; 3308 } 3309 3310 /* 3311 * push some data in the path leaf to the left, trying to free up at 3312 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3313 * 3314 * max_slot can put a limit on how far into the leaf we'll push items. The 3315 * item at 'max_slot' won't be touched. 
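 * For example (added note): with max_slot == 3, only items 0-2 of the right leaf are candidates to be pushed into the left leaf.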
Use (u32)-1 to make us do all the 3316 * items 3317 */ 3318 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, 3319 struct btrfs_path *path, int data_size, 3320 int empty, struct extent_buffer *left, 3321 int free_space, u32 right_nritems, 3322 u32 max_slot) 3323 { 3324 struct btrfs_fs_info *fs_info = left->fs_info; 3325 struct btrfs_disk_key disk_key; 3326 struct extent_buffer *right = path->nodes[0]; 3327 int i; 3328 int push_space = 0; 3329 int push_items = 0; 3330 u32 old_left_nritems; 3331 u32 nr; 3332 int ret = 0; 3333 u32 this_item_size; 3334 u32 old_left_item_size; 3335 struct btrfs_map_token token; 3336 3337 if (empty) 3338 nr = min(right_nritems, max_slot); 3339 else 3340 nr = min(right_nritems - 1, max_slot); 3341 3342 for (i = 0; i < nr; i++) { 3343 if (!empty && push_items > 0) { 3344 if (path->slots[0] < i) 3345 break; 3346 if (path->slots[0] == i) { 3347 int space = btrfs_leaf_free_space(right); 3348 3349 if (space + push_space * 2 > free_space) 3350 break; 3351 } 3352 } 3353 3354 if (path->slots[0] == i) 3355 push_space += data_size; 3356 3357 this_item_size = btrfs_item_size(right, i); 3358 if (this_item_size + sizeof(struct btrfs_item) + push_space > 3359 free_space) 3360 break; 3361 3362 push_items++; 3363 push_space += this_item_size + sizeof(struct btrfs_item); 3364 } 3365 3366 if (push_items == 0) { 3367 ret = 1; 3368 goto out; 3369 } 3370 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 3371 3372 /* push data from right to left */ 3373 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items); 3374 3375 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) - 3376 btrfs_item_offset(right, push_items - 1); 3377 3378 copy_leaf_data(left, right, leaf_data_end(left) - push_space, 3379 btrfs_item_offset(right, push_items - 1), push_space); 3380 old_left_nritems = btrfs_header_nritems(left); 3381 BUG_ON(old_left_nritems <= 0); 3382 3383 btrfs_init_map_token(&token, left); 3384 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1); 3385 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3386 u32 ioff; 3387 3388 ioff = btrfs_token_item_offset(&token, i); 3389 btrfs_set_token_item_offset(&token, i, 3390 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 3391 } 3392 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3393 3394 /* fixup right node */ 3395 if (push_items > right_nritems) 3396 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3397 right_nritems); 3398 3399 if (push_items < right_nritems) { 3400 push_space = btrfs_item_offset(right, push_items - 1) - 3401 leaf_data_end(right); 3402 memmove_leaf_data(right, 3403 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3404 leaf_data_end(right), push_space); 3405 3406 memmove_leaf_items(right, 0, push_items, 3407 btrfs_header_nritems(right) - push_items); 3408 } 3409 3410 btrfs_init_map_token(&token, right); 3411 right_nritems -= push_items; 3412 btrfs_set_header_nritems(right, right_nritems); 3413 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3414 for (i = 0; i < right_nritems; i++) { 3415 push_space = push_space - btrfs_token_item_size(&token, i); 3416 btrfs_set_token_item_offset(&token, i, push_space); 3417 } 3418 3419 btrfs_mark_buffer_dirty(trans, left); 3420 if (right_nritems) 3421 btrfs_mark_buffer_dirty(trans, right); 3422 else 3423 btrfs_clear_buffer_dirty(trans, right); 3424 3425 btrfs_item_key(right, &disk_key, 0); 3426 fixup_low_keys(trans, path, &disk_key, 1); 3427 3428 /* then fixup the leaf pointer in the path */ 3429 if 
(path->slots[0] < push_items) { 3430 path->slots[0] += old_left_nritems; 3431 btrfs_tree_unlock(path->nodes[0]); 3432 free_extent_buffer(path->nodes[0]); 3433 path->nodes[0] = left; 3434 path->slots[1] -= 1; 3435 } else { 3436 btrfs_tree_unlock(left); 3437 free_extent_buffer(left); 3438 path->slots[0] -= push_items; 3439 } 3440 BUG_ON(path->slots[0] < 0); 3441 return ret; 3442 out: 3443 btrfs_tree_unlock(left); 3444 free_extent_buffer(left); 3445 return ret; 3446 } 3447 3448 /* 3449 * push some data in the path leaf to the left, trying to free up at 3450 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3451 * 3452 * max_slot can put a limit on how far into the leaf we'll push items. The 3453 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3454 * items 3455 */ 3456 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3457 *root, struct btrfs_path *path, int min_data_size, 3458 int data_size, int empty, u32 max_slot) 3459 { 3460 struct extent_buffer *right = path->nodes[0]; 3461 struct extent_buffer *left; 3462 int slot; 3463 int free_space; 3464 u32 right_nritems; 3465 int ret = 0; 3466 3467 slot = path->slots[1]; 3468 if (slot == 0) 3469 return 1; 3470 if (!path->nodes[1]) 3471 return 1; 3472 3473 right_nritems = btrfs_header_nritems(right); 3474 if (right_nritems == 0) 3475 return 1; 3476 3477 btrfs_assert_tree_write_locked(path->nodes[1]); 3478 3479 left = btrfs_read_node_slot(path->nodes[1], slot - 1); 3480 if (IS_ERR(left)) 3481 return PTR_ERR(left); 3482 3483 btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT); 3484 3485 free_space = btrfs_leaf_free_space(left); 3486 if (free_space < data_size) { 3487 ret = 1; 3488 goto out; 3489 } 3490 3491 ret = btrfs_cow_block(trans, root, left, 3492 path->nodes[1], slot - 1, &left, 3493 BTRFS_NESTING_LEFT_COW); 3494 if (ret) { 3495 /* we hit -ENOSPC, but it isn't fatal here */ 3496 if (ret == -ENOSPC) 3497 ret = 1; 3498 goto out; 3499 } 3500 3501 if (check_sibling_keys(left, right)) { 3502 ret = -EUCLEAN; 3503 btrfs_abort_transaction(trans, ret); 3504 goto out; 3505 } 3506 return __push_leaf_left(trans, path, min_data_size, empty, left, 3507 free_space, right_nritems, max_slot); 3508 out: 3509 btrfs_tree_unlock(left); 3510 free_extent_buffer(left); 3511 return ret; 3512 } 3513 3514 /* 3515 * split the path's leaf in two, making sure there is at least data_size 3516 * available for the resulting leaf level of the path. 
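 *
 * Worked example (added note): with nritems == 10 and mid == 5, items 5-9 and their data are copied into @right, @l keeps items 0-4, and the first key of @right is inserted into the parent through insert_ptr().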
3517 */ 3518 static noinline int copy_for_split(struct btrfs_trans_handle *trans, 3519 struct btrfs_path *path, 3520 struct extent_buffer *l, 3521 struct extent_buffer *right, 3522 int slot, int mid, int nritems) 3523 { 3524 struct btrfs_fs_info *fs_info = trans->fs_info; 3525 int data_copy_size; 3526 int rt_data_off; 3527 int i; 3528 int ret; 3529 struct btrfs_disk_key disk_key; 3530 struct btrfs_map_token token; 3531 3532 nritems = nritems - mid; 3533 btrfs_set_header_nritems(right, nritems); 3534 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l); 3535 3536 copy_leaf_items(right, l, 0, mid, nritems); 3537 3538 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size, 3539 leaf_data_end(l), data_copy_size); 3540 3541 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid); 3542 3543 btrfs_init_map_token(&token, right); 3544 for (i = 0; i < nritems; i++) { 3545 u32 ioff; 3546 3547 ioff = btrfs_token_item_offset(&token, i); 3548 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off); 3549 } 3550 3551 btrfs_set_header_nritems(l, mid); 3552 btrfs_item_key(right, &disk_key, 0); 3553 ret = insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); 3554 if (ret < 0) 3555 return ret; 3556 3557 btrfs_mark_buffer_dirty(trans, right); 3558 btrfs_mark_buffer_dirty(trans, l); 3559 BUG_ON(path->slots[0] != slot); 3560 3561 if (mid <= slot) { 3562 btrfs_tree_unlock(path->nodes[0]); 3563 free_extent_buffer(path->nodes[0]); 3564 path->nodes[0] = right; 3565 path->slots[0] -= mid; 3566 path->slots[1] += 1; 3567 } else { 3568 btrfs_tree_unlock(right); 3569 free_extent_buffer(right); 3570 } 3571 3572 BUG_ON(path->slots[0] < 0); 3573 3574 return 0; 3575 } 3576 3577 /* 3578 * double splits happen when we need to insert a big item in the middle 3579 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3580 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3581 * A B C 3582 * 3583 * We avoid this by trying to push the items on either side of our target 3584 * into the adjacent leaves. If all goes well we can avoid the double split 3585 * completely. 3586 */ 3587 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3588 struct btrfs_root *root, 3589 struct btrfs_path *path, 3590 int data_size) 3591 { 3592 int ret; 3593 int progress = 0; 3594 int slot; 3595 u32 nritems; 3596 int space_needed = data_size; 3597 3598 slot = path->slots[0]; 3599 if (slot < btrfs_header_nritems(path->nodes[0])) 3600 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3601 3602 /* 3603 * try to push all the items after our slot into the 3604 * right leaf 3605 */ 3606 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 3607 if (ret < 0) 3608 return ret; 3609 3610 if (ret == 0) 3611 progress++; 3612 3613 nritems = btrfs_header_nritems(path->nodes[0]); 3614 /* 3615 * our goal is to get our slot at the start or end of a leaf. 
If 3616 * we've done so, we're done 3617 */ 3618 if (path->slots[0] == 0 || path->slots[0] == nritems) 3619 return 0; 3620 3621 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3622 return 0; 3623 3624 /* try to push all the items before our slot into the left leaf */ 3625 slot = path->slots[0]; 3626 space_needed = data_size; 3627 if (slot > 0) 3628 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3629 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 3630 if (ret < 0) 3631 return ret; 3632 3633 if (ret == 0) 3634 progress++; 3635 3636 if (progress) 3637 return 0; 3638 return 1; 3639 } 3640 3641 /* 3642 * split the path's leaf in two, making sure there is at least data_size 3643 * available for the resulting leaf level of the path. 3644 * 3645 * returns 0 if all went well and < 0 on failure. 3646 */ 3647 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3648 struct btrfs_root *root, 3649 const struct btrfs_key *ins_key, 3650 struct btrfs_path *path, int data_size, 3651 int extend) 3652 { 3653 struct btrfs_disk_key disk_key; 3654 struct extent_buffer *l; 3655 u32 nritems; 3656 int mid; 3657 int slot; 3658 struct extent_buffer *right; 3659 struct btrfs_fs_info *fs_info = root->fs_info; 3660 int ret = 0; 3661 int wret; 3662 int split; 3663 int num_doubles = 0; 3664 int tried_avoid_double = 0; 3665 3666 l = path->nodes[0]; 3667 slot = path->slots[0]; 3668 if (extend && data_size + btrfs_item_size(l, slot) + 3669 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info)) 3670 return -EOVERFLOW; 3671 3672 /* first try to make some room by pushing left and right */ 3673 if (data_size && path->nodes[1]) { 3674 int space_needed = data_size; 3675 3676 if (slot < btrfs_header_nritems(l)) 3677 space_needed -= btrfs_leaf_free_space(l); 3678 3679 wret = push_leaf_right(trans, root, path, space_needed, 3680 space_needed, 0, 0); 3681 if (wret < 0) 3682 return wret; 3683 if (wret) { 3684 space_needed = data_size; 3685 if (slot > 0) 3686 space_needed -= btrfs_leaf_free_space(l); 3687 wret = push_leaf_left(trans, root, path, space_needed, 3688 space_needed, 0, (u32)-1); 3689 if (wret < 0) 3690 return wret; 3691 } 3692 l = path->nodes[0]; 3693 3694 /* did the pushes work?
*/ 3695 if (btrfs_leaf_free_space(l) >= data_size) 3696 return 0; 3697 } 3698 3699 if (!path->nodes[1]) { 3700 ret = insert_new_root(trans, root, path, 1); 3701 if (ret) 3702 return ret; 3703 } 3704 again: 3705 split = 1; 3706 l = path->nodes[0]; 3707 slot = path->slots[0]; 3708 nritems = btrfs_header_nritems(l); 3709 mid = (nritems + 1) / 2; 3710 3711 if (mid <= slot) { 3712 if (nritems == 1 || 3713 leaf_space_used(l, mid, nritems - mid) + data_size > 3714 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3715 if (slot >= nritems) { 3716 split = 0; 3717 } else { 3718 mid = slot; 3719 if (mid != nritems && 3720 leaf_space_used(l, mid, nritems - mid) + 3721 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3722 if (data_size && !tried_avoid_double) 3723 goto push_for_double; 3724 split = 2; 3725 } 3726 } 3727 } 3728 } else { 3729 if (leaf_space_used(l, 0, mid) + data_size > 3730 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3731 if (!extend && data_size && slot == 0) { 3732 split = 0; 3733 } else if ((extend || !data_size) && slot == 0) { 3734 mid = 1; 3735 } else { 3736 mid = slot; 3737 if (mid != nritems && 3738 leaf_space_used(l, mid, nritems - mid) + 3739 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3740 if (data_size && !tried_avoid_double) 3741 goto push_for_double; 3742 split = 2; 3743 } 3744 } 3745 } 3746 } 3747 3748 if (split == 0) 3749 btrfs_cpu_key_to_disk(&disk_key, ins_key); 3750 else 3751 btrfs_item_key(l, &disk_key, mid); 3752 3753 /* 3754 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double 3755 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES 3756 * subclasses, which is 8 at the time of this patch, and we've maxed it 3757 * out. In the future we could add a 3758 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just 3759 * use BTRFS_NESTING_NEW_ROOT. 3760 */ 3761 right = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root), 3762 &disk_key, 0, l->start, 0, 0, 3763 num_doubles ? BTRFS_NESTING_NEW_ROOT : 3764 BTRFS_NESTING_SPLIT); 3765 if (IS_ERR(right)) 3766 return PTR_ERR(right); 3767 3768 root_add_used_bytes(root); 3769 3770 if (split == 0) { 3771 if (mid <= slot) { 3772 btrfs_set_header_nritems(right, 0); 3773 ret = insert_ptr(trans, path, &disk_key, 3774 right->start, path->slots[1] + 1, 1); 3775 if (ret < 0) { 3776 btrfs_tree_unlock(right); 3777 free_extent_buffer(right); 3778 return ret; 3779 } 3780 btrfs_tree_unlock(path->nodes[0]); 3781 free_extent_buffer(path->nodes[0]); 3782 path->nodes[0] = right; 3783 path->slots[0] = 0; 3784 path->slots[1] += 1; 3785 } else { 3786 btrfs_set_header_nritems(right, 0); 3787 ret = insert_ptr(trans, path, &disk_key, 3788 right->start, path->slots[1], 1); 3789 if (ret < 0) { 3790 btrfs_tree_unlock(right); 3791 free_extent_buffer(right); 3792 return ret; 3793 } 3794 btrfs_tree_unlock(path->nodes[0]); 3795 free_extent_buffer(path->nodes[0]); 3796 path->nodes[0] = right; 3797 path->slots[0] = 0; 3798 if (path->slots[1] == 0) 3799 fixup_low_keys(trans, path, &disk_key, 1); 3800 } 3801 /* 3802 * We create a new leaf 'right' for the required ins_len and 3803 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying 3804 * the new item of size ins_len into 'right'.
3805 */ 3806 return ret; 3807 } 3808 3809 ret = copy_for_split(trans, path, l, right, slot, mid, nritems); 3810 if (ret < 0) { 3811 btrfs_tree_unlock(right); 3812 free_extent_buffer(right); 3813 return ret; 3814 } 3815 3816 if (split == 2) { 3817 BUG_ON(num_doubles != 0); 3818 num_doubles++; 3819 goto again; 3820 } 3821 3822 return 0; 3823 3824 push_for_double: 3825 push_for_double_split(trans, root, path, data_size); 3826 tried_avoid_double = 1; 3827 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3828 return 0; 3829 goto again; 3830 } 3831 3832 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 3833 struct btrfs_root *root, 3834 struct btrfs_path *path, int ins_len) 3835 { 3836 struct btrfs_key key; 3837 struct extent_buffer *leaf; 3838 struct btrfs_file_extent_item *fi; 3839 u64 extent_len = 0; 3840 u32 item_size; 3841 int ret; 3842 3843 leaf = path->nodes[0]; 3844 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3845 3846 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 3847 key.type != BTRFS_EXTENT_CSUM_KEY); 3848 3849 if (btrfs_leaf_free_space(leaf) >= ins_len) 3850 return 0; 3851 3852 item_size = btrfs_item_size(leaf, path->slots[0]); 3853 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3854 fi = btrfs_item_ptr(leaf, path->slots[0], 3855 struct btrfs_file_extent_item); 3856 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3857 } 3858 btrfs_release_path(path); 3859 3860 path->keep_locks = 1; 3861 path->search_for_split = 1; 3862 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3863 path->search_for_split = 0; 3864 if (ret > 0) 3865 ret = -EAGAIN; 3866 if (ret < 0) 3867 goto err; 3868 3869 ret = -EAGAIN; 3870 leaf = path->nodes[0]; 3871 /* if our item isn't there, return now */ 3872 if (item_size != btrfs_item_size(leaf, path->slots[0])) 3873 goto err; 3874 3875 /* the leaf has changed, it now has room. return now */ 3876 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) 3877 goto err; 3878 3879 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3880 fi = btrfs_item_ptr(leaf, path->slots[0], 3881 struct btrfs_file_extent_item); 3882 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 3883 goto err; 3884 } 3885 3886 ret = split_leaf(trans, root, &key, path, ins_len, 1); 3887 if (ret) 3888 goto err; 3889 3890 path->keep_locks = 0; 3891 btrfs_unlock_up_safe(path, 1); 3892 return 0; 3893 err: 3894 path->keep_locks = 0; 3895 return ret; 3896 } 3897 3898 static noinline int split_item(struct btrfs_trans_handle *trans, 3899 struct btrfs_path *path, 3900 const struct btrfs_key *new_key, 3901 unsigned long split_offset) 3902 { 3903 struct extent_buffer *leaf; 3904 int orig_slot, slot; 3905 char *buf; 3906 u32 nritems; 3907 u32 item_size; 3908 u32 orig_offset; 3909 struct btrfs_disk_key disk_key; 3910 3911 leaf = path->nodes[0]; 3912 /* 3913 * Shouldn't happen because the caller must have previously called 3914 * setup_leaf_for_split() to make room for the new item in the leaf. 

static noinline int split_item(struct btrfs_trans_handle *trans,
			       struct btrfs_path *path,
			       const struct btrfs_key *new_key,
			       unsigned long split_offset)
{
	struct extent_buffer *leaf;
	int orig_slot, slot;
	char *buf;
	u32 nritems;
	u32 item_size;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;

	leaf = path->nodes[0];
	/*
	 * Shouldn't happen because the caller must have previously called
	 * setup_leaf_for_split() to make room for the new item in the leaf.
	 */
	if (WARN_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)))
		return -ENOSPC;

	orig_slot = path->slots[0];
	orig_offset = btrfs_item_offset(leaf, path->slots[0]);
	item_size = btrfs_item_size(leaf, path->slots[0]);

	buf = kmalloc(item_size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			   path->slots[0]), item_size);

	slot = path->slots[0] + 1;
	nritems = btrfs_header_nritems(leaf);
	if (slot != nritems) {
		/* Shift the items. */
		memmove_leaf_items(leaf, slot + 1, slot, nritems - slot);
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	btrfs_set_item_offset(leaf, slot, orig_offset);
	btrfs_set_item_size(leaf, slot, item_size - split_offset);

	btrfs_set_item_offset(leaf, orig_slot,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, orig_slot, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* Write the data for the start of the original item. */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* Write the data for the new item. */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(trans, leaf);

	BUG_ON(btrfs_leaf_free_space(leaf) < 0);
	kfree(buf);
	return 0;
}

/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation. After
 * the split, the path is pointing to the old item. The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item.
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;

	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;

	return split_item(trans, path, new_key, split_offset);
}
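
/*
 * Example usage (illustrative sketch, not part of the original sources):
 * how a caller is expected to drive btrfs_split_item().  The helper name
 * and the way the new key is derived are hypothetical; the path is assumed
 * to already point at an existing EXTENT_DATA or EXTENT_CSUM item found
 * with btrfs_search_slot().
 */
static int example_split_item_in_place(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path,
				       u64 new_key_offset,
				       unsigned long split_offset)
{
	struct btrfs_key new_key;

	/*
	 * Reuse the objectid and type of the original item, only the offset
	 * changes for the second half.
	 */
	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_key_offset;

	/*
	 * btrfs_split_item() may release and re-search the path; on success
	 * the path points at the truncated original item and the second half
	 * lives in the same leaf under new_key.  -EAGAIN means the item
	 * changed while the path was released and the caller should retry.
	 */
	return btrfs_split_item(trans, root, path, &new_key, split_offset);
}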

/*
 * Make the item pointed to by the path smaller. new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_path *path, u32 new_size, int from_end)
{
	int slot;
	struct extent_buffer *leaf;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;
	struct btrfs_map_token token;

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size(leaf, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(leaf);

	old_data_start = btrfs_item_offset(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	btrfs_init_map_token(&token, leaf);
	for (i = slot; i < nritems; i++) {
		u32 ioff;

		ioff = btrfs_token_item_offset(&token, i);
		btrfs_set_token_item_offset(&token, i, ioff + size_diff);
	}

	/* shift the data */
	if (from_end) {
		memmove_leaf_data(leaf, data_end + size_diff, data_end,
				  old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
			}
		}

		memmove_leaf_data(leaf, data_end + size_diff, data_end,
				  old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(trans, path, &disk_key, 1);
	}

	btrfs_set_item_size(leaf, slot, new_size);
	btrfs_mark_buffer_dirty(trans, leaf);

	if (btrfs_leaf_free_space(leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}

/*
 * Make the item pointed to by the path bigger; data_size is the number of
 * bytes to add.
 */
void btrfs_extend_item(struct btrfs_trans_handle *trans,
		       struct btrfs_path *path, u32 data_size)
{
	int slot;
	struct extent_buffer *leaf;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;
	struct btrfs_map_token token;

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(leaf);

	if (btrfs_leaf_free_space(leaf) < data_size) {
		btrfs_print_leaf(leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_data_end(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(leaf);
		btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
			   slot, nritems);
		BUG();
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	btrfs_init_map_token(&token, leaf);
	for (i = slot; i < nritems; i++) {
		u32 ioff;

		ioff = btrfs_token_item_offset(&token, i);
		btrfs_set_token_item_offset(&token, i, ioff - data_size);
	}

	/* shift the data */
	memmove_leaf_data(leaf, data_end - data_size, data_end,
			  old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size(leaf, slot);
	btrfs_set_item_size(leaf, slot, old_size + data_size);
	btrfs_mark_buffer_dirty(trans, leaf);

	if (btrfs_leaf_free_space(leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}
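
/*
 * Example usage (illustrative sketch, not part of the original sources):
 * resizing an existing item's data with btrfs_truncate_item() and
 * btrfs_extend_item().  The helper is hypothetical; it assumes the path
 * already points at the item and that enough leaf free space was reserved
 * for the extension, since both functions BUG() on leaf overflow.
 */
static void example_resize_item(struct btrfs_trans_handle *trans,
				struct btrfs_path *path, u32 new_size,
				u32 grow_by)
{
	/* Chop bytes off the end of the item, keeping the front intact. */
	btrfs_truncate_item(trans, path, new_size, 1);

	/*
	 * Grow the item again.  The bytes added at the end are not zeroed,
	 * so the caller must initialize them afterwards.
	 */
	btrfs_extend_item(trans, path, grow_by);
}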

/*
 * Make space in the leaf before inserting one or more items.
 *
 * @trans:	transaction handle
 * @root:	root we are inserting items into
 * @path:	points to the leaf/slot where we are going to insert new items
 * @batch:	information about the batch of items to insert
 *
 * Main purpose is to save stack depth by doing the bulk of the work in a
 * function that doesn't call btrfs_search_slot().
 */
static void setup_items_for_insert(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct btrfs_path *path,
				   const struct btrfs_item_batch *batch)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	int slot;
	struct btrfs_map_token token;
	u32 total_size;

	/*
	 * Before anything else, update keys in the parent and other ancestors
	 * if needed, then release the write locks on them, so that other tasks
	 * can use them while we modify the leaf.
	 */
	if (path->slots[0] == 0) {
		btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
		fixup_low_keys(trans, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(leaf);
	total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));

	if (btrfs_leaf_free_space(leaf) < total_size) {
		btrfs_print_leaf(leaf);
		btrfs_crit(fs_info, "not enough freespace need %u have %d",
			   total_size, btrfs_leaf_free_space(leaf));
		BUG();
	}

	btrfs_init_map_token(&token, leaf);
	if (slot != nritems) {
		unsigned int old_data = btrfs_item_data_end(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(leaf);
			btrfs_crit(fs_info,
				   "item at slot %d with data offset %u beyond data end of leaf %u",
				   slot, old_data, data_end);
			BUG();
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			ioff = btrfs_token_item_offset(&token, i);
			btrfs_set_token_item_offset(&token, i,
						    ioff - batch->total_data_size);
		}
		/* shift the items */
		memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot);

		/* shift the data */
		memmove_leaf_data(leaf, data_end - batch->total_data_size,
				  data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < batch->nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		data_end -= batch->data_sizes[i];
		btrfs_set_token_item_offset(&token, slot + i, data_end);
		btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]);
	}

	btrfs_set_header_nritems(leaf, nritems + batch->nr);
	btrfs_mark_buffer_dirty(trans, leaf);

	if (btrfs_leaf_free_space(leaf) < 0) {
		btrfs_print_leaf(leaf);
		BUG();
	}
}

/*
 * Insert a new item into a leaf.
 *
 * @trans:	Transaction handle.
 * @root:	The root of the btree.
 * @path:	A path pointing to the target leaf and slot.
 * @key:	The key of the new item.
 * @data_size:	The size of the data associated with the new key.
 */
void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 const struct btrfs_key *key,
				 u32 data_size)
{
	struct btrfs_item_batch batch;

	batch.keys = key;
	batch.data_sizes = &data_size;
	batch.total_data_size = data_size;
	batch.nr = 1;

	setup_items_for_insert(trans, root, path, &batch);
}

/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 *
 * Returns: 0        on success
 *          -EEXIST  if the first key already exists
 *          < 0      on other errors
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_item_batch *batch)
{
	int ret = 0;
	int slot;
	u32 total_size;

	total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(trans, root, path, batch);
	return 0;
}
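
/*
 * Example usage (illustrative sketch, not part of the original sources):
 * inserting two empty items in one batch and letting the caller fill in
 * the data.  The helper is hypothetical; the two keys are assumed to be
 * sorted and not yet present in the tree.
 */
static int example_insert_two_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    const struct btrfs_key keys[2],
				    const u32 sizes[2])
{
	struct btrfs_item_batch batch;
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	batch.keys = keys;
	batch.data_sizes = sizes;
	batch.total_data_size = sizes[0] + sizes[1];
	batch.nr = 2;

	ret = btrfs_insert_empty_items(trans, root, path, &batch);
	if (ret == 0) {
		/*
		 * path->slots[0] now points at the first new item; write the
		 * item data for both slots with write_extent_buffer() before
		 * releasing the path.
		 */
	}
	btrfs_free_path(path);
	return ret;
}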

/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *cpu_key, void *data,
		      u32 data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(trans, leaf);
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item is
 * contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the leaf
 * the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	path->slots[0]++;
	btrfs_setup_item_for_insert(trans, root, path, new_key, item_size);
	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}
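
/*
 * Example usage (illustrative sketch, not part of the original sources):
 * duplicating an item under a new key, the pattern used when splitting a
 * file extent item in place.  The helper is hypothetical; the path must
 * point at the source item, and after the call it points at the new copy.
 */
static int example_clone_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_path *path, u64 new_offset)
{
	struct btrfs_key new_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;

	/* -EAGAIN means the leaf changed while the path was released. */
	ret = btrfs_duplicate_item(trans, root, path, &new_key);
	if (ret)
		return ret;

	/* Edit the duplicate at path->slots[0] here, then dirty the leaf. */
	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
	return 0;
}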

/*
 * Delete the pointer at the given slot from the given node.
 *
 * The tree should have been previously balanced so the deletion does not
 * empty a node.
 *
 * This is exported for use inside btrfs-progs, don't un-export it.
 */
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct btrfs_path *path, int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		if (level) {
			ret = btrfs_tree_mod_log_insert_move(parent, slot,
					slot + 1, nritems - slot - 1);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(parent, slot),
			      btrfs_node_key_ptr_offset(parent, slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	} else if (level) {
		ret = btrfs_tree_mod_log_insert_key(parent, slot,
						    BTRFS_MOD_LOG_KEY_REMOVE);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* Just turn the root into a leaf and break. */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(trans, path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(trans, parent);
	return 0;
}

/*
 * A helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent. Zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing. path->nodes[1] must be locked.
 */
static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *leaf)
{
	int ret;

	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	ret = btrfs_del_ptr(trans, root, path, 1, path->slots[1]);
	if (ret < 0)
		return ret;

	/*
	 * Freeing the tree block is expensive, so make sure we aren't
	 * holding any locks when we call btrfs_free_tree_block().
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used_bytes(root);

	atomic_inc(&leaf->refs);
	btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
	free_extent_buffer_stale(leaf);
	return 0;
}

/*
 * Delete the items at the leaf level in the path. If that empties the leaf,
 * remove it from the tree.
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	int ret = 0;
	int wret;
	u32 nritems;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1);
		const int data_end = leaf_data_end(leaf);
		struct btrfs_map_token token;
		u32 dsize = 0;
		int i;

		for (i = 0; i < nr; i++)
			dsize += btrfs_item_size(leaf, slot + i);

		memmove_leaf_data(leaf, data_end + dsize, data_end,
				  last_off - data_end);

		btrfs_init_map_token(&token, leaf);
		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			ioff = btrfs_token_item_offset(&token, i);
			btrfs_set_token_item_offset(&token, i, ioff + dsize);
		}

		memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr);
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* Delete the leaf if we've emptied it. */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_clear_buffer_dirty(trans, leaf);
			ret = btrfs_del_leaf(trans, root, path, leaf);
			if (ret < 0)
				return ret;
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);

		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(trans, path, &disk_key, 1);
		}

		/*
		 * Try to delete the leaf if it is mostly empty. We do this by
		 * trying to move all its items into its left and right neighbours.
		 * If we can't move all the items, then we don't delete it - it's
		 * not ideal, but future insertions might fill the leaf with more
		 * items, or items from other leaves might be moved later into our
		 * leaf due to deletions on those leaves.
		 */
		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
			u32 min_push_space;

			/*
			 * push_leaf_left() fixes the path. Make sure the path
			 * still points to our leaf for a possible call to
			 * btrfs_del_ptr() below.
			 */
			slot = path->slots[1];
			atomic_inc(&leaf->refs);
			/*
			 * We want to be able to at least push one item to the
			 * left neighbour leaf, and that's the first item.
			 */
			min_push_space = sizeof(struct btrfs_item) +
					 btrfs_item_size(leaf, 0);
			wret = push_leaf_left(trans, root, path, 0,
					      min_push_space, 1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				/*
				 * If we were not able to push all items from our
				 * leaf to its left neighbour, then attempt to
				 * either push all the remaining items to the
				 * right neighbour or none. There's no advantage
				 * in pushing only some items, instead of all, as
				 * it's pointless to end up with a leaf having
				 * too few items while the neighbours can be full
				 * or nearly full.
				 */
				nritems = btrfs_header_nritems(leaf);
				min_push_space = leaf_space_used(leaf, 0, nritems);
				wret = push_leaf_right(trans, root, path, 0,
						       min_push_space, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				ret = btrfs_del_leaf(trans, root, path, leaf);
				if (ret < 0)
					return ret;
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/*
				 * If we're still in the path, make sure we're
				 * dirty. Otherwise, one of the push_leaf
				 * functions must have already dirtied this
				 * buffer.
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(trans, leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(trans, leaf);
		}
	}
	return ret;
}
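
/*
 * Example usage (illustrative sketch, not part of the original sources):
 * looking up a key and deleting that single item.  The helper is
 * hypothetical; ins_len == -1 tells btrfs_search_slot() that we intend to
 * delete, so it rebalances on the way down, and cow == 1 makes the leaf
 * writable.
 */
static int example_delete_one_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;	/* The key does not exist. */

	btrfs_free_path(path);
	return ret;
}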

/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code and tree logging.
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through. Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * Returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;
	int keep_locks = path->keep_locks;

	ASSERT(!path->nowait);
	path->keep_locks = 1;
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = btrfs_bin_search(cur, 0, min_key, &slot);
		if (sret < 0) {
			ret = sret;
			goto out;
		}

		/* At the lowest level, we're done, setup the path and exit. */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * Check this node pointer against the min_trans parameter.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * We didn't find a candidate key in this node, walk forward
		 * and find another one.
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			sret = btrfs_find_next_key(root, path, min_key, level,
						   min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* Save our key for returning back. */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			goto out;
		}
		cur = btrfs_read_node_slot(cur, slot);
		if (IS_ERR(cur)) {
			ret = PTR_ERR(cur);
			goto out;
		}

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
	}
out:
	path->keep_locks = keep_locks;
	if (ret == 0) {
		btrfs_unlock_up_safe(path, path->lowest_level + 1);
		memcpy(min_key, &found_key, sizeof(found_key));
	}
	return ret;
}
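
/*
 * Example usage (illustrative sketch, not part of the original sources):
 * finding the first item at or after *min_key that lives in a subtree
 * modified in transaction min_trans or later, the way defrag and tree
 * logging drive btrfs_search_forward().  The helper is hypothetical.
 */
static int example_find_newer_item(struct btrfs_root *root,
				   struct btrfs_key *min_key, u64 min_trans,
				   struct btrfs_key *found_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_forward(root, min_key, path, min_trans);
	if (ret == 0)	/* *min_key was updated to the key that was found. */
		*found_key = *min_key;

	btrfs_free_path(path);
	return ret;	/* 0 found, 1 nothing new enough, < 0 error. */
}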

/*
 * This is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path. It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree.
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks && !path->skip_locking);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;

			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1] || path->skip_locking) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	bool need_commit_sem = false;
	u32 nritems;
	int ret;
	int i;

	/*
	 * The nowait semantics are used only for write paths, where we don't
	 * use the tree mod log and sequence numbers.
	 */
	if (time_seq)
		ASSERT(!path->nowait);

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	btrfs_release_path(path);

	path->keep_locks = 1;

	if (time_seq) {
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	} else {
		if (path->need_commit_sem) {
			path->need_commit_sem = 0;
			need_commit_sem = true;
			if (path->nowait) {
				if (!down_read_trylock(&fs_info->commit_root_sem)) {
					ret = -EAGAIN;
					goto done;
				}
			} else {
				down_read(&fs_info->commit_root_sem);
			}
		}
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	}
	path->keep_locks = 0;

	if (ret < 0)
		goto done;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * By releasing the path above we dropped all our locks. A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block. So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * The above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leaves
	 *   gets another one with a bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later (especially in __btrfs_drop_extents()).
	 *
	 * To explain the check a bit further: with ret > 0, the key wasn't
	 * found and the path points to the slot where it would be inserted,
	 * so the item at path->slots[0] must be the bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		/*
		 * Our current level is where we're going to start from, and to
		 * make sure lockdep doesn't complain we need to drop our locks
		 * and nodes from 0 to our current level.
		 */
		for (i = 0; i < level; i++) {
			if (path->locks[i]) {
				btrfs_tree_read_unlock(path->nodes[i]);
				path->locks[i] = 0;
			}
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}

		next = c;
		ret = read_block_for_search(root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN && !path->nowait)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && path->nowait) {
				ret = -EAGAIN;
				goto done;
			}
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, which holds that lock
				 * while itself waiting for the leaf we have
				 * currently locked. To resolve the situation,
				 * we give up our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret)
				btrfs_tree_read_lock(next);
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = BTRFS_READ_LOCK;
		if (!level)
			break;

		ret = read_block_for_search(root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN && !path->nowait)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			if (path->nowait) {
				if (!btrfs_try_tree_read_lock(next)) {
					ret = -EAGAIN;
					goto done;
				}
			} else {
				btrfs_tree_read_lock(next);
			}
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	if (need_commit_sem) {
		int ret2;

		path->need_commit_sem = 1;
		ret2 = finish_need_commit_sem_search(path);
		up_read(&fs_info->commit_root_sem);
		if (ret2)
			ret = ret2;
	}

	return ret;
}

int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq)
{
	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		return btrfs_next_old_leaf(root, path, time_seq);
	return 0;
}
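
/*
 * Example usage (illustrative sketch, not part of the original sources):
 * iterating all items of a root in key order.  The helper is hypothetical;
 * btrfs_next_leaf() is the time_seq == 0 wrapper around the _old_ variant
 * above and crosses leaf boundaries transparently.
 */
static int example_walk_all_items(struct btrfs_root *root)
{
	struct btrfs_key key = { .objectid = 0, .type = 0, .offset = 0 };
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				break;	/* 1: no more leaves, < 0: error. */
			continue;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		/* ... process the item at path->slots[0] here ... */
		path->slots[0]++;
	}
	if (ret == 1)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}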

/*
 * This uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'.
 *
 * Returns 0 if something is found, 1 if nothing was found and < 0 on error.
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}

/*
 * Search in the extent tree to find a previous Metadata/Data extent item
 * with min objectid.
 *
 * Returns 0 if something is found, 1 if nothing was found and < 0 on error.
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
			break;
	}
	return 1;
}

int __init btrfs_ctree_init(void)
{
	btrfs_path_cachep = KMEM_CACHE(btrfs_path, 0);
	if (!btrfs_path_cachep)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_ctree_exit(void)
{
	kmem_cache_destroy(btrfs_path_cachep);
}
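
/*
 * Example usage (illustrative sketch, not part of the original sources):
 * walking backwards from the current position to the closest preceding
 * item of a given type.  The helper is hypothetical and assumes the path
 * was positioned by an earlier btrfs_search_slot() call on the same root.
 */
static int example_find_previous_of_type(struct btrfs_root *root,
					 struct btrfs_path *path,
					 u64 min_objectid, int type,
					 struct btrfs_key *found_key)
{
	int ret;

	ret = btrfs_previous_item(root, path, min_objectid, type);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found_key,
				      path->slots[0]);
	return ret;	/* 0 found, 1 ran out of candidates, < 0 error. */
}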