// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include <linux/error-injection.h>
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "tree-mod-log.h"
#include "tree-checker.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "file-item.h"

static struct kmem_cache *btrfs_path_cachep;

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      const struct btrfs_key *ins_key, struct btrfs_path *path,
                      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
                          struct extent_buffer *dst,
                          struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
                              struct extent_buffer *dst_buf,
                              struct extent_buffer *src_buf);

static const struct btrfs_csums {
        u16             size;
        const char      name[10];
        const char      driver[12];
} btrfs_csums[] = {
        [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
        [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
        [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
        [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
                                     .driver = "blake2b-256" },
};

/*
 * The leaf data grows from end-to-front in the node.  This returns the address
 * of the start of the last item, which is the stop of the leaf data stack.
 */
static unsigned int leaf_data_end(const struct extent_buffer *leaf)
{
        u32 nr = btrfs_header_nritems(leaf);

        if (nr == 0)
                return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
        return btrfs_item_offset(leaf, nr - 1);
}

/*
 * Move data in a @leaf (using memmove, safe for overlapping ranges).
 *
 * @leaf:       leaf that we're doing a memmove on
 * @dst_offset: item data offset we're moving to
 * @src_offset: item data offset we're moving from
 * @len:        length of the data we're moving
 *
 * Wrapper around memmove_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void memmove_leaf_data(const struct extent_buffer *leaf,
                                     unsigned long dst_offset,
                                     unsigned long src_offset,
                                     unsigned long len)
{
        memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
                              btrfs_item_nr_offset(leaf, 0) + src_offset, len);
}
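
/*
 * An illustrative sketch of typical usage (not a verbatim caller): item
 * deletion closes a @dsize-byte gap in the data area by shifting everything
 * between the current data end and the gap:
 *
 *      const u32 data_end = leaf_data_end(leaf);
 *
 *      memmove_leaf_data(leaf, data_end + dsize, data_end,
 *                        old_data_start - data_end);
 *
 * Both offsets are relative to the start of the item area; the helper adds
 * the header offset before touching the underlying pages.  (@dsize and
 * @old_data_start are hypothetical names used only for this sketch.)
 */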

/*
 * Copy item data from @src into @dst at the given @offset.
 *
 * @dst:        destination leaf that we're copying into
 * @src:        source leaf that we're copying from
 * @dst_offset: item data offset we're copying to
 * @src_offset: item data offset we're copying from
 * @len:        length of the data we're copying
 *
 * Wrapper around copy_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void copy_leaf_data(const struct extent_buffer *dst,
                                  const struct extent_buffer *src,
                                  unsigned long dst_offset,
                                  unsigned long src_offset, unsigned long len)
{
        copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
                           btrfs_item_nr_offset(src, 0) + src_offset, len);
}

/*
 * Move items in a @leaf (using memmove).
 *
 * @dst:        destination leaf for the items
 * @dst_item:   the item nr we're copying into
 * @src_item:   the item nr we're copying from
 * @nr_items:   the number of items to copy
 *
 * Wrapper around memmove_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void memmove_leaf_items(const struct extent_buffer *leaf,
                                      int dst_item, int src_item, int nr_items)
{
        memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
                              btrfs_item_nr_offset(leaf, src_item),
                              nr_items * sizeof(struct btrfs_item));
}

/*
 * Copy items from @src into @dst at the given @offset.
 *
 * @dst:        destination leaf for the items
 * @src:        source leaf for the items
 * @dst_item:   the item nr we're copying into
 * @src_item:   the item nr we're copying from
 * @nr_items:   the number of items to copy
 *
 * Wrapper around copy_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void copy_leaf_items(const struct extent_buffer *dst,
                                   const struct extent_buffer *src,
                                   int dst_item, int src_item, int nr_items)
{
        copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
                           btrfs_item_nr_offset(src, src_item),
                           nr_items * sizeof(struct btrfs_item));
}

/* This exists for btrfs-progs usages. */
u16 btrfs_csum_type_size(u16 type)
{
        return btrfs_csums[type].size;
}

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
        u16 t = btrfs_super_csum_type(s);

        /* csum type is validated at mount time */
        return btrfs_csum_type_size(t);
}

const char *btrfs_super_csum_name(u16 csum_type)
{
        /* csum type is validated at mount time */
        return btrfs_csums[csum_type].name;
}

/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 * name.
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
        /* csum type is validated at mount time */
        return btrfs_csums[csum_type].driver[0] ?
                btrfs_csums[csum_type].driver :
                btrfs_csums[csum_type].name;
}

size_t __attribute_const__ btrfs_get_num_csums(void)
{
        return ARRAY_SIZE(btrfs_csums);
}

struct btrfs_path *btrfs_alloc_path(void)
{
        might_sleep();

        return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
        if (!p)
                return;
        btrfs_release_path(p);
        kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
        int i;

        for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
                p->slots[i] = 0;
                if (!p->nodes[i])
                        continue;
                if (p->locks[i]) {
                        btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
                        p->locks[i] = 0;
                }
                free_extent_buffer(p->nodes[i]);
                p->nodes[i] = NULL;
        }
}
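
/*
 * An illustrative sketch of the usual path lifetime around a read-only
 * search (not a verbatim caller; error handling trimmed):
 *
 *      struct btrfs_path *path;
 *      int ret;
 *
 *      path = btrfs_alloc_path();
 *      if (!path)
 *              return -ENOMEM;
 *      ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *      if (ret == 0)
 *              btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *                                    path->slots[0]);
 *      btrfs_free_path(path);
 *
 * btrfs_release_path() alone is used when the caller wants to keep the
 * path object and search again.
 */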

/*
 * We want the transaction abort to print stack trace only for errors where the
 * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
 * caused by external factors.
 */
bool __cold abort_should_print_stack(int error)
{
        switch (error) {
        case -EIO:
        case -EROFS:
        case -ENOMEM:
                return false;
        }
        return true;
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                rcu_read_lock();
                eb = rcu_dereference(root->node);

                /*
                 * RCU really hurts here, we could free up the root node because
                 * it was COWed but we may not get the new root node yet so do
                 * the inc_not_zero dance and if it doesn't work then
                 * synchronize_rcu and try again.
                 */
                if (atomic_inc_not_zero(&eb->refs)) {
                        rcu_read_unlock();
                        break;
                }
                rcu_read_unlock();
                synchronize_rcu();
        }
        return eb;
}
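
/*
 * An illustrative sketch (mirroring btrfs_lock_root_node() in locking.c) of
 * the looping mentioned above, needed because the node may stop being the
 * root between taking the reference and taking the lock:
 *
 *      while (1) {
 *              eb = btrfs_root_node(root);
 *              btrfs_tree_lock(eb);
 *              if (eb == root->node)
 *                      break;
 *              btrfs_tree_unlock(eb);
 *              free_extent_buffer(eb);
 *      }
 *
 * The reference from btrfs_root_node() keeps @eb alive across the blocking
 * lock acquisition.
 */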

/*
 * Cowonly roots (not-shareable trees, everything that is not a subvolume or
 * reloc root) just get put onto a simple dirty list.  The transaction walks
 * this list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
            !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
                return;

        spin_lock(&fs_info->trans_lock);
        if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
                /* Want the extent tree to be the last on the list */
                if (btrfs_root_id(root) == BTRFS_EXTENT_TREE_OBJECTID)
                        list_move_tail(&root->dirty_list,
                                       &fs_info->dirty_cowonly_roots);
                else
                        list_move(&root->dirty_list,
                                  &fs_info->dirty_cowonly_roots);
        }
        spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
                    struct btrfs_root *root,
                    struct extent_buffer *buf,
                    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *cow;
        int ret = 0;
        int level;
        struct btrfs_disk_key disk_key;
        u64 reloc_src_root = 0;

        WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
                trans->transid != fs_info->running_transaction->transid);
        WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
                trans->transid != btrfs_get_root_last_trans(root));

        level = btrfs_header_level(buf);
        if (level == 0)
                btrfs_item_key(buf, &disk_key, 0);
        else
                btrfs_node_key(buf, &disk_key, 0);

        if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
                reloc_src_root = btrfs_header_owner(buf);
        cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
                                     &disk_key, level, buf->start, 0,
                                     reloc_src_root, BTRFS_NESTING_NEW_ROOT);
        if (IS_ERR(cow))
                return PTR_ERR(cow);

        copy_extent_buffer_full(cow, buf);
        btrfs_set_header_bytenr(cow, cow->start);
        btrfs_set_header_generation(cow, trans->transid);
        btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
        btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
                                     BTRFS_HEADER_FLAG_RELOC);
        if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
                btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
        else
                btrfs_set_header_owner(cow, new_root_objectid);

        write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

        WARN_ON(btrfs_header_generation(buf) > trans->transid);
        if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
                ret = btrfs_inc_ref(trans, root, cow, 1);
        else
                ret = btrfs_inc_ref(trans, root, cow, 0);
        if (ret) {
                btrfs_tree_unlock(cow);
                free_extent_buffer(cow);
                btrfs_abort_transaction(trans, ret);
                return ret;
        }

        btrfs_mark_buffer_dirty(trans, cow);
        *cow_ret = cow;
        return 0;
}

/*
 * check if the tree block can be shared by multiple trees
 */
bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct extent_buffer *buf)
{
        const u64 buf_gen = btrfs_header_generation(buf);

        /*
         * Tree blocks not in shareable trees and tree roots are never shared.
         * If a block was allocated after the last snapshot and the block was
         * not allocated by tree relocation, we know the block is not shared.
         */

        if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
                return false;

        if (buf == root->node)
                return false;

        if (buf_gen > btrfs_root_last_snapshot(&root->root_item) &&
            !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
                return false;

        if (buf != root->commit_root)
                return true;

        /*
         * An extent buffer that used to be the commit root may still be shared
         * because the tree height may have increased and it became a child of
         * a higher level root.  This can happen when snapshotting a subvolume
         * created in the current transaction.
         */
        if (buf_gen == trans->transid)
                return true;

        return false;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *root,
                                       struct extent_buffer *buf,
                                       struct extent_buffer *cow,
                                       int *last_ref)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 refs;
        u64 owner;
        u64 flags;
        int ret;

        /*
         * Backrefs update rules:
         *
         * Always use full backrefs for extent pointers in tree blocks
         * allocated by tree relocation.
         *
         * If a shared tree block is no longer referenced by its owner
         * tree (btrfs_header_owner(buf) == root->root_key.objectid),
         * use full backrefs for extent pointers in the tree block.
         *
         * If a tree block is being relocated
         * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
         * use full backrefs for extent pointers in the tree block.
         * The reason for this is that some operations (such as dropping a
         * tree) are only allowed on blocks that use full backrefs.
         */

        if (btrfs_block_can_be_shared(trans, root, buf)) {
                ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
                                               btrfs_header_level(buf), 1,
                                               &refs, &flags, NULL);
                if (ret)
                        return ret;
                if (unlikely(refs == 0)) {
                        btrfs_crit(fs_info,
                "found 0 references for tree block at bytenr %llu level %d root %llu",
                                   buf->start, btrfs_header_level(buf),
                                   btrfs_root_id(root));
                        ret = -EUCLEAN;
                        btrfs_abort_transaction(trans, ret);
                        return ret;
                }
        } else {
                refs = 1;
                if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
                    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
                        flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
                else
                        flags = 0;
        }

        owner = btrfs_header_owner(buf);
        if (unlikely(owner == BTRFS_TREE_RELOC_OBJECTID &&
                     !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))) {
                btrfs_crit(fs_info,
"found tree block at bytenr %llu level %d root %llu refs %llu flags %llx without full backref flag set",
                           buf->start, btrfs_header_level(buf),
                           btrfs_root_id(root), refs, flags);
                ret = -EUCLEAN;
                btrfs_abort_transaction(trans, ret);
                return ret;
        }

        if (refs > 1) {
                if ((owner == btrfs_root_id(root) ||
                     btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) &&
                    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
                        ret = btrfs_inc_ref(trans, root, buf, 1);
                        if (ret)
                                return ret;

                        if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
                                ret = btrfs_dec_ref(trans, root, buf, 0);
                                if (ret)
                                        return ret;
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                                if (ret)
                                        return ret;
                        }
                        ret = btrfs_set_disk_extent_flags(trans, buf,
                                                BTRFS_BLOCK_FLAG_FULL_BACKREF);
                        if (ret)
                                return ret;
                } else {
                        if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                        else
                                ret = btrfs_inc_ref(trans, root, cow, 0);
                        if (ret)
                                return ret;
                }
        } else {
                if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
                        if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
                                ret = btrfs_inc_ref(trans, root, cow, 1);
                        else
                                ret = btrfs_inc_ref(trans, root, cow, 0);
                        if (ret)
                                return ret;
                        ret = btrfs_dec_ref(trans, root, buf, 1);
                        if (ret)
                                return ret;
                }
                btrfs_clear_buffer_dirty(trans, buf);
                *last_ref = 1;
        }
        return 0;
}
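
/*
 * A worked example of the rules above (illustrative, not exhaustive):
 * COWing a block with refs == 2 that is still owned by a snapshotted
 * subvolume (owner == root id, FULL_BACKREF not set) takes the first
 * branch: the old block gets a full backref (btrfs_inc_ref(.., buf, 1))
 * and BTRFS_BLOCK_FLAG_FULL_BACKREF is set on its extent, while the
 * snapshot keeps referencing the old block.  With refs == 1 the final
 * branch runs instead: the old block is dropped (*last_ref = 1) and only
 * the new copy keeps references to the children.
 */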

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root,
                          struct extent_buffer *buf,
                          struct extent_buffer *parent, int parent_slot,
                          struct extent_buffer **cow_ret,
                          u64 search_start, u64 empty_size,
                          enum btrfs_lock_nesting nest)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_disk_key disk_key;
        struct extent_buffer *cow;
        int level, ret;
        int last_ref = 0;
        int unlock_orig = 0;
        u64 parent_start = 0;
        u64 reloc_src_root = 0;

        if (*cow_ret == buf)
                unlock_orig = 1;

        btrfs_assert_tree_write_locked(buf);

        WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
                trans->transid != fs_info->running_transaction->transid);
        WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
                trans->transid != btrfs_get_root_last_trans(root));

        level = btrfs_header_level(buf);

        if (level == 0)
                btrfs_item_key(buf, &disk_key, 0);
        else
                btrfs_node_key(buf, &disk_key, 0);

        if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
                if (parent)
                        parent_start = parent->start;
                reloc_src_root = btrfs_header_owner(buf);
        }
        cow = btrfs_alloc_tree_block(trans, root, parent_start,
                                     btrfs_root_id(root), &disk_key, level,
                                     search_start, empty_size, reloc_src_root,
                                     nest);
        if (IS_ERR(cow))
                return PTR_ERR(cow);

        /* cow is set to blocking by btrfs_init_new_buffer */

        copy_extent_buffer_full(cow, buf);
        btrfs_set_header_bytenr(cow, cow->start);
        btrfs_set_header_generation(cow, trans->transid);
        btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
        btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
                                     BTRFS_HEADER_FLAG_RELOC);
        if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
                btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
        else
                btrfs_set_header_owner(cow, btrfs_root_id(root));

        write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

        ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto error_unlock_cow;
        }

        if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
                ret = btrfs_reloc_cow_block(trans, root, buf, cow);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
                        goto error_unlock_cow;
                }
        }

        if (buf == root->node) {
                WARN_ON(parent && parent != buf);
                if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
                    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
                        parent_start = buf->start;

                ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, ret);
                        goto error_unlock_cow;
                }
                atomic_inc(&cow->refs);
                rcu_assign_pointer(root->node, cow);

                ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
                                            parent_start, last_ref);
                free_extent_buffer(buf);
                add_root_to_dirty_list(root);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, ret);
                        goto error_unlock_cow;
                }
        } else {
                WARN_ON(trans->transid != btrfs_header_generation(parent));
                ret = btrfs_tree_mod_log_insert_key(parent, parent_slot,
                                                    BTRFS_MOD_LOG_KEY_REPLACE);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
                        goto error_unlock_cow;
                }
                btrfs_set_node_blockptr(parent, parent_slot, cow->start);
                btrfs_set_node_ptr_generation(parent, parent_slot,
                                              trans->transid);
                btrfs_mark_buffer_dirty(trans, parent);
                if (last_ref) {
                        ret = btrfs_tree_mod_log_free_eb(buf);
                        if (ret) {
                                btrfs_abort_transaction(trans, ret);
                                goto error_unlock_cow;
                        }
                }
                ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
                                            parent_start, last_ref);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, ret);
                        goto error_unlock_cow;
                }
        }
        if (unlock_orig)
                btrfs_tree_unlock(buf);
        free_extent_buffer_stale(buf);
        btrfs_mark_buffer_dirty(trans, cow);
        *cow_ret = cow;
        return 0;

error_unlock_cow:
        btrfs_tree_unlock(cow);
        free_extent_buffer(cow);
        return ret;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct extent_buffer *buf)
{
        if (btrfs_is_testing(root->fs_info))
                return 0;

        /* Ensure we can see the FORCE_COW bit */
        smp_mb__before_atomic();

        /*
         * We do not need to cow a block if
         * 1) this block is not created or changed in this transaction;
         * 2) this block does not belong to TREE_RELOC tree;
         * 3) the root is not forced COW.
         *
         * What is forced COW:
         *    when we create a snapshot during committing the transaction,
         *    after we've finished copying src root, we must COW the shared
         *    block to ensure the metadata consistency.
         */
        if (btrfs_header_generation(buf) == trans->transid &&
            !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
            !(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
              btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
            !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
                return 0;
        return 1;
}

/*
 * COWs a single block, see btrfs_force_cow_block() for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet.
 */
int btrfs_cow_block(struct btrfs_trans_handle *trans,
                    struct btrfs_root *root, struct extent_buffer *buf,
                    struct extent_buffer *parent, int parent_slot,
                    struct extent_buffer **cow_ret,
                    enum btrfs_lock_nesting nest)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 search_start;
        int ret;

        if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
                btrfs_abort_transaction(trans, -EUCLEAN);
                btrfs_crit(fs_info,
                "attempt to COW block %llu on root %llu that is being deleted",
                           buf->start, btrfs_root_id(root));
                return -EUCLEAN;
        }

        /*
         * COWing must happen through a running transaction, which always
         * matches the current fs generation (it's a transaction with a state
         * less than TRANS_STATE_UNBLOCKED).  If it doesn't, then turn the fs
         * into error state to prevent the commit of any transaction.
         */
        if (unlikely(trans->transaction != fs_info->running_transaction ||
                     trans->transid != fs_info->generation)) {
                btrfs_abort_transaction(trans, -EUCLEAN);
                btrfs_crit(fs_info,
"unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu",
                           buf->start, btrfs_root_id(root), trans->transid,
                           fs_info->running_transaction->transid,
                           fs_info->generation);
                return -EUCLEAN;
        }

        if (!should_cow_block(trans, root, buf)) {
                *cow_ret = buf;
                return 0;
        }

        search_start = round_down(buf->start, SZ_1G);

        /*
         * Before CoWing this block for later modification, check if it's
         * the subtree root and do the delayed subtree trace if needed.
         *
         * Also we don't care about the error, as it's handled internally.
         */
        btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
        ret = btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
                                    cow_ret, search_start, 0, nest);

        trace_btrfs_cow_block(root, buf, *cow_ret);

        return ret;
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
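
/*
 * An illustrative sketch (not a verbatim caller) of how a walk down the
 * tree COWs each node before modifying it, mirroring what
 * btrfs_search_slot() does further below:
 *
 *      ret = btrfs_cow_block(trans, root, b, p->nodes[level + 1],
 *                            p->slots[level + 1], &b, BTRFS_NESTING_COW);
 *      if (ret)
 *              goto done;
 *      p->nodes[level] = b;
 *
 * If should_cow_block() decides the buffer was already COWed in this
 * transaction and not yet written, *cow_ret is simply set to @buf and no
 * copy is made.
 */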

/*
 * same as comp_keys only with two btrfs_key's
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
        if (k1->objectid > k2->objectid)
                return 1;
        if (k1->objectid < k2->objectid)
                return -1;
        if (k1->type > k2->type)
                return 1;
        if (k1->type < k2->type)
                return -1;
        if (k1->offset > k2->offset)
                return 1;
        if (k1->offset < k2->offset)
                return -1;
        return 0;
}

/*
 * Search for a key in the given extent_buffer.
 *
 * The lower boundary for the search is specified by the slot number @first_slot.
 * Use a value of 0 to search over the whole extent buffer.  Works for both
 * leaves and nodes.
 *
 * The slot in the extent buffer is returned via @slot.  If the key exists in the
 * extent buffer, then @slot will point to the slot where the key is, otherwise
 * it points to the slot where you would insert the key.
 *
 * Slot may point to the total number of items (i.e. one position beyond the last
 * key) if the key is bigger than the last key in the extent buffer.
 */
int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
                     const struct btrfs_key *key, int *slot)
{
        unsigned long p;
        int item_size;
        /*
         * Use unsigned types for the low and high slots, so that we get a more
         * efficient division in the search loop below.
         */
        u32 low = first_slot;
        u32 high = btrfs_header_nritems(eb);
        int ret;
        const int key_size = sizeof(struct btrfs_disk_key);

        if (unlikely(low > high)) {
                btrfs_err(eb->fs_info,
                          "%s: low (%u) > high (%u) eb %llu owner %llu level %d",
                          __func__, low, high, eb->start,
                          btrfs_header_owner(eb), btrfs_header_level(eb));
                return -EINVAL;
        }

        if (btrfs_header_level(eb) == 0) {
                p = offsetof(struct btrfs_leaf, items);
                item_size = sizeof(struct btrfs_item);
        } else {
                p = offsetof(struct btrfs_node, ptrs);
                item_size = sizeof(struct btrfs_key_ptr);
        }

        while (low < high) {
                const int unit_size = eb->folio_size;
                unsigned long oil;
                unsigned long offset;
                struct btrfs_disk_key *tmp;
                struct btrfs_disk_key unaligned;
                int mid;

                mid = (low + high) / 2;
                offset = p + mid * item_size;
                oil = get_eb_offset_in_folio(eb, offset);

                if (oil + key_size <= unit_size) {
                        const unsigned long idx = get_eb_folio_index(eb, offset);
                        char *kaddr = folio_address(eb->folios[idx]);

                        oil = get_eb_offset_in_folio(eb, offset);
                        tmp = (struct btrfs_disk_key *)(kaddr + oil);
                } else {
                        read_extent_buffer(eb, &unaligned, offset, key_size);
                        tmp = &unaligned;
                }

                ret = btrfs_comp_keys(tmp, key);

                if (ret < 0)
                        low = mid + 1;
                else if (ret > 0)
                        high = mid;
                else {
                        *slot = mid;
                        return 0;
                }
        }
        *slot = low;
        return 1;
}
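
/*
 * An illustrative sketch of interpreting btrfs_bin_search() results (not a
 * verbatim caller):
 *
 *      int slot;
 *      int ret = btrfs_bin_search(eb, 0, &key, &slot);
 *
 * ret < 0 is an error (bad slot boundaries), ret == 0 means the key was
 * found at @slot, and ret == 1 means @slot is the insertion position, which
 * may be equal to btrfs_header_nritems(eb) when the key is greater than
 * every key in the buffer.
 */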

static void root_add_used_bytes(struct btrfs_root *root)
{
        spin_lock(&root->accounting_lock);
        btrfs_set_root_used(&root->root_item,
                btrfs_root_used(&root->root_item) + root->fs_info->nodesize);
        spin_unlock(&root->accounting_lock);
}

static void root_sub_used_bytes(struct btrfs_root *root)
{
        spin_lock(&root->accounting_lock);
        btrfs_set_root_used(&root->root_item,
                btrfs_root_used(&root->root_item) - root->fs_info->nodesize);
        spin_unlock(&root->accounting_lock);
}

/*
 * Given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
                                           int slot)
{
        int level = btrfs_header_level(parent);
        struct btrfs_tree_parent_check check = { 0 };
        struct extent_buffer *eb;

        if (slot < 0 || slot >= btrfs_header_nritems(parent))
                return ERR_PTR(-ENOENT);

        ASSERT(level);

        check.level = level - 1;
        check.transid = btrfs_node_ptr_generation(parent, slot);
        check.owner_root = btrfs_header_owner(parent);
        check.has_first_key = true;
        btrfs_node_key_to_cpu(parent, &check.first_key, slot);

        eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
                             &check);
        if (IS_ERR(eb))
                return eb;
        if (!extent_buffer_uptodate(eb)) {
                free_extent_buffer(eb);
                return ERR_PTR(-EIO);
        }

        return eb;
}
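
/*
 * An illustrative sketch of the usual pattern around btrfs_read_node_slot()
 * (see balance_level() below for a real caller); the buffer comes back
 * referenced but unlocked:
 *
 *      child = btrfs_read_node_slot(parent, slot);
 *      if (IS_ERR(child))
 *              return PTR_ERR(child);
 *      btrfs_tree_lock(child);
 *      ... read or modify the child ...
 *      btrfs_tree_unlock(child);
 *      free_extent_buffer(child);
 */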

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path, int level)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *right = NULL;
        struct extent_buffer *mid;
        struct extent_buffer *left = NULL;
        struct extent_buffer *parent = NULL;
        int ret = 0;
        int wret;
        int pslot;
        int orig_slot = path->slots[level];
        u64 orig_ptr;

        ASSERT(level > 0);

        mid = path->nodes[level];

        WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
        WARN_ON(btrfs_header_generation(mid) != trans->transid);

        orig_ptr = btrfs_node_blockptr(mid, orig_slot);

        if (level < BTRFS_MAX_LEVEL - 1) {
                parent = path->nodes[level + 1];
                pslot = path->slots[level + 1];
        }

        /*
         * deal with the case where there is only one pointer in the root
         * by promoting the node below to a root
         */
        if (!parent) {
                struct extent_buffer *child;

                if (btrfs_header_nritems(mid) != 1)
                        return 0;

                /* promote the child to a root */
                child = btrfs_read_node_slot(mid, 0);
                if (IS_ERR(child)) {
                        ret = PTR_ERR(child);
                        goto out;
                }

                btrfs_tree_lock(child);
                ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
                                      BTRFS_NESTING_COW);
                if (ret) {
                        btrfs_tree_unlock(child);
                        free_extent_buffer(child);
                        goto out;
                }

                ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
                if (ret < 0) {
                        btrfs_tree_unlock(child);
                        free_extent_buffer(child);
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }
                rcu_assign_pointer(root->node, child);

                add_root_to_dirty_list(root);
                btrfs_tree_unlock(child);

                path->locks[level] = 0;
                path->nodes[level] = NULL;
                btrfs_clear_buffer_dirty(trans, mid);
                btrfs_tree_unlock(mid);
                /* once for the path */
                free_extent_buffer(mid);

                root_sub_used_bytes(root);
                ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
                /* once for the root ptr */
                free_extent_buffer_stale(mid);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }
                return 0;
        }
        if (btrfs_header_nritems(mid) >
            BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
                return 0;

        if (pslot) {
                left = btrfs_read_node_slot(parent, pslot - 1);
                if (IS_ERR(left)) {
                        ret = PTR_ERR(left);
                        left = NULL;
                        goto out;
                }

                btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
                wret = btrfs_cow_block(trans, root, left,
                                       parent, pslot - 1, &left,
                                       BTRFS_NESTING_LEFT_COW);
                if (wret) {
                        ret = wret;
                        goto out;
                }
        }

        if (pslot + 1 < btrfs_header_nritems(parent)) {
                right = btrfs_read_node_slot(parent, pslot + 1);
                if (IS_ERR(right)) {
                        ret = PTR_ERR(right);
                        right = NULL;
                        goto out;
                }

                btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);
                wret = btrfs_cow_block(trans, root, right,
                                       parent, pslot + 1, &right,
                                       BTRFS_NESTING_RIGHT_COW);
                if (wret) {
                        ret = wret;
                        goto out;
                }
        }

        /* first, try to make some room in the middle buffer */
        if (left) {
                orig_slot += btrfs_header_nritems(left);
                wret = push_node_left(trans, left, mid, 1);
                if (wret < 0)
                        ret = wret;
        }

        /*
         * then try to empty the right most buffer into the middle
         */
        if (right) {
                wret = push_node_left(trans, mid, right, 1);
                if (wret < 0 && wret != -ENOSPC)
                        ret = wret;
                if (btrfs_header_nritems(right) == 0) {
                        btrfs_clear_buffer_dirty(trans, right);
                        btrfs_tree_unlock(right);
                        ret = btrfs_del_ptr(trans, root, path, level + 1, pslot + 1);
                        if (ret < 0) {
                                free_extent_buffer_stale(right);
                                right = NULL;
                                goto out;
                        }
                        root_sub_used_bytes(root);
                        ret = btrfs_free_tree_block(trans, btrfs_root_id(root),
                                                    right, 0, 1);
                        free_extent_buffer_stale(right);
                        right = NULL;
                        if (ret < 0) {
                                btrfs_abort_transaction(trans, ret);
                                goto out;
                        }
                } else {
                        struct btrfs_disk_key right_key;

                        btrfs_node_key(right, &right_key, 0);
                        ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
                                        BTRFS_MOD_LOG_KEY_REPLACE);
                        if (ret < 0) {
                                btrfs_abort_transaction(trans, ret);
                                goto out;
                        }
                        btrfs_set_node_key(parent, &right_key, pslot + 1);
                        btrfs_mark_buffer_dirty(trans, parent);
                }
        }
        if (btrfs_header_nritems(mid) == 1) {
                /*
                 * we're not allowed to leave a node with one item in the
                 * tree during a delete.  A deletion from lower in the tree
                 * could try to delete the only pointer in this node.
                 * So, pull some keys from the left.
                 * There has to be a left pointer at this point because
                 * otherwise we would have pulled some pointers from the
                 * right
                 */
                if (unlikely(!left)) {
                        btrfs_crit(fs_info,
"missing left child when middle child only has 1 item, parent bytenr %llu level %d mid bytenr %llu root %llu",
                                   parent->start, btrfs_header_level(parent),
                                   mid->start, btrfs_root_id(root));
                        ret = -EUCLEAN;
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }
                wret = balance_node_right(trans, mid, left);
                if (wret < 0) {
                        ret = wret;
                        goto out;
                }
                if (wret == 1) {
                        wret = push_node_left(trans, left, mid, 1);
                        if (wret < 0)
                                ret = wret;
                }
                BUG_ON(wret == 1);
        }
        if (btrfs_header_nritems(mid) == 0) {
                btrfs_clear_buffer_dirty(trans, mid);
                btrfs_tree_unlock(mid);
                ret = btrfs_del_ptr(trans, root, path, level + 1, pslot);
                if (ret < 0) {
                        free_extent_buffer_stale(mid);
                        mid = NULL;
                        goto out;
                }
                root_sub_used_bytes(root);
                ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
                free_extent_buffer_stale(mid);
                mid = NULL;
                if (ret < 0) {
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }
        } else {
                /* update the parent key to reflect our changes */
                struct btrfs_disk_key mid_key;

                btrfs_node_key(mid, &mid_key, 0);
                ret = btrfs_tree_mod_log_insert_key(parent, pslot,
                                                    BTRFS_MOD_LOG_KEY_REPLACE);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, ret);
                        goto out;
                }
                btrfs_set_node_key(parent, &mid_key, pslot);
                btrfs_mark_buffer_dirty(trans, parent);
        }

        /* update the path */
        if (left) {
                if (btrfs_header_nritems(left) > orig_slot) {
                        atomic_inc(&left->refs);
                        /* left was locked after cow */
                        path->nodes[level] = left;
                        path->slots[level + 1] -= 1;
                        path->slots[level] = orig_slot;
                        if (mid) {
                                btrfs_tree_unlock(mid);
                                free_extent_buffer(mid);
                        }
                } else {
                        orig_slot -= btrfs_header_nritems(left);
                        path->slots[level] = orig_slot;
                }
        }
        /* double check we haven't messed things up */
        if (orig_ptr !=
            btrfs_node_blockptr(path->nodes[level], path->slots[level]))
                BUG();
out:
        if (right) {
                btrfs_tree_unlock(right);
                free_extent_buffer(right);
        }
        if (left) {
                if (path->nodes[level] != left)
                        btrfs_tree_unlock(left);
                free_extent_buffer(left);
        }
        return ret;
}

/*
 * Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path, int level)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct extent_buffer *right = NULL;
        struct extent_buffer *mid;
        struct extent_buffer *left = NULL;
        struct extent_buffer *parent = NULL;
        int ret = 0;
        int wret;
        int pslot;
        int orig_slot = path->slots[level];

        if (level == 0)
                return 1;

        mid = path->nodes[level];
        WARN_ON(btrfs_header_generation(mid) != trans->transid);

        if (level < BTRFS_MAX_LEVEL - 1) {
                parent = path->nodes[level + 1];
                pslot = path->slots[level + 1];
        }

        if (!parent)
                return 1;

        /* first, try to make some room in the middle buffer */
        if (pslot) {
                u32 left_nr;

                left = btrfs_read_node_slot(parent, pslot - 1);
                if (IS_ERR(left))
                        return PTR_ERR(left);

                btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);

                left_nr = btrfs_header_nritems(left);
                if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
                        wret = 1;
                } else {
                        ret = btrfs_cow_block(trans, root, left, parent,
                                              pslot - 1, &left,
                                              BTRFS_NESTING_LEFT_COW);
                        if (ret)
                                wret = 1;
                        else {
                                wret = push_node_left(trans, left, mid, 0);
                        }
                }
                if (wret < 0)
                        ret = wret;
                if (wret == 0) {
                        struct btrfs_disk_key disk_key;

                        orig_slot += left_nr;
                        btrfs_node_key(mid, &disk_key, 0);
                        ret = btrfs_tree_mod_log_insert_key(parent, pslot,
                                        BTRFS_MOD_LOG_KEY_REPLACE);
                        if (ret < 0) {
                                btrfs_tree_unlock(left);
                                free_extent_buffer(left);
                                btrfs_abort_transaction(trans, ret);
                                return ret;
                        }
                        btrfs_set_node_key(parent, &disk_key, pslot);
                        btrfs_mark_buffer_dirty(trans, parent);
                        if (btrfs_header_nritems(left) > orig_slot) {
                                path->nodes[level] = left;
                                path->slots[level + 1] -= 1;
                                path->slots[level] = orig_slot;
                                btrfs_tree_unlock(mid);
                                free_extent_buffer(mid);
                        } else {
                                orig_slot -= btrfs_header_nritems(left);
                                path->slots[level] = orig_slot;
                                btrfs_tree_unlock(left);
                                free_extent_buffer(left);
                        }
                        return 0;
                }
                btrfs_tree_unlock(left);
                free_extent_buffer(left);
        }

        /*
         * then try to empty the right most buffer into the middle
         */
        if (pslot + 1 < btrfs_header_nritems(parent)) {
                u32 right_nr;

                right = btrfs_read_node_slot(parent, pslot + 1);
                if (IS_ERR(right))
                        return PTR_ERR(right);

                btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);

                right_nr = btrfs_header_nritems(right);
                if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
                        wret = 1;
                } else {
                        ret = btrfs_cow_block(trans, root, right,
                                              parent, pslot + 1,
                                              &right, BTRFS_NESTING_RIGHT_COW);
                        if (ret)
                                wret = 1;
                        else {
                                wret = balance_node_right(trans, right, mid);
                        }
                }
                if (wret < 0)
                        ret = wret;
                if (wret == 0) {
                        struct btrfs_disk_key disk_key;

                        btrfs_node_key(right, &disk_key, 0);
                        ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
                                        BTRFS_MOD_LOG_KEY_REPLACE);
                        if (ret < 0) {
                                btrfs_tree_unlock(right);
                                free_extent_buffer(right);
                                btrfs_abort_transaction(trans, ret);
                                return ret;
                        }
                        btrfs_set_node_key(parent, &disk_key, pslot + 1);
                        btrfs_mark_buffer_dirty(trans, parent);

                        if (btrfs_header_nritems(mid) <= orig_slot) {
                                path->nodes[level] = right;
                                path->slots[level + 1] += 1;
                                path->slots[level] = orig_slot -
                                        btrfs_header_nritems(mid);
                                btrfs_tree_unlock(mid);
                                free_extent_buffer(mid);
                        } else {
                                btrfs_tree_unlock(right);
                                free_extent_buffer(right);
                        }
                        return 0;
                }
                btrfs_tree_unlock(right);
                free_extent_buffer(right);
        }
        return 1;
}

/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
                             struct btrfs_path *path,
                             int level, int slot, u64 objectid)
{
        struct extent_buffer *node;
        struct btrfs_disk_key disk_key;
        u32 nritems;
        u64 search;
        u64 target;
        u64 nread = 0;
        u64 nread_max;
        u32 nr;
        u32 blocksize;
        u32 nscan = 0;

        if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
                return;

        if (!path->nodes[level])
                return;

        node = path->nodes[level];

        /*
         * Since the time between visiting leaves is much shorter than the time
         * between visiting nodes, limit read ahead of nodes to 1, to avoid too
         * much IO at once (possibly random).
         */
        if (path->reada == READA_FORWARD_ALWAYS) {
                if (level > 1)
                        nread_max = node->fs_info->nodesize;
                else
                        nread_max = SZ_128K;
        } else {
                nread_max = SZ_64K;
        }

        search = btrfs_node_blockptr(node, slot);
        blocksize = fs_info->nodesize;
        if (path->reada != READA_FORWARD_ALWAYS) {
                struct extent_buffer *eb;

                eb = find_extent_buffer(fs_info, search);
                if (eb) {
                        free_extent_buffer(eb);
                        return;
                }
        }

        target = search;

        nritems = btrfs_header_nritems(node);
        nr = slot;

        while (1) {
                if (path->reada == READA_BACK) {
                        if (nr == 0)
                                break;
                        nr--;
                } else if (path->reada == READA_FORWARD ||
                           path->reada == READA_FORWARD_ALWAYS) {
                        nr++;
                        if (nr >= nritems)
                                break;
                }
                if (path->reada == READA_BACK && objectid) {
                        btrfs_node_key(node, &disk_key, nr);
                        if (btrfs_disk_key_objectid(&disk_key) != objectid)
                                break;
                }
                search = btrfs_node_blockptr(node, nr);
                if (path->reada == READA_FORWARD_ALWAYS ||
                    (search <= target && target - search <= 65536) ||
                    (search > target && search - target <= 65536)) {
                        btrfs_readahead_node_child(node, nr);
                        nread += blocksize;
                }
                nscan++;
                if (nread > nread_max || nscan > 32)
                        break;
        }
}

static noinline void reada_for_balance(struct btrfs_path *path, int level)
{
        struct extent_buffer *parent;
        int slot;
        int nritems;

        parent = path->nodes[level + 1];
        if (!parent)
                return;

        nritems = btrfs_header_nritems(parent);
        slot = path->slots[level + 1];

        if (slot > 0)
                btrfs_readahead_node_child(parent, slot - 1);
        if (slot + 1 < nritems)
                btrfs_readahead_node_child(parent, slot + 1);
}
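
/*
 * Readahead is opt-in per path.  An illustrative sketch (not a verbatim
 * caller): code that expects to iterate leaves in key order sets the mode
 * before searching:
 *
 *      path->reada = READA_FORWARD;
 *      ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *
 * READA_BACK is the reverse-iteration variant, and READA_FORWARD_ALWAYS
 * triggers readahead on every level even when the target block is already
 * cached.
 */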

/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
                               int lowest_unlock, int min_write_lock_level,
                               int *write_lock_level)
{
        int i;
        int skip_level = level;
        bool check_skip = true;

        for (i = level; i < BTRFS_MAX_LEVEL; i++) {
                if (!path->nodes[i])
                        break;
                if (!path->locks[i])
                        break;

                if (check_skip) {
                        if (path->slots[i] == 0) {
                                skip_level = i + 1;
                                continue;
                        }

                        if (path->keep_locks) {
                                u32 nritems;

                                nritems = btrfs_header_nritems(path->nodes[i]);
                                if (nritems < 1 || path->slots[i] >= nritems - 1) {
                                        skip_level = i + 1;
                                        continue;
                                }
                        }
                }

                if (i >= lowest_unlock && i > skip_level) {
                        check_skip = false;
                        btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
                        path->locks[i] = 0;
                        if (write_lock_level &&
                            i > min_write_lock_level &&
                            i <= *write_lock_level) {
                                *write_lock_level = i - 1;
                        }
                }
        }
}

/*
 * Helper function for btrfs_search_slot() and other functions that do a search
 * on a btree.  The goal is to find a tree block in the cache (the radix tree at
 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
 * its pages from disk.
 *
 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
 * whole btree search, starting again from the current root node.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
                      struct extent_buffer **eb_ret, int slot,
                      const struct btrfs_key *key)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_tree_parent_check check = { 0 };
        u64 blocknr;
        struct extent_buffer *tmp = NULL;
        int ret = 0;
        int parent_level;
        int err;
        bool read_tmp = false;
        bool tmp_locked = false;
        bool path_released = false;

        blocknr = btrfs_node_blockptr(*eb_ret, slot);
        parent_level = btrfs_header_level(*eb_ret);
        btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
        check.has_first_key = true;
        check.level = parent_level - 1;
        check.transid = btrfs_node_ptr_generation(*eb_ret, slot);
        check.owner_root = btrfs_root_id(root);

        /*
         * If we need to read an extent buffer from disk and we are holding locks
         * on upper level nodes, we unlock all the upper nodes before reading the
         * extent buffer, and then return -EAGAIN to the caller as it needs to
         * restart the search.  We don't release the lock on the current level
         * because we need to walk this node to figure out which blocks to read.
         */
        tmp = find_extent_buffer(fs_info, blocknr);
        if (tmp) {
                if (p->reada == READA_FORWARD_ALWAYS)
                        reada_for_search(fs_info, p, parent_level, slot, key->objectid);

                /* first we do an atomic uptodate check */
                if (btrfs_buffer_uptodate(tmp, check.transid, 1) > 0) {
                        /*
                         * Do extra check for first_key, eb can be stale due to
                         * being cached, read from scrub, or have multiple
                         * parents (shared tree blocks).
                         */
                        if (btrfs_verify_level_key(tmp, &check)) {
                                ret = -EUCLEAN;
                                goto out;
                        }
                        *eb_ret = tmp;
                        tmp = NULL;
                        ret = 0;
                        goto out;
                }

                if (p->nowait) {
                        ret = -EAGAIN;
                        goto out;
                }

                if (!p->skip_locking) {
                        btrfs_unlock_up_safe(p, parent_level + 1);
                        tmp_locked = true;
                        btrfs_tree_read_lock(tmp);
                        btrfs_release_path(p);
                        ret = -EAGAIN;
                        path_released = true;
                }

                /* Now we're allowed to do a blocking uptodate check. */
                err = btrfs_read_extent_buffer(tmp, &check);
                if (err) {
                        ret = err;
                        goto out;
                }

                if (ret == 0) {
                        ASSERT(!tmp_locked);
                        *eb_ret = tmp;
                        tmp = NULL;
                }
                goto out;
        } else if (p->nowait) {
                ret = -EAGAIN;
                goto out;
        }

        if (!p->skip_locking) {
                btrfs_unlock_up_safe(p, parent_level + 1);
                ret = -EAGAIN;
        }

        if (p->reada != READA_NONE)
                reada_for_search(fs_info, p, parent_level, slot, key->objectid);

        tmp = btrfs_find_create_tree_block(fs_info, blocknr, check.owner_root, check.level);
        if (IS_ERR(tmp)) {
                ret = PTR_ERR(tmp);
                tmp = NULL;
                goto out;
        }
        read_tmp = true;

        if (!p->skip_locking) {
                ASSERT(ret == -EAGAIN);
                tmp_locked = true;
                btrfs_tree_read_lock(tmp);
                btrfs_release_path(p);
                path_released = true;
        }

        /* Now we're allowed to do a blocking uptodate check. */
        err = btrfs_read_extent_buffer(tmp, &check);
        if (err) {
                ret = err;
                goto out;
        }

        /*
         * If the read above didn't mark this buffer up to date,
         * it will never end up being up to date.  Set ret to EIO now
         * and give up so that our caller doesn't loop forever
         * on our EAGAINs.
         */
        if (!extent_buffer_uptodate(tmp)) {
                ret = -EIO;
                goto out;
        }

        if (ret == 0) {
                ASSERT(!tmp_locked);
                *eb_ret = tmp;
                tmp = NULL;
        }
out:
        if (tmp) {
                if (tmp_locked)
                        btrfs_tree_read_unlock(tmp);
                if (read_tmp && ret && ret != -EAGAIN)
                        free_extent_buffer_stale(tmp);
                else
                        free_extent_buffer(tmp);
        }
        if (ret && !path_released)
                btrfs_release_path(p);

        return ret;
}
If we had to 1662 * drop the path, -EAGAIN is returned and btrfs_search_slot must 1663 * start over 1664 */ 1665 static int 1666 setup_nodes_for_search(struct btrfs_trans_handle *trans, 1667 struct btrfs_root *root, struct btrfs_path *p, 1668 struct extent_buffer *b, int level, int ins_len, 1669 int *write_lock_level) 1670 { 1671 struct btrfs_fs_info *fs_info = root->fs_info; 1672 int ret = 0; 1673 1674 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >= 1675 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) { 1676 1677 if (*write_lock_level < level + 1) { 1678 *write_lock_level = level + 1; 1679 btrfs_release_path(p); 1680 return -EAGAIN; 1681 } 1682 1683 reada_for_balance(p, level); 1684 ret = split_node(trans, root, p, level); 1685 1686 b = p->nodes[level]; 1687 } else if (ins_len < 0 && btrfs_header_nritems(b) < 1688 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) { 1689 1690 if (*write_lock_level < level + 1) { 1691 *write_lock_level = level + 1; 1692 btrfs_release_path(p); 1693 return -EAGAIN; 1694 } 1695 1696 reada_for_balance(p, level); 1697 ret = balance_level(trans, root, p, level); 1698 if (ret) 1699 return ret; 1700 1701 b = p->nodes[level]; 1702 if (!b) { 1703 btrfs_release_path(p); 1704 return -EAGAIN; 1705 } 1706 BUG_ON(btrfs_header_nritems(b) == 1); 1707 } 1708 return ret; 1709 } 1710 1711 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, 1712 u64 iobjectid, u64 ioff, u8 key_type, 1713 struct btrfs_key *found_key) 1714 { 1715 int ret; 1716 struct btrfs_key key; 1717 struct extent_buffer *eb; 1718 1719 ASSERT(path); 1720 ASSERT(found_key); 1721 1722 key.type = key_type; 1723 key.objectid = iobjectid; 1724 key.offset = ioff; 1725 1726 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); 1727 if (ret < 0) 1728 return ret; 1729 1730 eb = path->nodes[0]; 1731 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { 1732 ret = btrfs_next_leaf(fs_root, path); 1733 if (ret) 1734 return ret; 1735 eb = path->nodes[0]; 1736 } 1737 1738 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); 1739 if (found_key->type != key.type || 1740 found_key->objectid != key.objectid) 1741 return 1; 1742 1743 return 0; 1744 } 1745 1746 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, 1747 struct btrfs_path *p, 1748 int write_lock_level) 1749 { 1750 struct extent_buffer *b; 1751 int root_lock = 0; 1752 int level = 0; 1753 1754 if (p->search_commit_root) { 1755 b = root->commit_root; 1756 atomic_inc(&b->refs); 1757 level = btrfs_header_level(b); 1758 /* 1759 * Ensure that all callers have set skip_locking when 1760 * p->search_commit_root = 1. 1761 */ 1762 ASSERT(p->skip_locking == 1); 1763 1764 goto out; 1765 } 1766 1767 if (p->skip_locking) { 1768 b = btrfs_root_node(root); 1769 level = btrfs_header_level(b); 1770 goto out; 1771 } 1772 1773 /* We try very hard to do read locks on the root */ 1774 root_lock = BTRFS_READ_LOCK; 1775 1776 /* 1777 * If the level is set to maximum, we can skip trying to get the read 1778 * lock. 

static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
                                                        struct btrfs_path *p,
                                                        int write_lock_level)
{
        struct extent_buffer *b;
        int root_lock = 0;
        int level = 0;

        if (p->search_commit_root) {
                b = root->commit_root;
                atomic_inc(&b->refs);
                level = btrfs_header_level(b);
                /*
                 * Ensure that all callers have set skip_locking when
                 * p->search_commit_root = 1.
                 */
                ASSERT(p->skip_locking == 1);

                goto out;
        }

        if (p->skip_locking) {
                b = btrfs_root_node(root);
                level = btrfs_header_level(b);
                goto out;
        }

        /* We try very hard to do read locks on the root */
        root_lock = BTRFS_READ_LOCK;

        /*
         * If the level is set to maximum, we can skip trying to get the read
         * lock.
         */
        if (write_lock_level < BTRFS_MAX_LEVEL) {
                /*
                 * We don't know the level of the root node until we actually
                 * have it read locked
                 */
                if (p->nowait) {
                        b = btrfs_try_read_lock_root_node(root);
                        if (IS_ERR(b))
                                return b;
                } else {
                        b = btrfs_read_lock_root_node(root);
                }
                level = btrfs_header_level(b);
                if (level > write_lock_level)
                        goto out;

                /* Whoops, must trade for write lock */
                btrfs_tree_read_unlock(b);
                free_extent_buffer(b);
        }

        b = btrfs_lock_root_node(root);
        root_lock = BTRFS_WRITE_LOCK;

        /* The level might have changed, check again */
        level = btrfs_header_level(b);

out:
        /*
         * The root may have failed to write out at some point, and thus is no
         * longer valid, return an error in this case.
         */
        if (!extent_buffer_uptodate(b)) {
                if (root_lock)
                        btrfs_tree_unlock_rw(b, root_lock);
                free_extent_buffer(b);
                return ERR_PTR(-EIO);
        }

        p->nodes[level] = b;
        if (!p->skip_locking)
                p->locks[level] = root_lock;
        /*
         * Callers are responsible for dropping b's references.
         */
        return b;
}

/*
 * Replace the extent buffer at the lowest level of the path with a cloned
 * version.  The purpose is to be able to use it safely, after releasing the
 * commit root semaphore, even if relocation is happening in parallel, the
 * transaction used for relocation is committed and the extent buffer is
 * reallocated in the next transaction.
 *
 * This is used in a context where the caller does not prevent transaction
 * commits from happening, either by holding a transaction handle or holding
 * some lock, while it's doing searches through a commit root.
 * At the moment it's only used for send operations.
 */
static int finish_need_commit_sem_search(struct btrfs_path *path)
{
        const int i = path->lowest_level;
        const int slot = path->slots[i];
        struct extent_buffer *lowest = path->nodes[i];
        struct extent_buffer *clone;

        ASSERT(path->need_commit_sem);

        if (!lowest)
                return 0;

        lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);

        clone = btrfs_clone_extent_buffer(lowest);
        if (!clone)
                return -ENOMEM;

        btrfs_release_path(path);
        path->nodes[i] = clone;
        path->slots[i] = slot;

        return 0;
}

static inline int search_for_key_slot(struct extent_buffer *eb,
                                      int search_low_slot,
                                      const struct btrfs_key *key,
                                      int prev_cmp,
                                      int *slot)
{
        /*
         * If a previous call to btrfs_bin_search() on a parent node returned an
         * exact match (prev_cmp == 0), we can safely assume the target key will
         * always be at slot 0 on lower levels, since each key pointer
         * (struct btrfs_key_ptr) refers to the lowest key accessible from the
         * subtree it points to.  Thus we can skip searching lower levels.
         */
        if (prev_cmp == 0) {
                *slot = 0;
                return 0;
        }

        return btrfs_bin_search(eb, search_low_slot, key, slot);
}
In this case, if we are allowed to 1984 * insert the item (for example, in the dir_item case, item key 1985 * collision is allowed), it will be merged with the original 1986 * item. Only the item size grows, no new btrfs_item will be 1987 * added. If search_for_extension is not set, ins_len already 1988 * accounts for the size of struct btrfs_item, so deduct it here 1989 * so the leaf space check will be correct. 1990 */ 1991 if (ret == 0 && !path->search_for_extension) { 1992 ASSERT(ins_len >= sizeof(struct btrfs_item)); 1993 ins_len -= sizeof(struct btrfs_item); 1994 } 1995 1996 ASSERT(leaf_free_space >= 0); 1997 1998 if (leaf_free_space < ins_len) { 1999 int err; 2000 2001 err = split_leaf(trans, root, key, path, ins_len, 2002 (ret == 0)); 2003 ASSERT(err <= 0); 2004 if (WARN_ON(err > 0)) 2005 err = -EUCLEAN; 2006 if (err) 2007 ret = err; 2008 } 2009 } 2010 2011 return ret; 2012 } 2013 2014 /* 2015 * Look for a key in a tree and perform necessary modifications to preserve 2016 * tree invariants. 2017 * 2018 * @trans: Handle of transaction, used when modifying the tree 2019 * @p: Holds all btree nodes along the search path 2020 * @root: The root node of the tree 2021 * @key: The key we are looking for 2022 * @ins_len: Indicates purpose of search: 2023 * >0 for inserts, it's the size of the item inserted (*) 2024 * <0 for deletions 2025 * 0 for plain searches, not modifying the tree 2026 * 2027 * (*) If the size of the item inserted doesn't include 2028 * sizeof(struct btrfs_item), then p->search_for_extension must 2029 * be set. 2030 * @cow: whether CoW operations should be performed. Must always be 1 2031 * when modifying the tree. 2032 * 2033 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree. 2034 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible) 2035 * 2036 * If @key is found, 0 is returned and you can find the item in the leaf level 2037 * of the path (level 0) 2038 * 2039 * If @key isn't found, 1 is returned and the leaf level of the path (level 0) 2040 * points to the slot where it should be inserted 2041 * 2042 * If an error is encountered while searching the tree, a negative error number 2043 * is returned 2044 */ 2045 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2046 const struct btrfs_key *key, struct btrfs_path *p, 2047 int ins_len, int cow) 2048 { 2049 struct btrfs_fs_info *fs_info = root->fs_info; 2050 struct extent_buffer *b; 2051 int slot; 2052 int ret; 2053 int err; 2054 int level; 2055 int lowest_unlock = 1; 2056 /* everything at write_lock_level or lower must be write locked */ 2057 int write_lock_level = 0; 2058 u8 lowest_level = 0; 2059 int min_write_lock_level; 2060 int prev_cmp; 2061 2062 might_sleep(); 2063 2064 lowest_level = p->lowest_level; 2065 WARN_ON(lowest_level && ins_len > 0); 2066 WARN_ON(p->nodes[0] != NULL); 2067 BUG_ON(!cow && ins_len); 2068 2069 /* 2070 * For now only allow nowait for read only operations. There's no 2071 * strict reason why we can't, we just only need it for reads so it's 2072 * only implemented for reads.
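 *
 * A minimal caller sketch for the nowait mode (hypothetical caller code,
 * error handling elided; btrfs_alloc_path()/btrfs_free_path() are the
 * usual path helpers):
 *
 *   struct btrfs_path *path = btrfs_alloc_path();
 *
 *   path->nowait = 1;
 *   ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *   if (ret == -EAGAIN) {
 *           path->nowait = 0;
 *           ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *   }
 *   btrfs_free_path(path);
 *
 * On lock contention the nowait search bails out with -EAGAIN and the
 * caller falls back to a blocking search, as sketched above.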
2073 */ 2074 ASSERT(!p->nowait || !cow); 2075 2076 if (ins_len < 0) { 2077 lowest_unlock = 2; 2078 2079 /* when we are removing items, we might have to go up to level 2080 * two as we update tree pointers. Make sure we keep write 2081 * locks for those levels as well 2082 */ 2083 write_lock_level = 2; 2084 } else if (ins_len > 0) { 2085 /* 2086 * for inserting items, make sure we have a write lock on 2087 * level 1 so we can update keys 2088 */ 2089 write_lock_level = 1; 2090 } 2091 2092 if (!cow) 2093 write_lock_level = -1; 2094 2095 if (cow && (p->keep_locks || p->lowest_level)) 2096 write_lock_level = BTRFS_MAX_LEVEL; 2097 2098 min_write_lock_level = write_lock_level; 2099 2100 if (p->need_commit_sem) { 2101 ASSERT(p->search_commit_root); 2102 if (p->nowait) { 2103 if (!down_read_trylock(&fs_info->commit_root_sem)) 2104 return -EAGAIN; 2105 } else { 2106 down_read(&fs_info->commit_root_sem); 2107 } 2108 } 2109 2110 again: 2111 prev_cmp = -1; 2112 b = btrfs_search_slot_get_root(root, p, write_lock_level); 2113 if (IS_ERR(b)) { 2114 ret = PTR_ERR(b); 2115 goto done; 2116 } 2117 2118 while (b) { 2119 int dec = 0; 2120 2121 level = btrfs_header_level(b); 2122 2123 if (cow) { 2124 bool last_level = (level == (BTRFS_MAX_LEVEL - 1)); 2125 2126 /* 2127 * if we don't really need to cow this block 2128 * then we don't want to set the path blocking, 2129 * so we test it here 2130 */ 2131 if (!should_cow_block(trans, root, b)) 2132 goto cow_done; 2133 2134 /* 2135 * must have write locks on this node and the 2136 * parent 2137 */ 2138 if (level > write_lock_level || 2139 (level + 1 > write_lock_level && 2140 level + 1 < BTRFS_MAX_LEVEL && 2141 p->nodes[level + 1])) { 2142 write_lock_level = level + 1; 2143 btrfs_release_path(p); 2144 goto again; 2145 } 2146 2147 if (last_level) 2148 err = btrfs_cow_block(trans, root, b, NULL, 0, 2149 &b, 2150 BTRFS_NESTING_COW); 2151 else 2152 err = btrfs_cow_block(trans, root, b, 2153 p->nodes[level + 1], 2154 p->slots[level + 1], &b, 2155 BTRFS_NESTING_COW); 2156 if (err) { 2157 ret = err; 2158 goto done; 2159 } 2160 } 2161 cow_done: 2162 p->nodes[level] = b; 2163 2164 /* 2165 * we have a lock on b and as long as we aren't changing 2166 * the tree, there is no way for the items in b to change. 2167 * It is safe to drop the lock on our parent before we 2168 * go through the expensive btree search on b. 2169 * 2170 * If we're inserting or deleting (ins_len != 0), then we might 2171 * be changing slot zero, which may require changing the parent. 2172 * So, we can't drop the lock until after we know which slot 2173 * we're operating on.
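 *
 * As a concrete example (illustrative, not from a real workload): if we
 * are inserting a key smaller than every key currently in b, the new key
 * lands in slot 0, and fixup_low_keys() will then have to rewrite the
 * key in the parent (and possibly higher) node(s), so the parent's write
 * lock must still be held at that point.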
2174 */ 2175 if (!ins_len && !p->keep_locks) { 2176 int u = level + 1; 2177 2178 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 2179 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 2180 p->locks[u] = 0; 2181 } 2182 } 2183 2184 if (level == 0) { 2185 if (ins_len > 0) 2186 ASSERT(write_lock_level >= 1); 2187 2188 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp); 2189 if (!p->search_for_split) 2190 unlock_up(p, level, lowest_unlock, 2191 min_write_lock_level, NULL); 2192 goto done; 2193 } 2194 2195 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot); 2196 if (ret < 0) 2197 goto done; 2198 prev_cmp = ret; 2199 2200 if (ret && slot > 0) { 2201 dec = 1; 2202 slot--; 2203 } 2204 p->slots[level] = slot; 2205 err = setup_nodes_for_search(trans, root, p, b, level, ins_len, 2206 &write_lock_level); 2207 if (err == -EAGAIN) 2208 goto again; 2209 if (err) { 2210 ret = err; 2211 goto done; 2212 } 2213 b = p->nodes[level]; 2214 slot = p->slots[level]; 2215 2216 /* 2217 * Slot 0 is special, if we change the key we have to update 2218 * the parent pointer which means we must have a write lock on 2219 * the parent 2220 */ 2221 if (slot == 0 && ins_len && write_lock_level < level + 1) { 2222 write_lock_level = level + 1; 2223 btrfs_release_path(p); 2224 goto again; 2225 } 2226 2227 unlock_up(p, level, lowest_unlock, min_write_lock_level, 2228 &write_lock_level); 2229 2230 if (level == lowest_level) { 2231 if (dec) 2232 p->slots[level]++; 2233 goto done; 2234 } 2235 2236 err = read_block_for_search(root, p, &b, slot, key); 2237 if (err == -EAGAIN && !p->nowait) 2238 goto again; 2239 if (err) { 2240 ret = err; 2241 goto done; 2242 } 2243 2244 if (!p->skip_locking) { 2245 level = btrfs_header_level(b); 2246 2247 btrfs_maybe_reset_lockdep_class(root, b); 2248 2249 if (level <= write_lock_level) { 2250 btrfs_tree_lock(b); 2251 p->locks[level] = BTRFS_WRITE_LOCK; 2252 } else { 2253 if (p->nowait) { 2254 if (!btrfs_try_tree_read_lock(b)) { 2255 free_extent_buffer(b); 2256 ret = -EAGAIN; 2257 goto done; 2258 } 2259 } else { 2260 btrfs_tree_read_lock(b); 2261 } 2262 p->locks[level] = BTRFS_READ_LOCK; 2263 } 2264 p->nodes[level] = b; 2265 } 2266 } 2267 ret = 1; 2268 done: 2269 if (ret < 0 && !p->skip_release_on_error) 2270 btrfs_release_path(p); 2271 2272 if (p->need_commit_sem) { 2273 int ret2; 2274 2275 ret2 = finish_need_commit_sem_search(p); 2276 up_read(&fs_info->commit_root_sem); 2277 if (ret2) 2278 ret = ret2; 2279 } 2280 2281 return ret; 2282 } 2283 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO); 2284 2285 /* 2286 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 2287 * current state of the tree together with the operations recorded in the tree 2288 * modification log to search for the key in a previous version of this tree, as 2289 * denoted by the time_seq parameter. 2290 * 2291 * Naturally, there is no support for insert, delete or cow operations. 2292 * 2293 * The resulting path and return value will be set up as if we called 2294 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 
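 *
 * A minimal caller sketch (hypothetical, error handling elided; modeled
 * on how backref walking pins a tree mod log sequence number, these
 * helpers live in tree-mod-log.h/misc.h):
 *
 *   struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
 *   u64 time_seq = btrfs_get_tree_mod_seq(fs_info, &elem);
 *
 *   ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *   ... use path as after a plain btrfs_search_slot() ...
 *   btrfs_put_tree_mod_seq(fs_info, &elem);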
2295 */ 2296 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 2297 struct btrfs_path *p, u64 time_seq) 2298 { 2299 struct btrfs_fs_info *fs_info = root->fs_info; 2300 struct extent_buffer *b; 2301 int slot; 2302 int ret; 2303 int err; 2304 int level; 2305 int lowest_unlock = 1; 2306 u8 lowest_level = 0; 2307 2308 lowest_level = p->lowest_level; 2309 WARN_ON(p->nodes[0] != NULL); 2310 ASSERT(!p->nowait); 2311 2312 if (p->search_commit_root) { 2313 BUG_ON(time_seq); 2314 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2315 } 2316 2317 again: 2318 b = btrfs_get_old_root(root, time_seq); 2319 if (!b) { 2320 ret = -EIO; 2321 goto done; 2322 } 2323 level = btrfs_header_level(b); 2324 p->locks[level] = BTRFS_READ_LOCK; 2325 2326 while (b) { 2327 int dec = 0; 2328 2329 level = btrfs_header_level(b); 2330 p->nodes[level] = b; 2331 2332 /* 2333 * we have a lock on b and as long as we aren't changing 2334 * the tree, there is no way for the items in b to change. 2335 * It is safe to drop the lock on our parent before we 2336 * go through the expensive btree search on b. 2337 */ 2338 btrfs_unlock_up_safe(p, level + 1); 2339 2340 ret = btrfs_bin_search(b, 0, key, &slot); 2341 if (ret < 0) 2342 goto done; 2343 2344 if (level == 0) { 2345 p->slots[level] = slot; 2346 unlock_up(p, level, lowest_unlock, 0, NULL); 2347 goto done; 2348 } 2349 2350 if (ret && slot > 0) { 2351 dec = 1; 2352 slot--; 2353 } 2354 p->slots[level] = slot; 2355 unlock_up(p, level, lowest_unlock, 0, NULL); 2356 2357 if (level == lowest_level) { 2358 if (dec) 2359 p->slots[level]++; 2360 goto done; 2361 } 2362 2363 err = read_block_for_search(root, p, &b, slot, key); 2364 if (err == -EAGAIN && !p->nowait) 2365 goto again; 2366 if (err) { 2367 ret = err; 2368 goto done; 2369 } 2370 2371 level = btrfs_header_level(b); 2372 btrfs_tree_read_lock(b); 2373 b = btrfs_tree_mod_log_rewind(fs_info, b, time_seq); 2374 if (!b) { 2375 ret = -ENOMEM; 2376 goto done; 2377 } 2378 p->locks[level] = BTRFS_READ_LOCK; 2379 p->nodes[level] = b; 2380 } 2381 ret = 1; 2382 done: 2383 if (ret < 0) 2384 btrfs_release_path(p); 2385 2386 return ret; 2387 } 2388 2389 /* 2390 * Search the tree again to find a leaf with smaller keys. 2391 * Returns 0 if it found something. 2392 * Returns 1 if there are no smaller keys. 2393 * Returns < 0 on error. 2394 * 2395 * This may release the path, and so you may lose any locks held at the 2396 * time you call it. 2397 */ 2398 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 2399 { 2400 struct btrfs_key key; 2401 struct btrfs_key orig_key; 2402 struct btrfs_disk_key found_key; 2403 int ret; 2404 2405 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 2406 orig_key = key; 2407 2408 if (key.offset > 0) { 2409 key.offset--; 2410 } else if (key.type > 0) { 2411 key.type--; 2412 key.offset = (u64)-1; 2413 } else if (key.objectid > 0) { 2414 key.objectid--; 2415 key.type = (u8)-1; 2416 key.offset = (u64)-1; 2417 } else { 2418 return 1; 2419 } 2420 2421 btrfs_release_path(path); 2422 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2423 if (ret <= 0) 2424 return ret; 2425 2426 /* 2427 * Previous key not found.
Even if we were at slot 0 of the leaf we had 2428 * before releasing the path and calling btrfs_search_slot(), we now may 2429 * be in a slot pointing to the same original key - this can happen if 2430 * after we released the path, one or more items were moved from a 2431 * sibling leaf into the front of the leaf we had due to an insertion 2432 * (see push_leaf_right()). 2433 * If we hit this case and our slot is > 0, just decrement the slot 2434 * so that the caller does not process the same key again, which may or 2435 * may not break the caller, depending on its logic. 2436 */ 2437 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { 2438 btrfs_item_key(path->nodes[0], &found_key, path->slots[0]); 2439 ret = btrfs_comp_keys(&found_key, &orig_key); 2440 if (ret == 0) { 2441 if (path->slots[0] > 0) { 2442 path->slots[0]--; 2443 return 0; 2444 } 2445 /* 2446 * At slot 0, same key as before, it means orig_key is 2447 * the lowest, leftmost, key in the tree. We're done. 2448 */ 2449 return 1; 2450 } 2451 } 2452 2453 btrfs_item_key(path->nodes[0], &found_key, 0); 2454 ret = btrfs_comp_keys(&found_key, &key); 2455 /* 2456 * We might have had an item with the previous key in the tree right 2457 * before we released our path. And after we released our path, that 2458 * item might have been pushed to the first slot (0) of the leaf we 2459 * were holding due to a tree balance. Alternatively, an item with the 2460 * previous key can exist as the only element of a leaf (big fat item). 2461 * Therefore account for these 2 cases, so that our callers (like 2462 * btrfs_previous_item) don't miss an existing item with a key matching 2463 * the previous key we computed above. 2464 */ 2465 if (ret <= 0) 2466 return 0; 2467 return 1; 2468 } 2469 2470 /* 2471 * helper to use instead of search slot if no exact match is needed but 2472 * instead the next or previous item should be returned. 2473 * When find_higher is true, the next higher item is returned, the next lower 2474 * otherwise. 2475 * When return_any and find_higher are both true, and no higher item is found, 2476 * return the next lower instead. 2477 * When return_any is true and find_higher is false, and no lower item is found, 2478 * return the next higher instead. 2479 * It returns 0 if any item is found, 1 if none is found (tree empty), and 2480 * < 0 on error 2481 */ 2482 int btrfs_search_slot_for_read(struct btrfs_root *root, 2483 const struct btrfs_key *key, 2484 struct btrfs_path *p, int find_higher, 2485 int return_any) 2486 { 2487 int ret; 2488 struct extent_buffer *leaf; 2489 2490 again: 2491 ret = btrfs_search_slot(NULL, root, key, p, 0, 0); 2492 if (ret <= 0) 2493 return ret; 2494 /* 2495 * a return value of 1 means the path is at the position where the 2496 * item should be inserted. Normally this is the next bigger item, 2497 * but in case the previous item is the last in a leaf, path points 2498 * to the first free slot in the previous leaf, i.e. at an invalid 2499 * item.
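 *
 * For example (illustrative numbers): if the leaf holds 5 items in
 * slots 0-4 and the searched key is greater than all of them,
 * btrfs_search_slot() returns 1 with p->slots[0] == 5, one past the
 * last valid slot. That is why the find_higher case below has to move
 * to the next leaf before the slot can be dereferenced.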
2500 */ 2501 leaf = p->nodes[0]; 2502 2503 if (find_higher) { 2504 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2505 ret = btrfs_next_leaf(root, p); 2506 if (ret <= 0) 2507 return ret; 2508 if (!return_any) 2509 return 1; 2510 /* 2511 * no higher item found, return the next 2512 * lower instead 2513 */ 2514 return_any = 0; 2515 find_higher = 0; 2516 btrfs_release_path(p); 2517 goto again; 2518 } 2519 } else { 2520 if (p->slots[0] == 0) { 2521 ret = btrfs_prev_leaf(root, p); 2522 if (ret < 0) 2523 return ret; 2524 if (!ret) { 2525 leaf = p->nodes[0]; 2526 if (p->slots[0] == btrfs_header_nritems(leaf)) 2527 p->slots[0]--; 2528 return 0; 2529 } 2530 if (!return_any) 2531 return 1; 2532 /* 2533 * no lower item found, return the next 2534 * higher instead 2535 */ 2536 return_any = 0; 2537 find_higher = 1; 2538 btrfs_release_path(p); 2539 goto again; 2540 } else { 2541 --p->slots[0]; 2542 } 2543 } 2544 return 0; 2545 } 2546 2547 /* 2548 * Execute search and call btrfs_previous_item to traverse backwards if the item 2549 * was not found. 2550 * 2551 * Return 0 if found, 1 if not found and < 0 if error. 2552 */ 2553 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key, 2554 struct btrfs_path *path) 2555 { 2556 int ret; 2557 2558 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 2559 if (ret > 0) 2560 ret = btrfs_previous_item(root, path, key->objectid, key->type); 2561 2562 if (ret == 0) 2563 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2564 2565 return ret; 2566 } 2567 2568 /* 2569 * Search for a valid slot for the given path. 2570 * 2571 * @root: The root node of the tree. 2572 * @key: Will contain a valid item if found. 2573 * @path: The starting point to validate the slot. 2574 * 2575 * Return: 0 if the item is valid 2576 * 1 if not found 2577 * <0 if error. 2578 */ 2579 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, 2580 struct btrfs_path *path) 2581 { 2582 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 2583 int ret; 2584 2585 ret = btrfs_next_leaf(root, path); 2586 if (ret) 2587 return ret; 2588 } 2589 2590 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2591 return 0; 2592 } 2593 2594 /* 2595 * adjust the pointers going up the tree, starting at level, 2596 * making sure the right key of each node points to 'key'. 2597 * This is used after shifting pointers to the left, so it stops 2598 * fixing up pointers when a given leaf/node is not in slot 0 of the 2599 * higher levels 2600 * 2601 */ 2602 static void fixup_low_keys(struct btrfs_trans_handle *trans, 2603 const struct btrfs_path *path, 2604 const struct btrfs_disk_key *key, int level) 2605 { 2606 int i; 2607 struct extent_buffer *t; 2608 int ret; 2609 2610 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2611 int tslot = path->slots[i]; 2612 2613 if (!path->nodes[i]) 2614 break; 2615 t = path->nodes[i]; 2616 ret = btrfs_tree_mod_log_insert_key(t, tslot, 2617 BTRFS_MOD_LOG_KEY_REPLACE); 2618 BUG_ON(ret < 0); 2619 btrfs_set_node_key(t, key, tslot); 2620 btrfs_mark_buffer_dirty(trans, path->nodes[i]); 2621 if (tslot != 0) 2622 break; 2623 } 2624 } 2625 2626 /* 2627 * update item key. 2628 * 2629 * This function isn't completely safe.
It's the caller's responsibility 2630 * that the new key won't break the order 2631 */ 2632 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, 2633 const struct btrfs_path *path, 2634 const struct btrfs_key *new_key) 2635 { 2636 struct btrfs_fs_info *fs_info = trans->fs_info; 2637 struct btrfs_disk_key disk_key; 2638 struct extent_buffer *eb; 2639 int slot; 2640 2641 eb = path->nodes[0]; 2642 slot = path->slots[0]; 2643 if (slot > 0) { 2644 btrfs_item_key(eb, &disk_key, slot - 1); 2645 if (unlikely(btrfs_comp_keys(&disk_key, new_key) >= 0)) { 2646 btrfs_print_leaf(eb); 2647 btrfs_crit(fs_info, 2648 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2649 slot, btrfs_disk_key_objectid(&disk_key), 2650 btrfs_disk_key_type(&disk_key), 2651 btrfs_disk_key_offset(&disk_key), 2652 new_key->objectid, new_key->type, 2653 new_key->offset); 2654 BUG(); 2655 } 2656 } 2657 if (slot < btrfs_header_nritems(eb) - 1) { 2658 btrfs_item_key(eb, &disk_key, slot + 1); 2659 if (unlikely(btrfs_comp_keys(&disk_key, new_key) <= 0)) { 2660 btrfs_print_leaf(eb); 2661 btrfs_crit(fs_info, 2662 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2663 slot, btrfs_disk_key_objectid(&disk_key), 2664 btrfs_disk_key_type(&disk_key), 2665 btrfs_disk_key_offset(&disk_key), 2666 new_key->objectid, new_key->type, 2667 new_key->offset); 2668 BUG(); 2669 } 2670 } 2671 2672 btrfs_cpu_key_to_disk(&disk_key, new_key); 2673 btrfs_set_item_key(eb, &disk_key, slot); 2674 btrfs_mark_buffer_dirty(trans, eb); 2675 if (slot == 0) 2676 fixup_low_keys(trans, path, &disk_key, 1); 2677 } 2678 2679 /* 2680 * Check key order of two sibling extent buffers. 2681 * 2682 * Return true if something is wrong. 2683 * Return false if everything is fine. 2684 * 2685 * Tree-checker only works inside one tree block, thus the following 2686 * corruption can not be detected by tree-checker: 2687 * 2688 * Leaf @left | Leaf @right 2689 * -------------------------------------------------------------- 2690 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 | 2691 * 2692 * Key f6 in leaf @left itself is valid, but not valid when the next 2693 * key in leaf @right is 7. 2694 * This can only be checked at tree block merge time. 2695 * And since tree checker has ensured all key order in each tree block 2696 * is correct, we only need to bother the last key of @left and the first 2697 * key of @right. 
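 *
 * For example (illustrative keys): if the last key of @left is
 * (257 DIR_ITEM 0) and the first key of @right is (256 INODE_ITEM 0),
 * the comparison below reports corruption, because every key in @left
 * must be strictly smaller than every key in @right.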
2698 */ 2699 static bool check_sibling_keys(const struct extent_buffer *left, 2700 const struct extent_buffer *right) 2701 { 2702 struct btrfs_key left_last; 2703 struct btrfs_key right_first; 2704 int level = btrfs_header_level(left); 2705 int nr_left = btrfs_header_nritems(left); 2706 int nr_right = btrfs_header_nritems(right); 2707 2708 /* No key to check in one of the tree blocks */ 2709 if (!nr_left || !nr_right) 2710 return false; 2711 2712 if (level) { 2713 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1); 2714 btrfs_node_key_to_cpu(right, &right_first, 0); 2715 } else { 2716 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1); 2717 btrfs_item_key_to_cpu(right, &right_first, 0); 2718 } 2719 2720 if (unlikely(btrfs_comp_cpu_keys(&left_last, &right_first) >= 0)) { 2721 btrfs_crit(left->fs_info, "left extent buffer:"); 2722 btrfs_print_tree(left, false); 2723 btrfs_crit(left->fs_info, "right extent buffer:"); 2724 btrfs_print_tree(right, false); 2725 btrfs_crit(left->fs_info, 2726 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)", 2727 left_last.objectid, left_last.type, 2728 left_last.offset, right_first.objectid, 2729 right_first.type, right_first.offset); 2730 return true; 2731 } 2732 return false; 2733 } 2734 2735 /* 2736 * try to push data from one node into the next node left in the 2737 * tree. 2738 * 2739 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2740 * error, and > 0 if there was no room in the left hand block. 2741 */ 2742 static int push_node_left(struct btrfs_trans_handle *trans, 2743 struct extent_buffer *dst, 2744 struct extent_buffer *src, int empty) 2745 { 2746 struct btrfs_fs_info *fs_info = trans->fs_info; 2747 int push_items = 0; 2748 int src_nritems; 2749 int dst_nritems; 2750 int ret = 0; 2751 2752 src_nritems = btrfs_header_nritems(src); 2753 dst_nritems = btrfs_header_nritems(dst); 2754 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2755 WARN_ON(btrfs_header_generation(src) != trans->transid); 2756 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2757 2758 if (!empty && src_nritems <= 8) 2759 return 1; 2760 2761 if (push_items <= 0) 2762 return 1; 2763 2764 if (empty) { 2765 push_items = min(src_nritems, push_items); 2766 if (push_items < src_nritems) { 2767 /* leave at least 8 pointers in the node if 2768 * we aren't going to empty it 2769 */ 2770 if (src_nritems - push_items < 8) { 2771 if (push_items <= 8) 2772 return 1; 2773 push_items -= 8; 2774 } 2775 } 2776 } else 2777 push_items = min(src_nritems - 8, push_items); 2778 2779 /* dst is the left eb, src is the middle eb */ 2780 if (check_sibling_keys(dst, src)) { 2781 ret = -EUCLEAN; 2782 btrfs_abort_transaction(trans, ret); 2783 return ret; 2784 } 2785 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items); 2786 if (ret) { 2787 btrfs_abort_transaction(trans, ret); 2788 return ret; 2789 } 2790 copy_extent_buffer(dst, src, 2791 btrfs_node_key_ptr_offset(dst, dst_nritems), 2792 btrfs_node_key_ptr_offset(src, 0), 2793 push_items * sizeof(struct btrfs_key_ptr)); 2794 2795 if (push_items < src_nritems) { 2796 /* 2797 * btrfs_tree_mod_log_eb_copy handles logging the move, so we 2798 * don't need to do an explicit tree mod log operation for it. 
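 *
 * To illustrate with a small example (made-up contents, ignoring the
 * minimum-item thresholds above): with dst = [A B], src = [C D E F] and
 * push_items == 2, the copy above appended C and D after B in dst, and
 * this memmove then slides E and F down so src becomes [E F] before the
 * nritems fields of both nodes are updated.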
2799 */ 2800 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0), 2801 btrfs_node_key_ptr_offset(src, push_items), 2802 (src_nritems - push_items) * 2803 sizeof(struct btrfs_key_ptr)); 2804 } 2805 btrfs_set_header_nritems(src, src_nritems - push_items); 2806 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2807 btrfs_mark_buffer_dirty(trans, src); 2808 btrfs_mark_buffer_dirty(trans, dst); 2809 2810 return ret; 2811 } 2812 2813 /* 2814 * try to push data from one node into the next node right in the 2815 * tree. 2816 * 2817 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 2818 * error, and > 0 if there was no room in the right hand block. 2819 * 2820 * this will only push up to 1/2 the contents of the left node over 2821 */ 2822 static int balance_node_right(struct btrfs_trans_handle *trans, 2823 struct extent_buffer *dst, 2824 struct extent_buffer *src) 2825 { 2826 struct btrfs_fs_info *fs_info = trans->fs_info; 2827 int push_items = 0; 2828 int max_push; 2829 int src_nritems; 2830 int dst_nritems; 2831 int ret = 0; 2832 2833 WARN_ON(btrfs_header_generation(src) != trans->transid); 2834 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2835 2836 src_nritems = btrfs_header_nritems(src); 2837 dst_nritems = btrfs_header_nritems(dst); 2838 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2839 if (push_items <= 0) 2840 return 1; 2841 2842 if (src_nritems < 4) 2843 return 1; 2844 2845 max_push = src_nritems / 2 + 1; 2846 /* don't try to empty the node */ 2847 if (max_push >= src_nritems) 2848 return 1; 2849 2850 if (max_push < push_items) 2851 push_items = max_push; 2852 2853 /* dst is the right eb, src is the middle eb */ 2854 if (check_sibling_keys(src, dst)) { 2855 ret = -EUCLEAN; 2856 btrfs_abort_transaction(trans, ret); 2857 return ret; 2858 } 2859 2860 /* 2861 * btrfs_tree_mod_log_eb_copy handles logging the move, so we don't 2862 * need to do an explicit tree mod log operation for it. 2863 */ 2864 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items), 2865 btrfs_node_key_ptr_offset(dst, 0), 2866 (dst_nritems) * 2867 sizeof(struct btrfs_key_ptr)); 2868 2869 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items, 2870 push_items); 2871 if (ret) { 2872 btrfs_abort_transaction(trans, ret); 2873 return ret; 2874 } 2875 copy_extent_buffer(dst, src, 2876 btrfs_node_key_ptr_offset(dst, 0), 2877 btrfs_node_key_ptr_offset(src, src_nritems - push_items), 2878 push_items * sizeof(struct btrfs_key_ptr)); 2879 2880 btrfs_set_header_nritems(src, src_nritems - push_items); 2881 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2882 2883 btrfs_mark_buffer_dirty(trans, src); 2884 btrfs_mark_buffer_dirty(trans, dst); 2885 2886 return ret; 2887 } 2888 2889 /* 2890 * helper function to insert a new root level in the tree. 2891 * A new node is allocated, and a single item is inserted to 2892 * point to the existing root 2893 * 2894 * returns zero on success or < 0 on failure. 
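 *
 * Illustration (B = old root at level N, C = new root at level N + 1):
 *
 *      before:    B              after:    C
 *                / \                       |
 *              ...  ...                    B
 *                                         / \
 *                                       ...  ...
 *
 * C is allocated with a single key pointer whose key is the first key of
 * B and whose blockptr is B's start, and root->node is switched over to C.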
2895 */ 2896 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 2897 struct btrfs_root *root, 2898 struct btrfs_path *path, int level) 2899 { 2900 u64 lower_gen; 2901 struct extent_buffer *lower; 2902 struct extent_buffer *c; 2903 struct extent_buffer *old; 2904 struct btrfs_disk_key lower_key; 2905 int ret; 2906 2907 BUG_ON(path->nodes[level]); 2908 BUG_ON(path->nodes[level-1] != root->node); 2909 2910 lower = path->nodes[level-1]; 2911 if (level == 1) 2912 btrfs_item_key(lower, &lower_key, 0); 2913 else 2914 btrfs_node_key(lower, &lower_key, 0); 2915 2916 c = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root), 2917 &lower_key, level, root->node->start, 0, 2918 0, BTRFS_NESTING_NEW_ROOT); 2919 if (IS_ERR(c)) 2920 return PTR_ERR(c); 2921 2922 root_add_used_bytes(root); 2923 2924 btrfs_set_header_nritems(c, 1); 2925 btrfs_set_node_key(c, &lower_key, 0); 2926 btrfs_set_node_blockptr(c, 0, lower->start); 2927 lower_gen = btrfs_header_generation(lower); 2928 WARN_ON(lower_gen != trans->transid); 2929 2930 btrfs_set_node_ptr_generation(c, 0, lower_gen); 2931 2932 btrfs_mark_buffer_dirty(trans, c); 2933 2934 old = root->node; 2935 ret = btrfs_tree_mod_log_insert_root(root->node, c, false); 2936 if (ret < 0) { 2937 int ret2; 2938 2939 ret2 = btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1); 2940 if (ret2 < 0) 2941 btrfs_abort_transaction(trans, ret2); 2942 btrfs_tree_unlock(c); 2943 free_extent_buffer(c); 2944 return ret; 2945 } 2946 rcu_assign_pointer(root->node, c); 2947 2948 /* the super has an extra ref to root->node */ 2949 free_extent_buffer(old); 2950 2951 add_root_to_dirty_list(root); 2952 atomic_inc(&c->refs); 2953 path->nodes[level] = c; 2954 path->locks[level] = BTRFS_WRITE_LOCK; 2955 path->slots[level] = 0; 2956 return 0; 2957 } 2958 2959 /* 2960 * worker function to insert a single pointer in a node. 2961 * the node should have enough room for the pointer already 2962 * 2963 * slot and level indicate where you want the key to go, and 2964 * blocknr is the block the key points to. 
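 *
 * For example (illustrative): inserting key X at slot 1 of a node holding
 * [K0 K1 K2] first memmoves K1 and K2 one slot to the right, giving
 * [K0 _ K1 K2], and then writes X, its blockptr and its generation into
 * the freed slot: [K0 X K1 K2]. nritems becomes 4.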
2965 */ 2966 static int insert_ptr(struct btrfs_trans_handle *trans, 2967 const struct btrfs_path *path, 2968 const struct btrfs_disk_key *key, u64 bytenr, 2969 int slot, int level) 2970 { 2971 struct extent_buffer *lower; 2972 int nritems; 2973 int ret; 2974 2975 BUG_ON(!path->nodes[level]); 2976 btrfs_assert_tree_write_locked(path->nodes[level]); 2977 lower = path->nodes[level]; 2978 nritems = btrfs_header_nritems(lower); 2979 BUG_ON(slot > nritems); 2980 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info)); 2981 if (slot != nritems) { 2982 if (level) { 2983 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1, 2984 slot, nritems - slot); 2985 if (ret < 0) { 2986 btrfs_abort_transaction(trans, ret); 2987 return ret; 2988 } 2989 } 2990 memmove_extent_buffer(lower, 2991 btrfs_node_key_ptr_offset(lower, slot + 1), 2992 btrfs_node_key_ptr_offset(lower, slot), 2993 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 2994 } 2995 if (level) { 2996 ret = btrfs_tree_mod_log_insert_key(lower, slot, 2997 BTRFS_MOD_LOG_KEY_ADD); 2998 if (ret < 0) { 2999 btrfs_abort_transaction(trans, ret); 3000 return ret; 3001 } 3002 } 3003 btrfs_set_node_key(lower, key, slot); 3004 btrfs_set_node_blockptr(lower, slot, bytenr); 3005 WARN_ON(trans->transid == 0); 3006 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 3007 btrfs_set_header_nritems(lower, nritems + 1); 3008 btrfs_mark_buffer_dirty(trans, lower); 3009 3010 return 0; 3011 } 3012 3013 /* 3014 * split the node at the specified level in path in two. 3015 * The path is corrected to point to the appropriate node after the split 3016 * 3017 * Before splitting this tries to make some room in the node by pushing 3018 * left and right, if either one works, it returns right away. 3019 * 3020 * returns 0 on success and < 0 on failure 3021 */ 3022 static noinline int split_node(struct btrfs_trans_handle *trans, 3023 struct btrfs_root *root, 3024 struct btrfs_path *path, int level) 3025 { 3026 struct btrfs_fs_info *fs_info = root->fs_info; 3027 struct extent_buffer *c; 3028 struct extent_buffer *split; 3029 struct btrfs_disk_key disk_key; 3030 int mid; 3031 int ret; 3032 u32 c_nritems; 3033 3034 c = path->nodes[level]; 3035 WARN_ON(btrfs_header_generation(c) != trans->transid); 3036 if (c == root->node) { 3037 /* 3038 * trying to split the root, let's make a new one 3039 * 3040 * tree mod log: We don't log the removal of the old root in 3041 * insert_new_root, because that root buffer will be kept as a 3042 * normal node. We are going to log removal of half of the 3043 * elements below with btrfs_tree_mod_log_eb_copy(). We're 3044 * holding a tree lock on the buffer, which is why we cannot 3045 * race with other tree_mod_log users.
3046 */ 3047 ret = insert_new_root(trans, root, path, level + 1); 3048 if (ret) 3049 return ret; 3050 } else { 3051 ret = push_nodes_for_insert(trans, root, path, level); 3052 c = path->nodes[level]; 3053 if (!ret && btrfs_header_nritems(c) < 3054 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) 3055 return 0; 3056 if (ret < 0) 3057 return ret; 3058 } 3059 3060 c_nritems = btrfs_header_nritems(c); 3061 mid = (c_nritems + 1) / 2; 3062 btrfs_node_key(c, &disk_key, mid); 3063 3064 split = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root), 3065 &disk_key, level, c->start, 0, 3066 0, BTRFS_NESTING_SPLIT); 3067 if (IS_ERR(split)) 3068 return PTR_ERR(split); 3069 3070 root_add_used_bytes(root); 3071 ASSERT(btrfs_header_level(c) == level); 3072 3073 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); 3074 if (ret) { 3075 btrfs_tree_unlock(split); 3076 free_extent_buffer(split); 3077 btrfs_abort_transaction(trans, ret); 3078 return ret; 3079 } 3080 copy_extent_buffer(split, c, 3081 btrfs_node_key_ptr_offset(split, 0), 3082 btrfs_node_key_ptr_offset(c, mid), 3083 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 3084 btrfs_set_header_nritems(split, c_nritems - mid); 3085 btrfs_set_header_nritems(c, mid); 3086 3087 btrfs_mark_buffer_dirty(trans, c); 3088 btrfs_mark_buffer_dirty(trans, split); 3089 3090 ret = insert_ptr(trans, path, &disk_key, split->start, 3091 path->slots[level + 1] + 1, level + 1); 3092 if (ret < 0) { 3093 btrfs_tree_unlock(split); 3094 free_extent_buffer(split); 3095 return ret; 3096 } 3097 3098 if (path->slots[level] >= mid) { 3099 path->slots[level] -= mid; 3100 btrfs_tree_unlock(c); 3101 free_extent_buffer(c); 3102 path->nodes[level] = split; 3103 path->slots[level + 1] += 1; 3104 } else { 3105 btrfs_tree_unlock(split); 3106 free_extent_buffer(split); 3107 } 3108 return 0; 3109 } 3110 3111 /* 3112 * how many bytes are required to store the items in a leaf. start 3113 * and nr indicate which items in the leaf to check. This totals up the 3114 * space used both by the item structs and the item data 3115 */ 3116 static int leaf_space_used(const struct extent_buffer *l, int start, int nr) 3117 { 3118 int data_len; 3119 int nritems = btrfs_header_nritems(l); 3120 int end = min(nritems, start + nr) - 1; 3121 3122 if (!nr) 3123 return 0; 3124 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start); 3125 data_len = data_len - btrfs_item_offset(l, end); 3126 data_len += sizeof(struct btrfs_item) * nr; 3127 WARN_ON(data_len < 0); 3128 return data_len; 3129 } 3130 3131 /* 3132 * The space between the end of the leaf items and 3133 * the start of the leaf data. IOW, how much room 3134 * the leaf has left for both items and data 3135 */ 3136 int btrfs_leaf_free_space(const struct extent_buffer *leaf) 3137 { 3138 struct btrfs_fs_info *fs_info = leaf->fs_info; 3139 int nritems = btrfs_header_nritems(leaf); 3140 int ret; 3141 3142 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); 3143 if (ret < 0) { 3144 btrfs_crit(fs_info, 3145 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 3146 ret, 3147 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info), 3148 leaf_space_used(leaf, 0, nritems), nritems); 3149 } 3150 return ret; 3151 } 3152 3153 /* 3154 * min slot controls the lowest index we're willing to push to the 3155 * right. 
We'll push up to and including min_slot, but no lower 3156 */ 3157 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, 3158 struct btrfs_path *path, 3159 int data_size, int empty, 3160 struct extent_buffer *right, 3161 int free_space, u32 left_nritems, 3162 u32 min_slot) 3163 { 3164 struct btrfs_fs_info *fs_info = right->fs_info; 3165 struct extent_buffer *left = path->nodes[0]; 3166 struct extent_buffer *upper = path->nodes[1]; 3167 struct btrfs_map_token token; 3168 struct btrfs_disk_key disk_key; 3169 int slot; 3170 u32 i; 3171 int push_space = 0; 3172 int push_items = 0; 3173 u32 nr; 3174 u32 right_nritems; 3175 u32 data_end; 3176 u32 this_item_size; 3177 3178 if (empty) 3179 nr = 0; 3180 else 3181 nr = max_t(u32, 1, min_slot); 3182 3183 if (path->slots[0] >= left_nritems) 3184 push_space += data_size; 3185 3186 slot = path->slots[1]; 3187 i = left_nritems - 1; 3188 while (i >= nr) { 3189 if (!empty && push_items > 0) { 3190 if (path->slots[0] > i) 3191 break; 3192 if (path->slots[0] == i) { 3193 int space = btrfs_leaf_free_space(left); 3194 3195 if (space + push_space * 2 > free_space) 3196 break; 3197 } 3198 } 3199 3200 if (path->slots[0] == i) 3201 push_space += data_size; 3202 3203 this_item_size = btrfs_item_size(left, i); 3204 if (this_item_size + sizeof(struct btrfs_item) + 3205 push_space > free_space) 3206 break; 3207 3208 push_items++; 3209 push_space += this_item_size + sizeof(struct btrfs_item); 3210 if (i == 0) 3211 break; 3212 i--; 3213 } 3214 3215 if (push_items == 0) 3216 goto out_unlock; 3217 3218 WARN_ON(!empty && push_items == left_nritems); 3219 3220 /* push left to right */ 3221 right_nritems = btrfs_header_nritems(right); 3222 3223 push_space = btrfs_item_data_end(left, left_nritems - push_items); 3224 push_space -= leaf_data_end(left); 3225 3226 /* make room in the right data area */ 3227 data_end = leaf_data_end(right); 3228 memmove_leaf_data(right, data_end - push_space, data_end, 3229 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end); 3230 3231 /* copy from the left data area */ 3232 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3233 leaf_data_end(left), push_space); 3234 3235 memmove_leaf_items(right, push_items, 0, right_nritems); 3236 3237 /* copy the items from left to right */ 3238 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items); 3239 3240 /* update the item pointers */ 3241 btrfs_init_map_token(&token, right); 3242 right_nritems += push_items; 3243 btrfs_set_header_nritems(right, right_nritems); 3244 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3245 for (i = 0; i < right_nritems; i++) { 3246 push_space -= btrfs_token_item_size(&token, i); 3247 btrfs_set_token_item_offset(&token, i, push_space); 3248 } 3249 3250 left_nritems -= push_items; 3251 btrfs_set_header_nritems(left, left_nritems); 3252 3253 if (left_nritems) 3254 btrfs_mark_buffer_dirty(trans, left); 3255 else 3256 btrfs_clear_buffer_dirty(trans, left); 3257 3258 btrfs_mark_buffer_dirty(trans, right); 3259 3260 btrfs_item_key(right, &disk_key, 0); 3261 btrfs_set_node_key(upper, &disk_key, slot + 1); 3262 btrfs_mark_buffer_dirty(trans, upper); 3263 3264 /* then fixup the leaf pointer in the path */ 3265 if (path->slots[0] >= left_nritems) { 3266 path->slots[0] -= left_nritems; 3267 if (btrfs_header_nritems(path->nodes[0]) == 0) 3268 btrfs_clear_buffer_dirty(trans, path->nodes[0]); 3269 btrfs_tree_unlock(path->nodes[0]); 3270 free_extent_buffer(path->nodes[0]); 3271 path->nodes[0] = right; 3272 path->slots[1] += 1; 3273 } else { 3274 
btrfs_tree_unlock(right); 3275 free_extent_buffer(right); 3276 } 3277 return 0; 3278 3279 out_unlock: 3280 btrfs_tree_unlock(right); 3281 free_extent_buffer(right); 3282 return 1; 3283 } 3284 3285 /* 3286 * push some data in the path leaf to the right, trying to free up at 3287 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3288 * 3289 * returns 1 if the push failed because the other node didn't have enough 3290 * room, 0 if everything worked out and < 0 if there were major errors. 3291 * 3292 * this will push starting from min_slot to the end of the leaf. It won't 3293 * push any slot lower than min_slot 3294 */ 3295 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3296 *root, struct btrfs_path *path, 3297 int min_data_size, int data_size, 3298 int empty, u32 min_slot) 3299 { 3300 struct extent_buffer *left = path->nodes[0]; 3301 struct extent_buffer *right; 3302 struct extent_buffer *upper; 3303 int slot; 3304 int free_space; 3305 u32 left_nritems; 3306 int ret; 3307 3308 if (!path->nodes[1]) 3309 return 1; 3310 3311 slot = path->slots[1]; 3312 upper = path->nodes[1]; 3313 if (slot >= btrfs_header_nritems(upper) - 1) 3314 return 1; 3315 3316 btrfs_assert_tree_write_locked(path->nodes[1]); 3317 3318 right = btrfs_read_node_slot(upper, slot + 1); 3319 if (IS_ERR(right)) 3320 return PTR_ERR(right); 3321 3322 btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT); 3323 3324 free_space = btrfs_leaf_free_space(right); 3325 if (free_space < data_size) 3326 goto out_unlock; 3327 3328 ret = btrfs_cow_block(trans, root, right, upper, 3329 slot + 1, &right, BTRFS_NESTING_RIGHT_COW); 3330 if (ret) 3331 goto out_unlock; 3332 3333 left_nritems = btrfs_header_nritems(left); 3334 if (left_nritems == 0) 3335 goto out_unlock; 3336 3337 if (check_sibling_keys(left, right)) { 3338 ret = -EUCLEAN; 3339 btrfs_abort_transaction(trans, ret); 3340 btrfs_tree_unlock(right); 3341 free_extent_buffer(right); 3342 return ret; 3343 } 3344 if (path->slots[0] == left_nritems && !empty) { 3345 /* Key greater than all keys in the leaf, right neighbor has 3346 * enough room for it and we're not emptying our leaf to delete 3347 * it, therefore use right neighbor to insert the new item and 3348 * no need to touch/dirty our left leaf. */ 3349 btrfs_tree_unlock(left); 3350 free_extent_buffer(left); 3351 path->nodes[0] = right; 3352 path->slots[0] = 0; 3353 path->slots[1]++; 3354 return 0; 3355 } 3356 3357 return __push_leaf_right(trans, path, min_data_size, empty, right, 3358 free_space, left_nritems, min_slot); 3359 out_unlock: 3360 btrfs_tree_unlock(right); 3361 free_extent_buffer(right); 3362 return 1; 3363 } 3364 3365 /* 3366 * push some data in the path leaf to the left, trying to free up at 3367 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3368 * 3369 * max_slot can put a limit on how far into the leaf we'll push items. The 3370 * item at 'max_slot' won't be touched. 
Use (u32)-1 to make us do all the 3371 * items 3372 */ 3373 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, 3374 struct btrfs_path *path, int data_size, 3375 int empty, struct extent_buffer *left, 3376 int free_space, u32 right_nritems, 3377 u32 max_slot) 3378 { 3379 struct btrfs_fs_info *fs_info = left->fs_info; 3380 struct btrfs_disk_key disk_key; 3381 struct extent_buffer *right = path->nodes[0]; 3382 int i; 3383 int push_space = 0; 3384 int push_items = 0; 3385 u32 old_left_nritems; 3386 u32 nr; 3387 int ret = 0; 3388 u32 this_item_size; 3389 u32 old_left_item_size; 3390 struct btrfs_map_token token; 3391 3392 if (empty) 3393 nr = min(right_nritems, max_slot); 3394 else 3395 nr = min(right_nritems - 1, max_slot); 3396 3397 for (i = 0; i < nr; i++) { 3398 if (!empty && push_items > 0) { 3399 if (path->slots[0] < i) 3400 break; 3401 if (path->slots[0] == i) { 3402 int space = btrfs_leaf_free_space(right); 3403 3404 if (space + push_space * 2 > free_space) 3405 break; 3406 } 3407 } 3408 3409 if (path->slots[0] == i) 3410 push_space += data_size; 3411 3412 this_item_size = btrfs_item_size(right, i); 3413 if (this_item_size + sizeof(struct btrfs_item) + push_space > 3414 free_space) 3415 break; 3416 3417 push_items++; 3418 push_space += this_item_size + sizeof(struct btrfs_item); 3419 } 3420 3421 if (push_items == 0) { 3422 ret = 1; 3423 goto out; 3424 } 3425 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 3426 3427 /* push data from right to left */ 3428 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items); 3429 3430 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) - 3431 btrfs_item_offset(right, push_items - 1); 3432 3433 copy_leaf_data(left, right, leaf_data_end(left) - push_space, 3434 btrfs_item_offset(right, push_items - 1), push_space); 3435 old_left_nritems = btrfs_header_nritems(left); 3436 BUG_ON(old_left_nritems <= 0); 3437 3438 btrfs_init_map_token(&token, left); 3439 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1); 3440 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3441 u32 ioff; 3442 3443 ioff = btrfs_token_item_offset(&token, i); 3444 btrfs_set_token_item_offset(&token, i, 3445 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 3446 } 3447 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3448 3449 /* fixup right node */ 3450 if (push_items > right_nritems) 3451 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3452 right_nritems); 3453 3454 if (push_items < right_nritems) { 3455 push_space = btrfs_item_offset(right, push_items - 1) - 3456 leaf_data_end(right); 3457 memmove_leaf_data(right, 3458 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3459 leaf_data_end(right), push_space); 3460 3461 memmove_leaf_items(right, 0, push_items, 3462 btrfs_header_nritems(right) - push_items); 3463 } 3464 3465 btrfs_init_map_token(&token, right); 3466 right_nritems -= push_items; 3467 btrfs_set_header_nritems(right, right_nritems); 3468 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3469 for (i = 0; i < right_nritems; i++) { 3470 push_space = push_space - btrfs_token_item_size(&token, i); 3471 btrfs_set_token_item_offset(&token, i, push_space); 3472 } 3473 3474 btrfs_mark_buffer_dirty(trans, left); 3475 if (right_nritems) 3476 btrfs_mark_buffer_dirty(trans, right); 3477 else 3478 btrfs_clear_buffer_dirty(trans, right); 3479 3480 btrfs_item_key(right, &disk_key, 0); 3481 fixup_low_keys(trans, path, &disk_key, 1); 3482 3483 /* then fixup the leaf pointer in the path */ 3484 if 
(path->slots[0] < push_items) { 3485 path->slots[0] += old_left_nritems; 3486 btrfs_tree_unlock(path->nodes[0]); 3487 free_extent_buffer(path->nodes[0]); 3488 path->nodes[0] = left; 3489 path->slots[1] -= 1; 3490 } else { 3491 btrfs_tree_unlock(left); 3492 free_extent_buffer(left); 3493 path->slots[0] -= push_items; 3494 } 3495 BUG_ON(path->slots[0] < 0); 3496 return ret; 3497 out: 3498 btrfs_tree_unlock(left); 3499 free_extent_buffer(left); 3500 return ret; 3501 } 3502 3503 /* 3504 * push some data in the path leaf to the left, trying to free up at 3505 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3506 * 3507 * max_slot can put a limit on how far into the leaf we'll push items. The 3508 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3509 * items 3510 */ 3511 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3512 *root, struct btrfs_path *path, int min_data_size, 3513 int data_size, int empty, u32 max_slot) 3514 { 3515 struct extent_buffer *right = path->nodes[0]; 3516 struct extent_buffer *left; 3517 int slot; 3518 int free_space; 3519 u32 right_nritems; 3520 int ret = 0; 3521 3522 slot = path->slots[1]; 3523 if (slot == 0) 3524 return 1; 3525 if (!path->nodes[1]) 3526 return 1; 3527 3528 right_nritems = btrfs_header_nritems(right); 3529 if (right_nritems == 0) 3530 return 1; 3531 3532 btrfs_assert_tree_write_locked(path->nodes[1]); 3533 3534 left = btrfs_read_node_slot(path->nodes[1], slot - 1); 3535 if (IS_ERR(left)) 3536 return PTR_ERR(left); 3537 3538 btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT); 3539 3540 free_space = btrfs_leaf_free_space(left); 3541 if (free_space < data_size) { 3542 ret = 1; 3543 goto out; 3544 } 3545 3546 ret = btrfs_cow_block(trans, root, left, 3547 path->nodes[1], slot - 1, &left, 3548 BTRFS_NESTING_LEFT_COW); 3549 if (ret) { 3550 /* we hit -ENOSPC, but it isn't fatal here */ 3551 if (ret == -ENOSPC) 3552 ret = 1; 3553 goto out; 3554 } 3555 3556 if (check_sibling_keys(left, right)) { 3557 ret = -EUCLEAN; 3558 btrfs_abort_transaction(trans, ret); 3559 goto out; 3560 } 3561 return __push_leaf_left(trans, path, min_data_size, empty, left, 3562 free_space, right_nritems, max_slot); 3563 out: 3564 btrfs_tree_unlock(left); 3565 free_extent_buffer(left); 3566 return ret; 3567 } 3568 3569 /* 3570 * split the path's leaf in two, making sure there is at least data_size 3571 * available for the resulting leaf level of the path. 
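 *
 * A worked example of the offset fixup done in copy_for_split() below
 * (illustrative numbers, assuming a 16384 byte data area): if the items
 * moved to the right leaf had their data in [8000, 12000) of the old
 * leaf, the 4000 data bytes are copied to [12384, 16384), the end of
 * the right leaf's data area, and rt_data_off = 16384 - 12000 = 4384 is
 * added to every moved item's offset so the item headers keep pointing
 * at their data.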
3572 */ 3573 static noinline int copy_for_split(struct btrfs_trans_handle *trans, 3574 struct btrfs_path *path, 3575 struct extent_buffer *l, 3576 struct extent_buffer *right, 3577 int slot, int mid, int nritems) 3578 { 3579 struct btrfs_fs_info *fs_info = trans->fs_info; 3580 int data_copy_size; 3581 int rt_data_off; 3582 int i; 3583 int ret; 3584 struct btrfs_disk_key disk_key; 3585 struct btrfs_map_token token; 3586 3587 nritems = nritems - mid; 3588 btrfs_set_header_nritems(right, nritems); 3589 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l); 3590 3591 copy_leaf_items(right, l, 0, mid, nritems); 3592 3593 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size, 3594 leaf_data_end(l), data_copy_size); 3595 3596 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid); 3597 3598 btrfs_init_map_token(&token, right); 3599 for (i = 0; i < nritems; i++) { 3600 u32 ioff; 3601 3602 ioff = btrfs_token_item_offset(&token, i); 3603 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off); 3604 } 3605 3606 btrfs_set_header_nritems(l, mid); 3607 btrfs_item_key(right, &disk_key, 0); 3608 ret = insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); 3609 if (ret < 0) 3610 return ret; 3611 3612 btrfs_mark_buffer_dirty(trans, right); 3613 btrfs_mark_buffer_dirty(trans, l); 3614 BUG_ON(path->slots[0] != slot); 3615 3616 if (mid <= slot) { 3617 btrfs_tree_unlock(path->nodes[0]); 3618 free_extent_buffer(path->nodes[0]); 3619 path->nodes[0] = right; 3620 path->slots[0] -= mid; 3621 path->slots[1] += 1; 3622 } else { 3623 btrfs_tree_unlock(right); 3624 free_extent_buffer(right); 3625 } 3626 3627 BUG_ON(path->slots[0] < 0); 3628 3629 return 0; 3630 } 3631 3632 /* 3633 * double splits happen when we need to insert a big item in the middle 3634 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3635 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3636 * A B C 3637 * 3638 * We avoid this by trying to push the items on either side of our target 3639 * into the adjacent leaves. If all goes well we can avoid the double split 3640 * completely. 3641 */ 3642 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3643 struct btrfs_root *root, 3644 struct btrfs_path *path, 3645 int data_size) 3646 { 3647 int ret; 3648 int progress = 0; 3649 int slot; 3650 u32 nritems; 3651 int space_needed = data_size; 3652 3653 slot = path->slots[0]; 3654 if (slot < btrfs_header_nritems(path->nodes[0])) 3655 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3656 3657 /* 3658 * try to push all the items after our slot into the 3659 * right leaf 3660 */ 3661 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 3662 if (ret < 0) 3663 return ret; 3664 3665 if (ret == 0) 3666 progress++; 3667 3668 nritems = btrfs_header_nritems(path->nodes[0]); 3669 /* 3670 * our goal is to get our slot at the start or end of a leaf. 
If 3671 * we've done so we're done 3672 */ 3673 if (path->slots[0] == 0 || path->slots[0] == nritems) 3674 return 0; 3675 3676 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3677 return 0; 3678 3679 /* try to push all the items before our slot into the next leaf */ 3680 slot = path->slots[0]; 3681 space_needed = data_size; 3682 if (slot > 0) 3683 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3684 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 3685 if (ret < 0) 3686 return ret; 3687 3688 if (ret == 0) 3689 progress++; 3690 3691 if (progress) 3692 return 0; 3693 return 1; 3694 } 3695 3696 /* 3697 * split the path's leaf in two, making sure there is at least data_size 3698 * available for the resulting leaf level of the path. 3699 * 3700 * returns 0 if all went well and < 0 on failure. 3701 */ 3702 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3703 struct btrfs_root *root, 3704 const struct btrfs_key *ins_key, 3705 struct btrfs_path *path, int data_size, 3706 int extend) 3707 { 3708 struct btrfs_disk_key disk_key; 3709 struct extent_buffer *l; 3710 u32 nritems; 3711 int mid; 3712 int slot; 3713 struct extent_buffer *right; 3714 struct btrfs_fs_info *fs_info = root->fs_info; 3715 int ret = 0; 3716 int wret; 3717 int split; 3718 int num_doubles = 0; 3719 int tried_avoid_double = 0; 3720 3721 l = path->nodes[0]; 3722 slot = path->slots[0]; 3723 if (extend && data_size + btrfs_item_size(l, slot) + 3724 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info)) 3725 return -EOVERFLOW; 3726 3727 /* first try to make some room by pushing left and right */ 3728 if (data_size && path->nodes[1]) { 3729 int space_needed = data_size; 3730 3731 if (slot < btrfs_header_nritems(l)) 3732 space_needed -= btrfs_leaf_free_space(l); 3733 3734 wret = push_leaf_right(trans, root, path, space_needed, 3735 space_needed, 0, 0); 3736 if (wret < 0) 3737 return wret; 3738 if (wret) { 3739 space_needed = data_size; 3740 if (slot > 0) 3741 space_needed -= btrfs_leaf_free_space(l); 3742 wret = push_leaf_left(trans, root, path, space_needed, 3743 space_needed, 0, (u32)-1); 3744 if (wret < 0) 3745 return wret; 3746 } 3747 l = path->nodes[0]; 3748 3749 /* did the pushes work? 
*/ 3750 if (btrfs_leaf_free_space(l) >= data_size) 3751 return 0; 3752 } 3753 3754 if (!path->nodes[1]) { 3755 ret = insert_new_root(trans, root, path, 1); 3756 if (ret) 3757 return ret; 3758 } 3759 again: 3760 split = 1; 3761 l = path->nodes[0]; 3762 slot = path->slots[0]; 3763 nritems = btrfs_header_nritems(l); 3764 mid = (nritems + 1) / 2; 3765 3766 if (mid <= slot) { 3767 if (nritems == 1 || 3768 leaf_space_used(l, mid, nritems - mid) + data_size > 3769 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3770 if (slot >= nritems) { 3771 split = 0; 3772 } else { 3773 mid = slot; 3774 if (mid != nritems && 3775 leaf_space_used(l, mid, nritems - mid) + 3776 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3777 if (data_size && !tried_avoid_double) 3778 goto push_for_double; 3779 split = 2; 3780 } 3781 } 3782 } 3783 } else { 3784 if (leaf_space_used(l, 0, mid) + data_size > 3785 BTRFS_LEAF_DATA_SIZE(fs_info)) { 3786 if (!extend && data_size && slot == 0) { 3787 split = 0; 3788 } else if ((extend || !data_size) && slot == 0) { 3789 mid = 1; 3790 } else { 3791 mid = slot; 3792 if (mid != nritems && 3793 leaf_space_used(l, mid, nritems - mid) + 3794 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) { 3795 if (data_size && !tried_avoid_double) 3796 goto push_for_double; 3797 split = 2; 3798 } 3799 } 3800 } 3801 } 3802 3803 if (split == 0) 3804 btrfs_cpu_key_to_disk(&disk_key, ins_key); 3805 else 3806 btrfs_item_key(l, &disk_key, mid); 3807 3808 /* 3809 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double 3810 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES 3811 * subclasses, which is 8 at the time of this patch, and we've maxed it 3812 * out. In the future we could add a 3813 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just 3814 * use BTRFS_NESTING_NEW_ROOT. 3815 */ 3816 right = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root), 3817 &disk_key, 0, l->start, 0, 0, 3818 num_doubles ? BTRFS_NESTING_NEW_ROOT : 3819 BTRFS_NESTING_SPLIT); 3820 if (IS_ERR(right)) 3821 return PTR_ERR(right); 3822 3823 root_add_used_bytes(root); 3824 3825 if (split == 0) { 3826 if (mid <= slot) { 3827 btrfs_set_header_nritems(right, 0); 3828 ret = insert_ptr(trans, path, &disk_key, 3829 right->start, path->slots[1] + 1, 1); 3830 if (ret < 0) { 3831 btrfs_tree_unlock(right); 3832 free_extent_buffer(right); 3833 return ret; 3834 } 3835 btrfs_tree_unlock(path->nodes[0]); 3836 free_extent_buffer(path->nodes[0]); 3837 path->nodes[0] = right; 3838 path->slots[0] = 0; 3839 path->slots[1] += 1; 3840 } else { 3841 btrfs_set_header_nritems(right, 0); 3842 ret = insert_ptr(trans, path, &disk_key, 3843 right->start, path->slots[1], 1); 3844 if (ret < 0) { 3845 btrfs_tree_unlock(right); 3846 free_extent_buffer(right); 3847 return ret; 3848 } 3849 btrfs_tree_unlock(path->nodes[0]); 3850 free_extent_buffer(path->nodes[0]); 3851 path->nodes[0] = right; 3852 path->slots[0] = 0; 3853 if (path->slots[1] == 0) 3854 fixup_low_keys(trans, path, &disk_key, 1); 3855 } 3856 /* 3857 * We create a new leaf 'right' for the required ins_len and 3858 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying 3859 * the new item into 'right'.
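 *
 * For example (illustrative): appending a key greater than everything
 * in a full leaf ends up here with slot >= nritems, hence split == 0
 * and mid <= slot, so 'right' starts out empty, the path is pointed at
 * right's slot 0 and nothing has to be copied from the old leaf at all.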
3860 */ 3861 return ret; 3862 } 3863 3864 ret = copy_for_split(trans, path, l, right, slot, mid, nritems); 3865 if (ret < 0) { 3866 btrfs_tree_unlock(right); 3867 free_extent_buffer(right); 3868 return ret; 3869 } 3870 3871 if (split == 2) { 3872 BUG_ON(num_doubles != 0); 3873 num_doubles++; 3874 goto again; 3875 } 3876 3877 return 0; 3878 3879 push_for_double: 3880 push_for_double_split(trans, root, path, data_size); 3881 tried_avoid_double = 1; 3882 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3883 return 0; 3884 goto again; 3885 } 3886 3887 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 3888 struct btrfs_root *root, 3889 struct btrfs_path *path, int ins_len) 3890 { 3891 struct btrfs_key key; 3892 struct extent_buffer *leaf; 3893 struct btrfs_file_extent_item *fi; 3894 u64 extent_len = 0; 3895 u32 item_size; 3896 int ret; 3897 3898 leaf = path->nodes[0]; 3899 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3900 3901 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 3902 key.type != BTRFS_EXTENT_CSUM_KEY); 3903 3904 if (btrfs_leaf_free_space(leaf) >= ins_len) 3905 return 0; 3906 3907 item_size = btrfs_item_size(leaf, path->slots[0]); 3908 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3909 fi = btrfs_item_ptr(leaf, path->slots[0], 3910 struct btrfs_file_extent_item); 3911 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3912 } 3913 btrfs_release_path(path); 3914 3915 path->keep_locks = 1; 3916 path->search_for_split = 1; 3917 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3918 path->search_for_split = 0; 3919 if (ret > 0) 3920 ret = -EAGAIN; 3921 if (ret < 0) 3922 goto err; 3923 3924 ret = -EAGAIN; 3925 leaf = path->nodes[0]; 3926 /* if our item isn't there, return now */ 3927 if (item_size != btrfs_item_size(leaf, path->slots[0])) 3928 goto err; 3929 3930 /* the leaf has changed, it now has room. return now */ 3931 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) 3932 goto err; 3933 3934 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3935 fi = btrfs_item_ptr(leaf, path->slots[0], 3936 struct btrfs_file_extent_item); 3937 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 3938 goto err; 3939 } 3940 3941 ret = split_leaf(trans, root, &key, path, ins_len, 1); 3942 if (ret) 3943 goto err; 3944 3945 path->keep_locks = 0; 3946 btrfs_unlock_up_safe(path, 1); 3947 return 0; 3948 err: 3949 path->keep_locks = 0; 3950 return ret; 3951 } 3952 3953 static noinline int split_item(struct btrfs_trans_handle *trans, 3954 struct btrfs_path *path, 3955 const struct btrfs_key *new_key, 3956 unsigned long split_offset) 3957 { 3958 struct extent_buffer *leaf; 3959 int orig_slot, slot; 3960 char *buf; 3961 u32 nritems; 3962 u32 item_size; 3963 u32 orig_offset; 3964 struct btrfs_disk_key disk_key; 3965 3966 leaf = path->nodes[0]; 3967 /* 3968 * Shouldn't happen because the caller must have previously called 3969 * setup_leaf_for_split() to make room for the new item in the leaf. 
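 *
 * A worked example of the bookkeeping below (illustrative numbers): for
 * an item of size 100 at data offset 3000 split at split_offset 60, the
 * new item at the next slot gets offset 3000 and size 40, while the
 * original item's offset becomes 3000 + 100 - 60 = 3040 with size 60,
 * so the first 60 bytes of data stay with the original key and the
 * remaining 40 bytes belong to new_key.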
3970 */
3971 if (WARN_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)))
3972 return -ENOSPC;
3973
3974 orig_slot = path->slots[0];
3975 orig_offset = btrfs_item_offset(leaf, path->slots[0]);
3976 item_size = btrfs_item_size(leaf, path->slots[0]);
3977
3978 buf = kmalloc(item_size, GFP_NOFS);
3979 if (!buf)
3980 return -ENOMEM;
3981
3982 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3983 path->slots[0]), item_size);
3984
3985 slot = path->slots[0] + 1;
3986 nritems = btrfs_header_nritems(leaf);
3987 if (slot != nritems) {
3988 /* shift the items */
3989 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot);
3990 }
3991
3992 btrfs_cpu_key_to_disk(&disk_key, new_key);
3993 btrfs_set_item_key(leaf, &disk_key, slot);
3994
3995 btrfs_set_item_offset(leaf, slot, orig_offset);
3996 btrfs_set_item_size(leaf, slot, item_size - split_offset);
3997
3998 btrfs_set_item_offset(leaf, orig_slot,
3999 orig_offset + item_size - split_offset);
4000 btrfs_set_item_size(leaf, orig_slot, split_offset);
4001
4002 btrfs_set_header_nritems(leaf, nritems + 1);
4003
4004 /* write the data for the start of the original item */
4005 write_extent_buffer(leaf, buf,
4006 btrfs_item_ptr_offset(leaf, path->slots[0]),
4007 split_offset);
4008
4009 /* write the data for the new item */
4010 write_extent_buffer(leaf, buf + split_offset,
4011 btrfs_item_ptr_offset(leaf, slot),
4012 item_size - split_offset);
4013 btrfs_mark_buffer_dirty(trans, leaf);
4014
4015 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
4016 kfree(buf);
4017 return 0;
4018 }
4019
4020 /*
4021 * This function splits a single item into two items,
4022 * giving 'new_key' to the new item and splitting the
4023 * old one at split_offset (from the start of the item).
4024 *
4025 * The path may be released by this operation. After
4026 * the split, the path is pointing to the old item. The
4027 * new item is going to be in the same node as the old one.
4028 *
4029 * Note, the item being split must be small enough to live alone on
4030 * a tree block with room for one extra struct btrfs_item.
4031 *
4032 * This allows us to split the item in place, keeping a lock on the
4033 * leaf the entire time.
4034 */
4035 int btrfs_split_item(struct btrfs_trans_handle *trans,
4036 struct btrfs_root *root,
4037 struct btrfs_path *path,
4038 const struct btrfs_key *new_key,
4039 unsigned long split_offset)
4040 {
4041 int ret;
4042 ret = setup_leaf_for_split(trans, root, path,
4043 sizeof(struct btrfs_item));
4044 if (ret)
4045 return ret;
4046
4047 ret = split_item(trans, path, new_key, split_offset);
4048 return ret;
4049 }
4050
4051 /*
4052 * make the item pointed to by the path smaller. new_size indicates
4053 * how small to make it, and from_end tells us if we just chop bytes
4054 * off the end of the item or if we shift the item to chop bytes off
4055 * the front.
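 *
 * Illustrative example (numbers made up): an item with data offset 100
 * and size 40 truncated to new_size 30 gives size_diff 10, and the
 * recorded offset becomes 110 either way. With from_end set, payload
 * bytes [0, 30) survive and are moved up by 10; with from_end clear,
 * bytes [10, 40) survive in place and the key's offset member is bumped
 * by the 10 dropped bytes.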
4056 */ 4057 void btrfs_truncate_item(struct btrfs_trans_handle *trans, 4058 const struct btrfs_path *path, u32 new_size, int from_end) 4059 { 4060 int slot; 4061 struct extent_buffer *leaf; 4062 u32 nritems; 4063 unsigned int data_end; 4064 unsigned int old_data_start; 4065 unsigned int old_size; 4066 unsigned int size_diff; 4067 int i; 4068 struct btrfs_map_token token; 4069 4070 leaf = path->nodes[0]; 4071 slot = path->slots[0]; 4072 4073 old_size = btrfs_item_size(leaf, slot); 4074 if (old_size == new_size) 4075 return; 4076 4077 nritems = btrfs_header_nritems(leaf); 4078 data_end = leaf_data_end(leaf); 4079 4080 old_data_start = btrfs_item_offset(leaf, slot); 4081 4082 size_diff = old_size - new_size; 4083 4084 BUG_ON(slot < 0); 4085 BUG_ON(slot >= nritems); 4086 4087 /* 4088 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4089 */ 4090 /* first correct the data pointers */ 4091 btrfs_init_map_token(&token, leaf); 4092 for (i = slot; i < nritems; i++) { 4093 u32 ioff; 4094 4095 ioff = btrfs_token_item_offset(&token, i); 4096 btrfs_set_token_item_offset(&token, i, ioff + size_diff); 4097 } 4098 4099 /* shift the data */ 4100 if (from_end) { 4101 memmove_leaf_data(leaf, data_end + size_diff, data_end, 4102 old_data_start + new_size - data_end); 4103 } else { 4104 struct btrfs_disk_key disk_key; 4105 u64 offset; 4106 4107 btrfs_item_key(leaf, &disk_key, slot); 4108 4109 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) { 4110 unsigned long ptr; 4111 struct btrfs_file_extent_item *fi; 4112 4113 fi = btrfs_item_ptr(leaf, slot, 4114 struct btrfs_file_extent_item); 4115 fi = (struct btrfs_file_extent_item *)( 4116 (unsigned long)fi - size_diff); 4117 4118 if (btrfs_file_extent_type(leaf, fi) == 4119 BTRFS_FILE_EXTENT_INLINE) { 4120 ptr = btrfs_item_ptr_offset(leaf, slot); 4121 memmove_extent_buffer(leaf, ptr, 4122 (unsigned long)fi, 4123 BTRFS_FILE_EXTENT_INLINE_DATA_START); 4124 } 4125 } 4126 4127 memmove_leaf_data(leaf, data_end + size_diff, data_end, 4128 old_data_start - data_end); 4129 4130 offset = btrfs_disk_key_offset(&disk_key); 4131 btrfs_set_disk_key_offset(&disk_key, offset + size_diff); 4132 btrfs_set_item_key(leaf, &disk_key, slot); 4133 if (slot == 0) 4134 fixup_low_keys(trans, path, &disk_key, 1); 4135 } 4136 4137 btrfs_set_item_size(leaf, slot, new_size); 4138 btrfs_mark_buffer_dirty(trans, leaf); 4139 4140 if (btrfs_leaf_free_space(leaf) < 0) { 4141 btrfs_print_leaf(leaf); 4142 BUG(); 4143 } 4144 } 4145 4146 /* 4147 * make the item pointed to by the path bigger, data_size is the added size. 4148 */ 4149 void btrfs_extend_item(struct btrfs_trans_handle *trans, 4150 const struct btrfs_path *path, u32 data_size) 4151 { 4152 int slot; 4153 struct extent_buffer *leaf; 4154 u32 nritems; 4155 unsigned int data_end; 4156 unsigned int old_data; 4157 unsigned int old_size; 4158 int i; 4159 struct btrfs_map_token token; 4160 4161 leaf = path->nodes[0]; 4162 4163 nritems = btrfs_header_nritems(leaf); 4164 data_end = leaf_data_end(leaf); 4165 4166 if (btrfs_leaf_free_space(leaf) < data_size) { 4167 btrfs_print_leaf(leaf); 4168 BUG(); 4169 } 4170 slot = path->slots[0]; 4171 old_data = btrfs_item_data_end(leaf, slot); 4172 4173 BUG_ON(slot < 0); 4174 if (slot >= nritems) { 4175 btrfs_print_leaf(leaf); 4176 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d", 4177 slot, nritems); 4178 BUG(); 4179 } 4180 4181 /* 4182 * item0..itemN ... dataN.offset..dataN.size .. 
data0.size 4183 */ 4184 /* first correct the data pointers */ 4185 btrfs_init_map_token(&token, leaf); 4186 for (i = slot; i < nritems; i++) { 4187 u32 ioff; 4188 4189 ioff = btrfs_token_item_offset(&token, i); 4190 btrfs_set_token_item_offset(&token, i, ioff - data_size); 4191 } 4192 4193 /* shift the data */ 4194 memmove_leaf_data(leaf, data_end - data_size, data_end, 4195 old_data - data_end); 4196 4197 data_end = old_data; 4198 old_size = btrfs_item_size(leaf, slot); 4199 btrfs_set_item_size(leaf, slot, old_size + data_size); 4200 btrfs_mark_buffer_dirty(trans, leaf); 4201 4202 if (btrfs_leaf_free_space(leaf) < 0) { 4203 btrfs_print_leaf(leaf); 4204 BUG(); 4205 } 4206 } 4207 4208 /* 4209 * Make space in the node before inserting one or more items. 4210 * 4211 * @trans: transaction handle 4212 * @root: root we are inserting items to 4213 * @path: points to the leaf/slot where we are going to insert new items 4214 * @batch: information about the batch of items to insert 4215 * 4216 * Main purpose is to save stack depth by doing the bulk of the work in a 4217 * function that doesn't call btrfs_search_slot 4218 */ 4219 static void setup_items_for_insert(struct btrfs_trans_handle *trans, 4220 struct btrfs_root *root, struct btrfs_path *path, 4221 const struct btrfs_item_batch *batch) 4222 { 4223 struct btrfs_fs_info *fs_info = root->fs_info; 4224 int i; 4225 u32 nritems; 4226 unsigned int data_end; 4227 struct btrfs_disk_key disk_key; 4228 struct extent_buffer *leaf; 4229 int slot; 4230 struct btrfs_map_token token; 4231 u32 total_size; 4232 4233 /* 4234 * Before anything else, update keys in the parent and other ancestors 4235 * if needed, then release the write locks on them, so that other tasks 4236 * can use them while we modify the leaf. 4237 */ 4238 if (path->slots[0] == 0) { 4239 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]); 4240 fixup_low_keys(trans, path, &disk_key, 1); 4241 } 4242 btrfs_unlock_up_safe(path, 1); 4243 4244 leaf = path->nodes[0]; 4245 slot = path->slots[0]; 4246 4247 nritems = btrfs_header_nritems(leaf); 4248 data_end = leaf_data_end(leaf); 4249 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 4250 4251 if (btrfs_leaf_free_space(leaf) < total_size) { 4252 btrfs_print_leaf(leaf); 4253 btrfs_crit(fs_info, "not enough freespace need %u have %d", 4254 total_size, btrfs_leaf_free_space(leaf)); 4255 BUG(); 4256 } 4257 4258 btrfs_init_map_token(&token, leaf); 4259 if (slot != nritems) { 4260 unsigned int old_data = btrfs_item_data_end(leaf, slot); 4261 4262 if (old_data < data_end) { 4263 btrfs_print_leaf(leaf); 4264 btrfs_crit(fs_info, 4265 "item at slot %d with data offset %u beyond data end of leaf %u", 4266 slot, old_data, data_end); 4267 BUG(); 4268 } 4269 /* 4270 * item0..itemN ... dataN.offset..dataN.size .. 
data0.size 4271 */ 4272 /* first correct the data pointers */ 4273 for (i = slot; i < nritems; i++) { 4274 u32 ioff; 4275 4276 ioff = btrfs_token_item_offset(&token, i); 4277 btrfs_set_token_item_offset(&token, i, 4278 ioff - batch->total_data_size); 4279 } 4280 /* shift the items */ 4281 memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot); 4282 4283 /* shift the data */ 4284 memmove_leaf_data(leaf, data_end - batch->total_data_size, 4285 data_end, old_data - data_end); 4286 data_end = old_data; 4287 } 4288 4289 /* setup the item for the new data */ 4290 for (i = 0; i < batch->nr; i++) { 4291 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]); 4292 btrfs_set_item_key(leaf, &disk_key, slot + i); 4293 data_end -= batch->data_sizes[i]; 4294 btrfs_set_token_item_offset(&token, slot + i, data_end); 4295 btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]); 4296 } 4297 4298 btrfs_set_header_nritems(leaf, nritems + batch->nr); 4299 btrfs_mark_buffer_dirty(trans, leaf); 4300 4301 if (btrfs_leaf_free_space(leaf) < 0) { 4302 btrfs_print_leaf(leaf); 4303 BUG(); 4304 } 4305 } 4306 4307 /* 4308 * Insert a new item into a leaf. 4309 * 4310 * @trans: Transaction handle. 4311 * @root: The root of the btree. 4312 * @path: A path pointing to the target leaf and slot. 4313 * @key: The key of the new item. 4314 * @data_size: The size of the data associated with the new key. 4315 */ 4316 void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans, 4317 struct btrfs_root *root, 4318 struct btrfs_path *path, 4319 const struct btrfs_key *key, 4320 u32 data_size) 4321 { 4322 struct btrfs_item_batch batch; 4323 4324 batch.keys = key; 4325 batch.data_sizes = &data_size; 4326 batch.total_data_size = data_size; 4327 batch.nr = 1; 4328 4329 setup_items_for_insert(trans, root, path, &batch); 4330 } 4331 4332 /* 4333 * Given a key and some data, insert items into the tree. 4334 * This does all the path init required, making room in the tree if needed. 4335 * 4336 * Returns: 0 on success 4337 * -EEXIST if the first key already exists 4338 * < 0 on other errors 4339 */ 4340 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 4341 struct btrfs_root *root, 4342 struct btrfs_path *path, 4343 const struct btrfs_item_batch *batch) 4344 { 4345 int ret = 0; 4346 int slot; 4347 u32 total_size; 4348 4349 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 4350 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1); 4351 if (ret == 0) 4352 return -EEXIST; 4353 if (ret < 0) 4354 return ret; 4355 4356 slot = path->slots[0]; 4357 BUG_ON(slot < 0); 4358 4359 setup_items_for_insert(trans, root, path, batch); 4360 return 0; 4361 } 4362 4363 /* 4364 * Given a key and some data, insert an item into the tree. 4365 * This does all the path init required, making room in the tree if needed. 
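 *
 * Minimal usage sketch; all names other than the btrfs helpers are
 * hypothetical:
 *
 *	struct my_disk_item item = { ... };
 *	struct btrfs_key key = { .objectid = ino, .type = MY_KEY_TYPE,
 *				 .offset = 0 };
 *
 *	ret = btrfs_insert_item(trans, root, &key, &item, sizeof(item));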
4366 */ 4367 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4368 const struct btrfs_key *cpu_key, void *data, 4369 u32 data_size) 4370 { 4371 int ret = 0; 4372 struct btrfs_path *path; 4373 struct extent_buffer *leaf; 4374 unsigned long ptr; 4375 4376 path = btrfs_alloc_path(); 4377 if (!path) 4378 return -ENOMEM; 4379 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); 4380 if (!ret) { 4381 leaf = path->nodes[0]; 4382 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 4383 write_extent_buffer(leaf, data, ptr, data_size); 4384 btrfs_mark_buffer_dirty(trans, leaf); 4385 } 4386 btrfs_free_path(path); 4387 return ret; 4388 } 4389 4390 /* 4391 * This function duplicates an item, giving 'new_key' to the new item. 4392 * It guarantees both items live in the same tree leaf and the new item is 4393 * contiguous with the original item. 4394 * 4395 * This allows us to split a file extent in place, keeping a lock on the leaf 4396 * the entire time. 4397 */ 4398 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 4399 struct btrfs_root *root, 4400 struct btrfs_path *path, 4401 const struct btrfs_key *new_key) 4402 { 4403 struct extent_buffer *leaf; 4404 int ret; 4405 u32 item_size; 4406 4407 leaf = path->nodes[0]; 4408 item_size = btrfs_item_size(leaf, path->slots[0]); 4409 ret = setup_leaf_for_split(trans, root, path, 4410 item_size + sizeof(struct btrfs_item)); 4411 if (ret) 4412 return ret; 4413 4414 path->slots[0]++; 4415 btrfs_setup_item_for_insert(trans, root, path, new_key, item_size); 4416 leaf = path->nodes[0]; 4417 memcpy_extent_buffer(leaf, 4418 btrfs_item_ptr_offset(leaf, path->slots[0]), 4419 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), 4420 item_size); 4421 return 0; 4422 } 4423 4424 /* 4425 * delete the pointer from a given node. 4426 * 4427 * the tree should have been previously balanced so the deletion does not 4428 * empty a node. 4429 * 4430 * This is exported for use inside btrfs-progs, don't un-export it. 4431 */ 4432 int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4433 struct btrfs_path *path, int level, int slot) 4434 { 4435 struct extent_buffer *parent = path->nodes[level]; 4436 u32 nritems; 4437 int ret; 4438 4439 nritems = btrfs_header_nritems(parent); 4440 if (slot != nritems - 1) { 4441 if (level) { 4442 ret = btrfs_tree_mod_log_insert_move(parent, slot, 4443 slot + 1, nritems - slot - 1); 4444 if (ret < 0) { 4445 btrfs_abort_transaction(trans, ret); 4446 return ret; 4447 } 4448 } 4449 memmove_extent_buffer(parent, 4450 btrfs_node_key_ptr_offset(parent, slot), 4451 btrfs_node_key_ptr_offset(parent, slot + 1), 4452 sizeof(struct btrfs_key_ptr) * 4453 (nritems - slot - 1)); 4454 } else if (level) { 4455 ret = btrfs_tree_mod_log_insert_key(parent, slot, 4456 BTRFS_MOD_LOG_KEY_REMOVE); 4457 if (ret < 0) { 4458 btrfs_abort_transaction(trans, ret); 4459 return ret; 4460 } 4461 } 4462 4463 nritems--; 4464 btrfs_set_header_nritems(parent, nritems); 4465 if (nritems == 0 && parent == root->node) { 4466 BUG_ON(btrfs_header_level(root->node) != 1); 4467 /* just turn the root into a leaf and break */ 4468 btrfs_set_header_level(root->node, 0); 4469 } else if (slot == 0) { 4470 struct btrfs_disk_key disk_key; 4471 4472 btrfs_node_key(parent, &disk_key, 0); 4473 fixup_low_keys(trans, path, &disk_key, level + 1); 4474 } 4475 btrfs_mark_buffer_dirty(trans, parent); 4476 return 0; 4477 } 4478 4479 /* 4480 * a helper function to delete the leaf pointed to by path->slots[1] and 4481 * path->nodes[1]. 
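 * (i.e. path->nodes[1] is the leaf's parent and path->slots[1] is the
 * index of the key pointer to drop from it).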
4482 * 4483 * This deletes the pointer in path->nodes[1] and frees the leaf 4484 * block extent. zero is returned if it all worked out, < 0 otherwise. 4485 * 4486 * The path must have already been setup for deleting the leaf, including 4487 * all the proper balancing. path->nodes[1] must be locked. 4488 */ 4489 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans, 4490 struct btrfs_root *root, 4491 struct btrfs_path *path, 4492 struct extent_buffer *leaf) 4493 { 4494 int ret; 4495 4496 WARN_ON(btrfs_header_generation(leaf) != trans->transid); 4497 ret = btrfs_del_ptr(trans, root, path, 1, path->slots[1]); 4498 if (ret < 0) 4499 return ret; 4500 4501 /* 4502 * btrfs_free_extent is expensive, we want to make sure we 4503 * aren't holding any locks when we call it 4504 */ 4505 btrfs_unlock_up_safe(path, 0); 4506 4507 root_sub_used_bytes(root); 4508 4509 atomic_inc(&leaf->refs); 4510 ret = btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1); 4511 free_extent_buffer_stale(leaf); 4512 if (ret < 0) 4513 btrfs_abort_transaction(trans, ret); 4514 4515 return ret; 4516 } 4517 /* 4518 * delete the item at the leaf level in path. If that empties 4519 * the leaf, remove it from the tree 4520 */ 4521 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4522 struct btrfs_path *path, int slot, int nr) 4523 { 4524 struct btrfs_fs_info *fs_info = root->fs_info; 4525 struct extent_buffer *leaf; 4526 int ret = 0; 4527 int wret; 4528 u32 nritems; 4529 4530 leaf = path->nodes[0]; 4531 nritems = btrfs_header_nritems(leaf); 4532 4533 if (slot + nr != nritems) { 4534 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1); 4535 const int data_end = leaf_data_end(leaf); 4536 struct btrfs_map_token token; 4537 u32 dsize = 0; 4538 int i; 4539 4540 for (i = 0; i < nr; i++) 4541 dsize += btrfs_item_size(leaf, slot + i); 4542 4543 memmove_leaf_data(leaf, data_end + dsize, data_end, 4544 last_off - data_end); 4545 4546 btrfs_init_map_token(&token, leaf); 4547 for (i = slot + nr; i < nritems; i++) { 4548 u32 ioff; 4549 4550 ioff = btrfs_token_item_offset(&token, i); 4551 btrfs_set_token_item_offset(&token, i, ioff + dsize); 4552 } 4553 4554 memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr); 4555 } 4556 btrfs_set_header_nritems(leaf, nritems - nr); 4557 nritems -= nr; 4558 4559 /* delete the leaf if we've emptied it */ 4560 if (nritems == 0) { 4561 if (leaf == root->node) { 4562 btrfs_set_header_level(leaf, 0); 4563 } else { 4564 btrfs_clear_buffer_dirty(trans, leaf); 4565 ret = btrfs_del_leaf(trans, root, path, leaf); 4566 if (ret < 0) 4567 return ret; 4568 } 4569 } else { 4570 int used = leaf_space_used(leaf, 0, nritems); 4571 if (slot == 0) { 4572 struct btrfs_disk_key disk_key; 4573 4574 btrfs_item_key(leaf, &disk_key, 0); 4575 fixup_low_keys(trans, path, &disk_key, 1); 4576 } 4577 4578 /* 4579 * Try to delete the leaf if it is mostly empty. We do this by 4580 * trying to move all its items into its left and right neighbours. 4581 * If we can't move all the items, then we don't delete it - it's 4582 * not ideal, but future insertions might fill the leaf with more 4583 * items, or items from other leaves might be moved later into our 4584 * leaf due to deletions on those leaves. 4585 */ 4586 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) { 4587 u32 min_push_space; 4588 4589 /* push_leaf_left fixes the path. 
* make sure the path still points to our leaf
4591 * for possible call to btrfs_del_ptr below
4592 */
4593 slot = path->slots[1];
4594 atomic_inc(&leaf->refs);
4595 /*
4596 * We want to be able to at least push one item to the
4597 * left neighbour leaf, and that's the first item.
4598 */
4599 min_push_space = sizeof(struct btrfs_item) +
4600 btrfs_item_size(leaf, 0);
4601 wret = push_leaf_left(trans, root, path, 0,
4602 min_push_space, 1, (u32)-1);
4603 if (wret < 0 && wret != -ENOSPC)
4604 ret = wret;
4605
4606 if (path->nodes[0] == leaf &&
4607 btrfs_header_nritems(leaf)) {
4608 /*
4609 * If we were not able to push all items from our
4610 * leaf to its left neighbour, then attempt to
4611 * either push all the remaining items to the
4612 * right neighbour or none. There's no advantage
4613 * in pushing only some items, instead of all, as
4614 * it's pointless to end up with a leaf having
4615 * too few items while the neighbours can be full
4616 * or nearly full.
4617 */
4618 nritems = btrfs_header_nritems(leaf);
4619 min_push_space = leaf_space_used(leaf, 0, nritems);
4620 wret = push_leaf_right(trans, root, path, 0,
4621 min_push_space, 1, 0);
4622 if (wret < 0 && wret != -ENOSPC)
4623 ret = wret;
4624 }
4625
4626 if (btrfs_header_nritems(leaf) == 0) {
4627 path->slots[1] = slot;
4628 ret = btrfs_del_leaf(trans, root, path, leaf);
4629 if (ret < 0)
4630 return ret;
4631 free_extent_buffer(leaf);
4632 ret = 0;
4633 } else {
4634 /* if we're still in the path, make sure
4635 * we're dirty. Otherwise, one of the
4636 * push_leaf functions must have already
4637 * dirtied this buffer
4638 */
4639 if (path->nodes[0] == leaf)
4640 btrfs_mark_buffer_dirty(trans, leaf);
4641 free_extent_buffer(leaf);
4642 }
4643 } else {
4644 btrfs_mark_buffer_dirty(trans, leaf);
4645 }
4646 }
4647 return ret;
4648 }
4649
4650 /*
4651 * A helper function to walk down the tree starting at min_key, and looking
4652 * for nodes or leaves that have a minimum transaction id.
4653 * This is used by the btree defrag code and tree logging.
4654 *
4655 * This does not cow, but it does stuff the starting key it finds back
4656 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4657 * key and get a writable path.
4658 *
4659 * This honors path->lowest_level to prevent descent past a given level
4660 * of the tree.
4661 *
4662 * min_trans indicates the oldest transaction that you are interested
4663 * in walking through. Any nodes or leaves older than min_trans are
4664 * skipped over (without reading them).
4665 *
4666 * returns zero if something useful was found, < 0 on error and 1 if there
4667 * was nothing in the tree that matched the search criteria.
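 *
 * Minimal caller sketch; everything except the btrfs helpers is
 * hypothetical:
 *
 *	struct btrfs_key min_key = { 0 };
 *
 *	while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) {
 *		process_slot(path);		(hypothetical visitor)
 *		btrfs_release_path(path);
 *		if (!increment_key(&min_key))	(advance past the hit)
 *			break;
 *	}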
4668 */ 4669 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 4670 struct btrfs_path *path, 4671 u64 min_trans) 4672 { 4673 struct extent_buffer *cur; 4674 struct btrfs_key found_key; 4675 int slot; 4676 int sret; 4677 u32 nritems; 4678 int level; 4679 int ret = 1; 4680 int keep_locks = path->keep_locks; 4681 4682 ASSERT(!path->nowait); 4683 path->keep_locks = 1; 4684 again: 4685 cur = btrfs_read_lock_root_node(root); 4686 level = btrfs_header_level(cur); 4687 WARN_ON(path->nodes[level]); 4688 path->nodes[level] = cur; 4689 path->locks[level] = BTRFS_READ_LOCK; 4690 4691 if (btrfs_header_generation(cur) < min_trans) { 4692 ret = 1; 4693 goto out; 4694 } 4695 while (1) { 4696 nritems = btrfs_header_nritems(cur); 4697 level = btrfs_header_level(cur); 4698 sret = btrfs_bin_search(cur, 0, min_key, &slot); 4699 if (sret < 0) { 4700 ret = sret; 4701 goto out; 4702 } 4703 4704 /* at the lowest level, we're done, setup the path and exit */ 4705 if (level == path->lowest_level) { 4706 if (slot >= nritems) 4707 goto find_next_key; 4708 ret = 0; 4709 path->slots[level] = slot; 4710 btrfs_item_key_to_cpu(cur, &found_key, slot); 4711 goto out; 4712 } 4713 if (sret && slot > 0) 4714 slot--; 4715 /* 4716 * check this node pointer against the min_trans parameters. 4717 * If it is too old, skip to the next one. 4718 */ 4719 while (slot < nritems) { 4720 u64 gen; 4721 4722 gen = btrfs_node_ptr_generation(cur, slot); 4723 if (gen < min_trans) { 4724 slot++; 4725 continue; 4726 } 4727 break; 4728 } 4729 find_next_key: 4730 /* 4731 * we didn't find a candidate key in this node, walk forward 4732 * and find another one 4733 */ 4734 if (slot >= nritems) { 4735 path->slots[level] = slot; 4736 sret = btrfs_find_next_key(root, path, min_key, level, 4737 min_trans); 4738 if (sret == 0) { 4739 btrfs_release_path(path); 4740 goto again; 4741 } else { 4742 goto out; 4743 } 4744 } 4745 /* save our key for returning back */ 4746 btrfs_node_key_to_cpu(cur, &found_key, slot); 4747 path->slots[level] = slot; 4748 if (level == path->lowest_level) { 4749 ret = 0; 4750 goto out; 4751 } 4752 cur = btrfs_read_node_slot(cur, slot); 4753 if (IS_ERR(cur)) { 4754 ret = PTR_ERR(cur); 4755 goto out; 4756 } 4757 4758 btrfs_tree_read_lock(cur); 4759 4760 path->locks[level - 1] = BTRFS_READ_LOCK; 4761 path->nodes[level - 1] = cur; 4762 unlock_up(path, level, 1, 0, NULL); 4763 } 4764 out: 4765 path->keep_locks = keep_locks; 4766 if (ret == 0) { 4767 btrfs_unlock_up_safe(path, path->lowest_level + 1); 4768 memcpy(min_key, &found_key, sizeof(found_key)); 4769 } 4770 return ret; 4771 } 4772 4773 /* 4774 * this is similar to btrfs_next_leaf, but does not try to preserve 4775 * and fixup the path. It looks for and returns the next key in the 4776 * tree based on the current path and the min_trans parameters. 4777 * 4778 * 0 is returned if another key is found, < 0 if there are any errors 4779 * and 1 is returned if there are no higher keys in the tree 4780 * 4781 * path->keep_locks should be set to 1 on the search made before 4782 * calling this function. 
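 *
 * Illustrative pairing, with the surrounding code hypothetical:
 *
 *	path->keep_locks = 1;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret >= 0 &&
 *	    btrfs_find_next_key(root, path, &key, 0, min_trans) == 0) {
 *		... 'key' now holds the next key at level 0 ...
 *	}
 *	btrfs_release_path(path);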
4783 */ 4784 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 4785 struct btrfs_key *key, int level, u64 min_trans) 4786 { 4787 int slot; 4788 struct extent_buffer *c; 4789 4790 WARN_ON(!path->keep_locks && !path->skip_locking); 4791 while (level < BTRFS_MAX_LEVEL) { 4792 if (!path->nodes[level]) 4793 return 1; 4794 4795 slot = path->slots[level] + 1; 4796 c = path->nodes[level]; 4797 next: 4798 if (slot >= btrfs_header_nritems(c)) { 4799 int ret; 4800 int orig_lowest; 4801 struct btrfs_key cur_key; 4802 if (level + 1 >= BTRFS_MAX_LEVEL || 4803 !path->nodes[level + 1]) 4804 return 1; 4805 4806 if (path->locks[level + 1] || path->skip_locking) { 4807 level++; 4808 continue; 4809 } 4810 4811 slot = btrfs_header_nritems(c) - 1; 4812 if (level == 0) 4813 btrfs_item_key_to_cpu(c, &cur_key, slot); 4814 else 4815 btrfs_node_key_to_cpu(c, &cur_key, slot); 4816 4817 orig_lowest = path->lowest_level; 4818 btrfs_release_path(path); 4819 path->lowest_level = level; 4820 ret = btrfs_search_slot(NULL, root, &cur_key, path, 4821 0, 0); 4822 path->lowest_level = orig_lowest; 4823 if (ret < 0) 4824 return ret; 4825 4826 c = path->nodes[level]; 4827 slot = path->slots[level]; 4828 if (ret == 0) 4829 slot++; 4830 goto next; 4831 } 4832 4833 if (level == 0) 4834 btrfs_item_key_to_cpu(c, key, slot); 4835 else { 4836 u64 gen = btrfs_node_ptr_generation(c, slot); 4837 4838 if (gen < min_trans) { 4839 slot++; 4840 goto next; 4841 } 4842 btrfs_node_key_to_cpu(c, key, slot); 4843 } 4844 return 0; 4845 } 4846 return 1; 4847 } 4848 4849 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 4850 u64 time_seq) 4851 { 4852 int slot; 4853 int level; 4854 struct extent_buffer *c; 4855 struct extent_buffer *next; 4856 struct btrfs_fs_info *fs_info = root->fs_info; 4857 struct btrfs_key key; 4858 bool need_commit_sem = false; 4859 u32 nritems; 4860 int ret; 4861 int i; 4862 4863 /* 4864 * The nowait semantics are used only for write paths, where we don't 4865 * use the tree mod log and sequence numbers. 4866 */ 4867 if (time_seq) 4868 ASSERT(!path->nowait); 4869 4870 nritems = btrfs_header_nritems(path->nodes[0]); 4871 if (nritems == 0) 4872 return 1; 4873 4874 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); 4875 again: 4876 level = 1; 4877 next = NULL; 4878 btrfs_release_path(path); 4879 4880 path->keep_locks = 1; 4881 4882 if (time_seq) { 4883 ret = btrfs_search_old_slot(root, &key, path, time_seq); 4884 } else { 4885 if (path->need_commit_sem) { 4886 path->need_commit_sem = 0; 4887 need_commit_sem = true; 4888 if (path->nowait) { 4889 if (!down_read_trylock(&fs_info->commit_root_sem)) { 4890 ret = -EAGAIN; 4891 goto done; 4892 } 4893 } else { 4894 down_read(&fs_info->commit_root_sem); 4895 } 4896 } 4897 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4898 } 4899 path->keep_locks = 0; 4900 4901 if (ret < 0) 4902 goto done; 4903 4904 nritems = btrfs_header_nritems(path->nodes[0]); 4905 /* 4906 * by releasing the path above we dropped all our locks. A balance 4907 * could have added more items next to the key that used to be 4908 * at the very end of the block. So, check again here and 4909 * advance the path if there are now more items available. 
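 * With ret == 0 the old last key still exists and we step one slot past
 * it; with ret > 0 the search already left slots[0] pointing at the next
 * key, so no increment is needed.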
4910 */
4911 if (nritems > 0 && path->slots[0] < nritems - 1) {
4912 if (ret == 0)
4913 path->slots[0]++;
4914 ret = 0;
4915 goto done;
4916 }
4917 /*
4918 * So the above check misses one case:
4919 * - after releasing the path above, someone has removed the item that
4920 * used to be at the very end of the block, and balance between leaves
4921 * gets another one with bigger key.offset to replace it.
4922 *
4923 * This one should be returned as well, or we can get leaf corruption
4924 * later (esp. in __btrfs_drop_extents()).
4925 *
4926 * And a bit more explanation about this check:
4927 * with ret > 0, the key isn't found, the path points to the slot
4928 * where it should be inserted, so the path->slots[0] item must be the
4929 * bigger one.
4930 */
4931 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
4932 ret = 0;
4933 goto done;
4934 }
4935
4936 while (level < BTRFS_MAX_LEVEL) {
4937 if (!path->nodes[level]) {
4938 ret = 1;
4939 goto done;
4940 }
4941
4942 slot = path->slots[level] + 1;
4943 c = path->nodes[level];
4944 if (slot >= btrfs_header_nritems(c)) {
4945 level++;
4946 if (level == BTRFS_MAX_LEVEL) {
4947 ret = 1;
4948 goto done;
4949 }
4950 continue;
4951 }
4952
4953
4954 /*
4955 * Our current level is where we're going to start from, and to
4956 * make sure lockdep doesn't complain we need to drop our locks
4957 * and nodes from 0 to our current level.
4958 */
4959 for (i = 0; i < level; i++) {
4960 if (path->locks[i]) {
4961 btrfs_tree_read_unlock(path->nodes[i]);
4962 path->locks[i] = 0;
4963 }
4964 free_extent_buffer(path->nodes[i]);
4965 path->nodes[i] = NULL;
4966 }
4967
4968 next = c;
4969 ret = read_block_for_search(root, path, &next, slot, &key);
4970 if (ret == -EAGAIN && !path->nowait)
4971 goto again;
4972
4973 if (ret < 0) {
4974 btrfs_release_path(path);
4975 goto done;
4976 }
4977
4978 if (!path->skip_locking) {
4979 ret = btrfs_try_tree_read_lock(next);
4980 if (!ret && path->nowait) {
4981 ret = -EAGAIN;
4982 goto done;
4983 }
4984 if (!ret && time_seq) {
4985 /*
4986 * If we don't get the lock, we may be racing
4987 * with push_leaf_left, which holds that lock
4988 * while itself waiting for the leaf we have
4989 * currently locked. To solve this, we give up
4990 * on our lock and cycle.
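 * The cond_resched() below gives the other task a chance to finish its
 * push before we retry the whole search from the root.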
4991 */
4992 free_extent_buffer(next);
4993 btrfs_release_path(path);
4994 cond_resched();
4995 goto again;
4996 }
4997 if (!ret)
4998 btrfs_tree_read_lock(next);
4999 }
5000 break;
5001 }
5002 path->slots[level] = slot;
5003 while (1) {
5004 level--;
5005 path->nodes[level] = next;
5006 path->slots[level] = 0;
5007 if (!path->skip_locking)
5008 path->locks[level] = BTRFS_READ_LOCK;
5009 if (!level)
5010 break;
5011
5012 ret = read_block_for_search(root, path, &next, 0, &key);
5013 if (ret == -EAGAIN && !path->nowait)
5014 goto again;
5015
5016 if (ret < 0) {
5017 btrfs_release_path(path);
5018 goto done;
5019 }
5020
5021 if (!path->skip_locking) {
5022 if (path->nowait) {
5023 if (!btrfs_try_tree_read_lock(next)) {
5024 ret = -EAGAIN;
5025 goto done;
5026 }
5027 } else {
5028 btrfs_tree_read_lock(next);
5029 }
5030 }
5031 }
5032 ret = 0;
5033 done:
5034 unlock_up(path, 0, 1, 0, NULL);
5035 if (need_commit_sem) {
5036 int ret2;
5037
5038 path->need_commit_sem = 1;
5039 ret2 = finish_need_commit_sem_search(path);
5040 up_read(&fs_info->commit_root_sem);
5041 if (ret2)
5042 ret = ret2;
5043 }
5044
5045 return ret;
5046 }
5047
5048 int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq)
5049 {
5050 path->slots[0]++;
5051 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
5052 return btrfs_next_old_leaf(root, path, time_seq);
5053 return 0;
5054 }
5055
5056 /*
5057 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5058 * searching until it gets past min_objectid or finds an item of 'type'
5059 *
5060 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5061 */
5062 int btrfs_previous_item(struct btrfs_root *root,
5063 struct btrfs_path *path, u64 min_objectid,
5064 int type)
5065 {
5066 struct btrfs_key found_key;
5067 struct extent_buffer *leaf;
5068 u32 nritems;
5069 int ret;
5070
5071 while (1) {
5072 if (path->slots[0] == 0) {
5073 ret = btrfs_prev_leaf(root, path);
5074 if (ret != 0)
5075 return ret;
5076 } else {
5077 path->slots[0]--;
5078 }
5079 leaf = path->nodes[0];
5080 nritems = btrfs_header_nritems(leaf);
5081 if (nritems == 0)
5082 return 1;
5083 if (path->slots[0] == nritems)
5084 path->slots[0]--;
5085
5086 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5087 if (found_key.objectid < min_objectid)
5088 break;
5089 if (found_key.type == type)
5090 return 0;
5091 if (found_key.objectid == min_objectid &&
5092 found_key.type < type)
5093 break;
5094 }
5095 return 1;
5096 }
5097
5098 /*
5099 * search in the extent tree to find a previous Metadata/Data extent item with
5100 * min objectid.
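 * Both BTRFS_EXTENT_ITEM_KEY and skinny BTRFS_METADATA_ITEM_KEY items
 * count as a match here.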
5101 * 5102 * returns 0 if something is found, 1 if nothing was found and < 0 on error 5103 */ 5104 int btrfs_previous_extent_item(struct btrfs_root *root, 5105 struct btrfs_path *path, u64 min_objectid) 5106 { 5107 struct btrfs_key found_key; 5108 struct extent_buffer *leaf; 5109 u32 nritems; 5110 int ret; 5111 5112 while (1) { 5113 if (path->slots[0] == 0) { 5114 ret = btrfs_prev_leaf(root, path); 5115 if (ret != 0) 5116 return ret; 5117 } else { 5118 path->slots[0]--; 5119 } 5120 leaf = path->nodes[0]; 5121 nritems = btrfs_header_nritems(leaf); 5122 if (nritems == 0) 5123 return 1; 5124 if (path->slots[0] == nritems) 5125 path->slots[0]--; 5126 5127 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5128 if (found_key.objectid < min_objectid) 5129 break; 5130 if (found_key.type == BTRFS_EXTENT_ITEM_KEY || 5131 found_key.type == BTRFS_METADATA_ITEM_KEY) 5132 return 0; 5133 if (found_key.objectid == min_objectid && 5134 found_key.type < BTRFS_EXTENT_ITEM_KEY) 5135 break; 5136 } 5137 return 1; 5138 } 5139 5140 int __init btrfs_ctree_init(void) 5141 { 5142 btrfs_path_cachep = KMEM_CACHE(btrfs_path, 0); 5143 if (!btrfs_path_cachep) 5144 return -ENOMEM; 5145 return 0; 5146 } 5147 5148 void __cold btrfs_ctree_exit(void) 5149 { 5150 kmem_cache_destroy(btrfs_path_cachep); 5151 } 5152