// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include <linux/error-injection.h>
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "tree-mod-log.h"
#include "tree-checker.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "file-item.h"

static struct kmem_cache *btrfs_path_cachep;

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);

static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
				     .driver = "blake2b-256" },
};

/*
 * The leaf data grows from end-to-front in the node.  This returns the address
 * of the start of the last item, which is the stop of the leaf data stack.
 */
static unsigned int leaf_data_end(const struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);

	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
	return btrfs_item_offset(leaf, nr - 1);
}

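/*
 * Rough picture of the leaf layout the helpers below rely on (illustrative
 * sketch only, see struct btrfs_header and struct btrfs_item for the
 * authoritative definitions):
 *
 *	[ header | item 0 | item 1 | ... | free space ... | data 1 | data 0 ]
 *	          ^ item array grows forward              ^ item data fills
 *	            right after the header                  backward from the
 *	                                                    end of the block
 *
 * Each struct btrfs_item stores the offset of its data relative to the end
 * of the header, which is why the wrappers below add
 * btrfs_item_nr_offset(leaf, 0) to the data offsets they are given.
 */
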
/*
 * Move data in a @leaf (using memmove, safe for overlapping ranges).
 *
 * @leaf:	leaf that we're doing a memmove on
 * @dst_offset:	item data offset we're moving to
 * @src_offset:	item data offset we're moving from
 * @len:	length of the data we're moving
 *
 * Wrapper around memmove_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offset's start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void memmove_leaf_data(const struct extent_buffer *leaf,
				     unsigned long dst_offset,
				     unsigned long src_offset,
				     unsigned long len)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
			      btrfs_item_nr_offset(leaf, 0) + src_offset, len);
}

/*
 * Copy item data from @src into @dst at the given offsets.
 *
 * @dst:	destination leaf that we're copying into
 * @src:	source leaf that we're copying from
 * @dst_offset:	item data offset we're copying to
 * @src_offset:	item data offset we're copying from
 * @len:	length of the data we're copying
 *
 * Wrapper around copy_extent_buffer() that takes into account the header on
 * the leaf.  The btrfs_item offset's start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf.  This
 * handles that math to simplify the callers.
 */
static inline void copy_leaf_data(const struct extent_buffer *dst,
				  const struct extent_buffer *src,
				  unsigned long dst_offset,
				  unsigned long src_offset, unsigned long len)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
			   btrfs_item_nr_offset(src, 0) + src_offset, len);
}

/*
 * Move items in a @leaf (using memmove).
 *
 * @dst:	destination leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around memmove_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void memmove_leaf_items(const struct extent_buffer *leaf,
				      int dst_item, int src_item, int nr_items)
{
	memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
			      btrfs_item_nr_offset(leaf, src_item),
			      nr_items * sizeof(struct btrfs_item));
}

/*
 * Copy items from @src into @dst at the given item positions.
 *
 * @dst:	destination leaf for the items
 * @src:	source leaf for the items
 * @dst_item:	the item nr we're copying into
 * @src_item:	the item nr we're copying from
 * @nr_items:	the number of items to copy
 *
 * Wrapper around copy_extent_buffer() that does the math to get the
 * appropriate offsets into the leaf from the item numbers.
 */
static inline void copy_leaf_items(const struct extent_buffer *dst,
				   const struct extent_buffer *src,
				   int dst_item, int src_item, int nr_items)
{
	copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
			   btrfs_item_nr_offset(src, src_item),
			   nr_items * sizeof(struct btrfs_item));
}

/* This exists for btrfs-progs usages. */
u16 btrfs_csum_type_size(u16 type)
{
	return btrfs_csums[type].size;
}

int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);

	/*
	 * csum type is validated at mount time
	 */
	return btrfs_csum_type_size(t);
}

const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].name;
}

/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 * name
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].driver[0] ?
		btrfs_csums[csum_type].driver :
		btrfs_csums[csum_type].name;
}

size_t __attribute_const__ btrfs_get_num_csums(void)
{
	return ARRAY_SIZE(btrfs_csums);
}

struct btrfs_path *btrfs_alloc_path(void)
{
	might_sleep();

	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

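/*
 * Typical call pattern for the path helpers above (illustrative sketch only,
 * not lifted from any specific caller):
 *
 *	struct btrfs_path *path;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
 *	... use path->nodes[0] and path->slots[0] ...
 *	btrfs_free_path(path);
 */
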
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * We want the transaction abort to print stack trace only for errors where the
 * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
 * caused by external factors.
 */
bool __cold abort_should_print_stack(int error)
{
	switch (error) {
	case -EIO:
	case -EROFS:
	case -ENOMEM:
		return false;
	}
	return true;
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/*
 * Cowonly roots (not-shareable trees, everything not subvolume or reloc roots)
 * just get put onto a simple dirty list.  Transaction walks this list to make
 * sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (btrfs_root_id(root) == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;
	u64 reloc_src_root = 0;

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != btrfs_get_root_last_trans(root));

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		reloc_src_root = btrfs_header_owner(buf);
	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0,
				     reloc_src_root, BTRFS_NESTING_NEW_ROOT);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_mark_buffer_dirty(trans, cow);
	*cow_ret = cow;
	return 0;
}

/*
 * check if the tree block can be shared by multiple trees
 */
bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *buf)
{
	const u64 buf_gen = btrfs_header_generation(buf);

	/*
	 * Tree blocks not in shareable trees and tree roots are never shared.
	 * If a block was allocated after the last snapshot and the block was
	 * not allocated by tree relocation, we know the block is not shared.
	 */

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return false;

	if (buf == root->node)
		return false;

	if (buf_gen > btrfs_root_last_snapshot(&root->root_item) &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))
		return false;

	if (buf != root->commit_root)
		return true;

	/*
	 * An extent buffer that used to be the commit root may still be shared
	 * because the tree height may have increased and it became a child of a
	 * higher level root. This can happen when snapshotting a subvolume
	 * created in the current transaction.
	 */
	if (buf_gen == trans->transid)
		return true;

	return false;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(trans, root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags, NULL);
		if (ret)
			return ret;
		if (unlikely(refs == 0)) {
			btrfs_crit(fs_info,
		"found 0 references for tree block at bytenr %llu level %d root %llu",
				   buf->start, btrfs_header_level(buf),
				   btrfs_root_id(root));
			ret = -EUCLEAN;
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	if (unlikely(owner == BTRFS_TREE_RELOC_OBJECTID &&
		     !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))) {
		btrfs_crit(fs_info,
"found tree block at bytenr %llu level %d root %llu refs %llu flags %llx without full backref flag set",
			   buf->start, btrfs_header_level(buf),
			   btrfs_root_id(root), refs, flags);
		ret = -EUCLEAN;
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (refs > 1) {
		if ((owner == btrfs_root_id(root) ||
		     btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			ret = btrfs_set_disk_extent_flags(trans, buf,
						BTRFS_BLOCK_FLAG_FULL_BACKREF);
			if (ret)
				return ret;
		} else {

			if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clear_buffer_dirty(trans, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct extent_buffer *buf,
			  struct extent_buffer *parent, int parent_slot,
			  struct extent_buffer **cow_ret,
			  u64 search_start, u64 empty_size,
			  enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;
	u64 reloc_src_root = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_write_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != btrfs_get_root_last_trans(root));

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		reloc_src_root = btrfs_header_owner(buf);
	}
	cow = btrfs_alloc_tree_block(trans, root, parent_start,
				     btrfs_root_id(root), &disk_key, level,
				     search_start, empty_size, reloc_src_root, nest);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, btrfs_root_id(root));

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto error_unlock_cow;
	}

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_unlock_cow;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto error_unlock_cow;
		}
		atomic_inc(&cow->refs);
		rcu_assign_pointer(root->node, cow);

		ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
					    parent_start, last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto error_unlock_cow;
		}
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		ret = btrfs_tree_mod_log_insert_key(parent, parent_slot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_unlock_cow;
		}
		btrfs_set_node_blockptr(parent, parent_slot, cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(trans, parent);
		if (last_ref) {
			ret = btrfs_tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto error_unlock_cow;
			}
		}
		ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
					    parent_start, last_ref);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto error_unlock_cow;
		}
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(trans, cow);
	*cow_ret = cow;
	return 0;

error_unlock_cow:
	btrfs_tree_unlock(cow);
	free_extent_buffer(cow);
	return ret;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to the TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * COWs a single block, see btrfs_force_cow_block() for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
		   "attempt to COW block %llu on root %llu that is being deleted",
			   buf->start, btrfs_root_id(root));
		return -EUCLEAN;
	}

	/*
	 * COWing must happen through a running transaction, which always
	 * matches the current fs generation (it's a transaction with a state
	 * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
	 * into error state to prevent the commit of any transaction.
	 */
	if (unlikely(trans->transaction != fs_info->running_transaction ||
		     trans->transid != fs_info->generation)) {
		btrfs_abort_transaction(trans, -EUCLEAN);
		btrfs_crit(fs_info,
"unexpected transaction when attempting to COW block %llu on root %llu, transaction %llu running transaction %llu fs generation %llu",
			   buf->start, btrfs_root_id(root), trans->transid,
			   fs_info->running_transaction->transid,
			   fs_info->generation);
		return -EUCLEAN;
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = round_down(buf->start, SZ_1G);

	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also we don't care about the error, as it's handled internally.
	 */
	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
	ret = btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
				    cow_ret, search_start, 0, nest);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);

/*
 * same as comp_keys only with two btrfs_key's
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}

/*
 * Search for a key in the given extent_buffer.
 *
 * The lower boundary for the search is specified by the slot number @first_slot.
 * Use a value of 0 to search over the whole extent buffer. Works for both
 * leaves and nodes.
 *
 * The slot in the extent buffer is returned via @slot. If the key exists in the
 * extent buffer, then @slot will point to the slot where the key is, otherwise
 * it points to the slot where you would insert the key.
 *
 * Slot may point to the total number of items (i.e. one position beyond the last
 * key) if the key is bigger than the last key in the extent buffer.
 */
int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
		     const struct btrfs_key *key, int *slot)
{
	unsigned long p;
	int item_size;
	/*
	 * Use unsigned types for the low and high slots, so that we get a more
	 * efficient division in the search loop below.
	 */
	u32 low = first_slot;
	u32 high = btrfs_header_nritems(eb);
	int ret;
	const int key_size = sizeof(struct btrfs_disk_key);

	if (unlikely(low > high)) {
		btrfs_err(eb->fs_info,
		 "%s: low (%u) > high (%u) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	if (btrfs_header_level(eb) == 0) {
		p = offsetof(struct btrfs_leaf, items);
		item_size = sizeof(struct btrfs_item);
	} else {
		p = offsetof(struct btrfs_node, ptrs);
		item_size = sizeof(struct btrfs_key_ptr);
	}

	while (low < high) {
		const int unit_size = eb->folio_size;
		unsigned long oil;
		unsigned long offset;
		struct btrfs_disk_key *tmp;
		struct btrfs_disk_key unaligned;
		int mid;

		mid = (low + high) / 2;
		offset = p + mid * item_size;
		oil = get_eb_offset_in_folio(eb, offset);

		if (oil + key_size <= unit_size) {
			const unsigned long idx = get_eb_folio_index(eb, offset);
			char *kaddr = folio_address(eb->folios[idx]);

			oil = get_eb_offset_in_folio(eb, offset);
			tmp = (struct btrfs_disk_key *)(kaddr + oil);
		} else {
			read_extent_buffer(eb, &unaligned, offset, key_size);
			tmp = &unaligned;
		}

		ret = btrfs_comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

static void root_add_used_bytes(struct btrfs_root *root)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + root->fs_info->nodesize);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used_bytes(struct btrfs_root *root)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - root->fs_info->nodesize);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot)
{
	int level = btrfs_header_level(parent);
	struct btrfs_tree_parent_check check = { 0 };
	struct extent_buffer *eb;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	ASSERT(level);

	check.level = level - 1;
	check.transid = btrfs_node_ptr_generation(parent, slot);
	check.owner_root = btrfs_header_owner(parent);
	check.has_first_key = true;
	btrfs_node_key_to_cpu(parent, &check.first_key, slot);

	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
			     &check);
	if (IS_ERR(eb))
		return eb;
	if (!extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return ERR_PTR(-EIO);
	}

	return eb;
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	ASSERT(level > 0);

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = btrfs_read_node_slot(mid, 0);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			goto out;
		}

		btrfs_tree_lock(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto out;
		}

		ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
		if (ret < 0) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used_bytes(root);
		ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
		return 0;

	if (pslot) {
		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left)) {
			ret = PTR_ERR(left);
			left = NULL;
			goto out;
		}

		btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left,
				       BTRFS_NESTING_LEFT_COW);
		if (wret) {
			ret = wret;
			goto out;
		}
	}

	if (pslot + 1 < btrfs_header_nritems(parent)) {
		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right)) {
			ret = PTR_ERR(right);
			right = NULL;
			goto out;
		}

		btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right,
				       BTRFS_NESTING_RIGHT_COW);
		if (wret) {
			ret = wret;
			goto out;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			btrfs_clear_buffer_dirty(trans, right);
			btrfs_tree_unlock(right);
			ret = btrfs_del_ptr(trans, root, path, level + 1, pslot + 1);
			if (ret < 0) {
				free_extent_buffer_stale(right);
				right = NULL;
				goto out;
			}
			root_sub_used_bytes(root);
			ret = btrfs_free_tree_block(trans, btrfs_root_id(root),
						    right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		} else {
			struct btrfs_disk_key right_key;

			btrfs_node_key(right, &right_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(trans, parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (unlikely(!left)) {
			btrfs_crit(fs_info,
"missing left child when middle child only has 1 item, parent bytenr %llu level %d mid bytenr %llu root %llu",
				   parent->start, btrfs_header_level(parent),
				   mid->start, btrfs_root_id(root));
			ret = -EUCLEAN;
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		wret = balance_node_right(trans, mid, left);
		if (wret < 0) {
			ret = wret;
			goto out;
		}
		if (wret == 1) {
			wret = push_node_left(trans, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		btrfs_clear_buffer_dirty(trans, mid);
		btrfs_tree_unlock(mid);
		ret = btrfs_del_ptr(trans, root, path, level + 1, pslot);
		if (ret < 0) {
			free_extent_buffer_stale(mid);
			mid = NULL;
			goto out;
		}
		root_sub_used_bytes(root);
		ret = btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;

		btrfs_node_key(mid, &mid_key, 0);
		ret = btrfs_tree_mod_log_insert_key(parent, pslot,
						    BTRFS_MOD_LOG_KEY_REPLACE);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(trans, parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			atomic_inc(&left->refs);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
out:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}

/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	/* first, try to make some room in the middle buffer */
	if (pslot) {
		u32 left_nr;

		left = btrfs_read_node_slot(parent, pslot - 1);
		if (IS_ERR(left))
			return PTR_ERR(left);

		btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left,
					      BTRFS_NESTING_LEFT_COW);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(trans, parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -= btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (pslot + 1 < btrfs_header_nritems(parent)) {
		u32 right_nr;

		right = btrfs_read_node_slot(parent, pslot + 1);
		if (IS_ERR(right))
			return PTR_ERR(right);

		btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right, BTRFS_NESTING_RIGHT_COW);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
					BTRFS_MOD_LOG_KEY_REPLACE);
			if (ret < 0) {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(trans, parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}

/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 nread_max;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	/*
	 * Since the time between visiting leaves is much shorter than the time
	 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
	 * much IO at once (possibly random).
	 */
	if (path->reada == READA_FORWARD_ALWAYS) {
		if (level > 1)
			nread_max = node->fs_info->nodesize;
		else
			nread_max = SZ_128K;
	} else {
		nread_max = SZ_64K;
	}

	search = btrfs_node_blockptr(node, slot);
	blocksize = fs_info->nodesize;
	if (path->reada != READA_FORWARD_ALWAYS) {
		struct extent_buffer *eb;

		eb = find_extent_buffer(fs_info, search);
		if (eb) {
			free_extent_buffer(eb);
			return;
		}
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD ||
			   path->reada == READA_FORWARD_ALWAYS) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if (path->reada == READA_FORWARD_ALWAYS ||
		    (search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			btrfs_readahead_node_child(node, nr);
			nread += blocksize;
		}
		nscan++;
		if (nread > nread_max || nscan > 32)
			break;
	}
}

static noinline void reada_for_balance(struct btrfs_path *path, int level)
{
	struct extent_buffer *parent;
	int slot;
	int nritems;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0)
		btrfs_readahead_node_child(parent, slot - 1);
	if (slot + 1 < nritems)
		btrfs_readahead_node_child(parent, slot + 1);
}

/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	bool check_skip = true;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;

		if (check_skip) {
			if (path->slots[i] == 0) {
				skip_level = i + 1;
				continue;
			}

			if (path->keep_locks) {
				u32 nritems;

				nritems = btrfs_header_nritems(path->nodes[i]);
				if (nritems < 1 || path->slots[i] >= nritems - 1) {
					skip_level = i + 1;
					continue;
				}
			}
		}

		if (i >= lowest_unlock && i > skip_level) {
			check_skip = false;
			btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}

/*
 * Helper function for btrfs_search_slot() and other functions that do a search
 * on a btree. The goal is to find a tree block in the cache (the radix tree at
 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
 * its pages from disk.
 *
 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
 * whole btree search, starting again from the current root node.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int slot,
		      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_tree_parent_check check = { 0 };
	u64 blocknr;
	struct extent_buffer *tmp = NULL;
	int ret = 0;
	int parent_level;
	int err;
	bool read_tmp = false;
	bool tmp_locked = false;
	bool path_released = false;

	blocknr = btrfs_node_blockptr(*eb_ret, slot);
	parent_level = btrfs_header_level(*eb_ret);
	btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
	check.has_first_key = true;
	check.level = parent_level - 1;
	check.transid = btrfs_node_ptr_generation(*eb_ret, slot);
	check.owner_root = btrfs_root_id(root);

	/*
	 * If we need to read an extent buffer from disk and we are holding locks
	 * on upper level nodes, we unlock all the upper nodes before reading the
	 * extent buffer, and then return -EAGAIN to the caller as it needs to
	 * restart the search. We don't release the lock on the current level
	 * because we need to walk this node to figure out which blocks to read.
	 */
	tmp = find_extent_buffer(fs_info, blocknr);
	if (tmp) {
		if (p->reada == READA_FORWARD_ALWAYS)
			reada_for_search(fs_info, p, parent_level, slot, key->objectid);

		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, check.transid, 1) > 0) {
			/*
			 * Do extra check for first_key, eb can be stale due to
			 * being cached, read from scrub, or have multiple
			 * parents (shared tree blocks).
			 */
			if (btrfs_verify_level_key(tmp, &check)) {
				ret = -EUCLEAN;
				goto out;
			}
			*eb_ret = tmp;
			tmp = NULL;
			ret = 0;
			goto out;
		}

		if (p->nowait) {
			ret = -EAGAIN;
			goto out;
		}

		if (!p->skip_locking) {
			btrfs_unlock_up_safe(p, parent_level + 1);
			tmp_locked = true;
			btrfs_tree_read_lock(tmp);
			btrfs_release_path(p);
			ret = -EAGAIN;
			path_released = true;
		}

		/* Now we're allowed to do a blocking uptodate check. */
		err = btrfs_read_extent_buffer(tmp, &check);
		if (err) {
			ret = err;
			goto out;
		}

		if (ret == 0) {
			ASSERT(!tmp_locked);
			*eb_ret = tmp;
			tmp = NULL;
		}
		goto out;
	} else if (p->nowait) {
		ret = -EAGAIN;
		goto out;
	}

	if (!p->skip_locking) {
		btrfs_unlock_up_safe(p, parent_level + 1);
		ret = -EAGAIN;
	}

	if (p->reada != READA_NONE)
		reada_for_search(fs_info, p, parent_level, slot, key->objectid);

	tmp = btrfs_find_create_tree_block(fs_info, blocknr, check.owner_root, check.level);
	if (IS_ERR(tmp)) {
		ret = PTR_ERR(tmp);
		tmp = NULL;
		goto out;
	}
	read_tmp = true;

	if (!p->skip_locking) {
		ASSERT(ret == -EAGAIN);
		tmp_locked = true;
		btrfs_tree_read_lock(tmp);
		btrfs_release_path(p);
		path_released = true;
	}

	/* Now we're allowed to do a blocking uptodate check. */
	err = btrfs_read_extent_buffer(tmp, &check);
	if (err) {
		ret = err;
		goto out;
	}

	/*
	 * If the read above didn't mark this buffer up to date,
	 * it will never end up being up to date.  Set ret to EIO now
	 * and give up so that our caller doesn't loop forever
	 * on our EAGAINs.
	 */
	if (!extent_buffer_uptodate(tmp)) {
		ret = -EIO;
		goto out;
	}

	if (ret == 0) {
		ASSERT(!tmp_locked);
		*eb_ret = tmp;
		tmp = NULL;
	}
out:
	if (tmp) {
		if (tmp_locked)
			btrfs_tree_read_unlock(tmp);
		if (read_tmp && ret && ret != -EAGAIN)
			free_extent_buffer_stale(tmp);
		else
			free_extent_buffer(tmp);
	}
	if (ret && !path_released)
		btrfs_release_path(p);

	return ret;
}

/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = split_node(trans, root, p, level);

		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			return -EAGAIN;
		}

		reada_for_balance(p, level);
		ret = balance_level(trans, root, p, level);
		if (ret)
			return ret;

		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			return -EAGAIN;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return ret;
}

int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 iobjectid, u64 ioff, u8 key_type,
		    struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	ASSERT(path);
	ASSERT(found_key);

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
	    found_key->objectid != key.objectid)
		return 1;

	return 0;
}

static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
							struct btrfs_path *p,
							int write_lock_level)
{
	struct extent_buffer *b;
	int root_lock = 0;
	int level = 0;

	if (p->search_commit_root) {
		b = root->commit_root;
		atomic_inc(&b->refs);
		level = btrfs_header_level(b);
		/*
		 * Ensure that all callers have set skip_locking when
		 * p->search_commit_root = 1.
		 */
		ASSERT(p->skip_locking == 1);

		goto out;
	}

	if (p->skip_locking) {
		b = btrfs_root_node(root);
		level = btrfs_header_level(b);
		goto out;
	}

	/* We try very hard to do read locks on the root */
	root_lock = BTRFS_READ_LOCK;

	/*
	 * If the level is set to maximum, we can skip trying to get the read
	 * lock.
	 */
	if (write_lock_level < BTRFS_MAX_LEVEL) {
		/*
		 * We don't know the level of the root node until we actually
		 * have it read locked
		 */
		if (p->nowait) {
			b = btrfs_try_read_lock_root_node(root);
			if (IS_ERR(b))
				return b;
		} else {
			b = btrfs_read_lock_root_node(root);
		}
		level = btrfs_header_level(b);
		if (level > write_lock_level)
			goto out;

		/* Whoops, must trade for write lock */
		btrfs_tree_read_unlock(b);
		free_extent_buffer(b);
	}

	b = btrfs_lock_root_node(root);
	root_lock = BTRFS_WRITE_LOCK;

	/* The level might have changed, check again */
	level = btrfs_header_level(b);

out:
	/*
	 * The root may have failed to write out at some point, and thus is no
	 * longer valid, return an error in this case.
	 */
	if (!extent_buffer_uptodate(b)) {
		if (root_lock)
			btrfs_tree_unlock_rw(b, root_lock);
		free_extent_buffer(b);
		return ERR_PTR(-EIO);
	}

	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;
	/*
	 * Callers are responsible for dropping b's references.
	 */
	return b;
}

/*
 * Replace the extent buffer at the lowest level of the path with a cloned
 * version. The purpose is to be able to use it safely, after releasing the
 * commit root semaphore, even if relocation is happening in parallel, the
 * transaction used for relocation is committed and the extent buffer is
 * reallocated in the next transaction.
 *
 * This is used in a context where the caller does not prevent transaction
 * commits from happening, either by holding a transaction handle or holding
 * some lock, while it's doing searches through a commit root.
 * At the moment it's only used for send operations.
 */
static int finish_need_commit_sem_search(struct btrfs_path *path)
{
	const int i = path->lowest_level;
	const int slot = path->slots[i];
	struct extent_buffer *lowest = path->nodes[i];
	struct extent_buffer *clone;

	ASSERT(path->need_commit_sem);

	if (!lowest)
		return 0;

	lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);

	clone = btrfs_clone_extent_buffer(lowest);
	if (!clone)
		return -ENOMEM;

	btrfs_release_path(path);
	path->nodes[i] = clone;
	path->slots[i] = slot;

	return 0;
}

static inline int search_for_key_slot(struct extent_buffer *eb,
				      int search_low_slot,
				      const struct btrfs_key *key,
				      int prev_cmp,
				      int *slot)
{
	/*
	 * If a previous call to btrfs_bin_search() on a parent node returned an
	 * exact match (prev_cmp == 0), we can safely assume the target key will
	 * always be at slot 0 on lower levels, since each key pointer
	 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
	 * subtree it points to. Thus we can skip searching lower levels.
	 */
	if (prev_cmp == 0) {
		*slot = 0;
		return 0;
	}

	return btrfs_bin_search(eb, search_low_slot, key, slot);
}

static int search_leaf(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       const struct btrfs_key *key,
		       struct btrfs_path *path,
		       int ins_len,
		       int prev_cmp)
{
	struct extent_buffer *leaf = path->nodes[0];
	int leaf_free_space = -1;
	int search_low_slot = 0;
	int ret;
	bool do_bin_search = true;

	/*
	 * If we are doing an insertion, the leaf has enough free space and the
	 * destination slot for the key is not slot 0, then we can unlock our
	 * write lock on the parent, and any other upper nodes, before doing the
	 * binary search on the leaf (with search_for_key_slot()), allowing other
	 * tasks to lock the parent and any other upper nodes.
	 */
	if (ins_len > 0) {
		/*
		 * Cache the leaf free space, since we will need it later and it
		 * will not change until then.
		 */
		leaf_free_space = btrfs_leaf_free_space(leaf);

		/*
		 * !path->locks[1] means we have a single node tree, the leaf is
		 * the root of the tree.
		 */
		if (path->locks[1] && leaf_free_space >= ins_len) {
			struct btrfs_disk_key first_key;

			ASSERT(btrfs_header_nritems(leaf) > 0);
			btrfs_item_key(leaf, &first_key, 0);

			/*
			 * Doing the extra comparison with the first key is cheap,
			 * taking into account that the first key is very likely
			 * already in a cache line because it immediately follows
			 * the extent buffer's header and we have recently accessed
			 * the header's level field.
			 */
			ret = btrfs_comp_keys(&first_key, key);
			if (ret < 0) {
				/*
				 * The first key is smaller than the key we want
				 * to insert, so we are safe to unlock all upper
				 * nodes and we have to do the binary search.
				 *
				 * We do use btrfs_unlock_up_safe() and not
				 * unlock_up() because the latter does not unlock
				 * nodes with a slot of 0 - we can safely unlock
				 * any node even if its slot is 0 since in this
				 * case the key does not end up at slot 0 of the
				 * leaf and there's no need to split the leaf.
				 */
				btrfs_unlock_up_safe(path, 1);
				search_low_slot = 1;
			} else {
				/*
				 * The first key is >= the key we want to
				 * insert, so we can skip the binary search as
				 * the target key will be at slot 0.
				 *
				 * We cannot unlock upper nodes when the key is
				 * less than the first key, because we will need
				 * to update the key at slot 0 of the parent node
				 * and possibly of other upper nodes too.
				 * If the key matches the first key, then we can
				 * unlock all the upper nodes, using
				 * btrfs_unlock_up_safe() instead of unlock_up()
				 * as stated above.
				 */
				if (ret == 0)
					btrfs_unlock_up_safe(path, 1);
				/*
				 * ret is already 0 or 1, matching the result of
				 * a btrfs_bin_search() call, so there is no need
				 * to adjust it.
				 */
				do_bin_search = false;
				path->slots[0] = 0;
			}
		}
	}

	if (do_bin_search) {
		ret = search_for_key_slot(leaf, search_low_slot, key,
					  prev_cmp, &path->slots[0]);
		if (ret < 0)
			return ret;
	}

	if (ins_len > 0) {
		/*
		 * Item key already exists.  In this case, if we are allowed to
In this case, if we are allowed to 1984 * insert the item (for example, in dir_item case, item key 1985 * collision is allowed), it will be merged with the original 1986 * item. Only the item size grows, no new btrfs item will be 1987 * added. If search_for_extension is not set, ins_len already 1988 * accounts the size btrfs_item, deduct it here so leaf space 1989 * check will be correct. 1990 */ 1991 if (ret == 0 && !path->search_for_extension) { 1992 ASSERT(ins_len >= sizeof(struct btrfs_item)); 1993 ins_len -= sizeof(struct btrfs_item); 1994 } 1995 1996 ASSERT(leaf_free_space >= 0); 1997 1998 if (leaf_free_space < ins_len) { 1999 int err; 2000 2001 err = split_leaf(trans, root, key, path, ins_len, 2002 (ret == 0)); 2003 ASSERT(err <= 0); 2004 if (WARN_ON(err > 0)) 2005 err = -EUCLEAN; 2006 if (err) 2007 ret = err; 2008 } 2009 } 2010 2011 return ret; 2012 } 2013 2014 /* 2015 * Look for a key in a tree and perform necessary modifications to preserve 2016 * tree invariants. 2017 * 2018 * @trans: Handle of transaction, used when modifying the tree 2019 * @p: Holds all btree nodes along the search path 2020 * @root: The root node of the tree 2021 * @key: The key we are looking for 2022 * @ins_len: Indicates purpose of search: 2023 * >0 for inserts it's size of item inserted (*) 2024 * <0 for deletions 2025 * 0 for plain searches, not modifying the tree 2026 * 2027 * (*) If size of item inserted doesn't include 2028 * sizeof(struct btrfs_item), then p->search_for_extension must 2029 * be set. 2030 * @cow: boolean should CoW operations be performed. Must always be 1 2031 * when modifying the tree. 2032 * 2033 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree. 2034 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible) 2035 * 2036 * If @key is found, 0 is returned and you can find the item in the leaf level 2037 * of the path (level 0) 2038 * 2039 * If @key isn't found, 1 is returned and the leaf level of the path (level 0) 2040 * points to the slot where it should be inserted 2041 * 2042 * If an error is encountered while searching the tree a negative error number 2043 * is returned 2044 */ 2045 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2046 const struct btrfs_key *key, struct btrfs_path *p, 2047 int ins_len, int cow) 2048 { 2049 struct btrfs_fs_info *fs_info; 2050 struct extent_buffer *b; 2051 int slot; 2052 int ret; 2053 int err; 2054 int level; 2055 int lowest_unlock = 1; 2056 /* everything at write_lock_level or lower must be write locked */ 2057 int write_lock_level = 0; 2058 u8 lowest_level = 0; 2059 int min_write_lock_level; 2060 int prev_cmp; 2061 2062 if (!root) 2063 return -EINVAL; 2064 2065 fs_info = root->fs_info; 2066 might_sleep(); 2067 2068 lowest_level = p->lowest_level; 2069 WARN_ON(lowest_level && ins_len > 0); 2070 WARN_ON(p->nodes[0] != NULL); 2071 BUG_ON(!cow && ins_len); 2072 2073 /* 2074 * For now only allow nowait for read only operations. There's no 2075 * strict reason why we can't, we just only need it for reads so it's 2076 * only implemented for reads. 
2077 */
2078 ASSERT(!p->nowait || !cow);
2079
2080 if (ins_len < 0) {
2081 lowest_unlock = 2;
2082
2083 /* When we are removing items, we might have to go up to level
2084 * two as we update tree pointers. Make sure we keep a write
2085 * lock on those levels as well.
2086 */
2087 write_lock_level = 2;
2088 } else if (ins_len > 0) {
2089 /*
2090 * for inserting items, make sure we have a write lock on
2091 * level 1 so we can update keys
2092 */
2093 write_lock_level = 1;
2094 }
2095
2096 if (!cow)
2097 write_lock_level = -1;
2098
2099 if (cow && (p->keep_locks || p->lowest_level))
2100 write_lock_level = BTRFS_MAX_LEVEL;
2101
2102 min_write_lock_level = write_lock_level;
2103
2104 if (p->need_commit_sem) {
2105 ASSERT(p->search_commit_root);
2106 if (p->nowait) {
2107 if (!down_read_trylock(&fs_info->commit_root_sem))
2108 return -EAGAIN;
2109 } else {
2110 down_read(&fs_info->commit_root_sem);
2111 }
2112 }
2113
2114 again:
2115 prev_cmp = -1;
2116 b = btrfs_search_slot_get_root(root, p, write_lock_level);
2117 if (IS_ERR(b)) {
2118 ret = PTR_ERR(b);
2119 goto done;
2120 }
2121
2122 while (b) {
2123 int dec = 0;
2124
2125 level = btrfs_header_level(b);
2126
2127 if (cow) {
2128 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2129
2130 /*
2131 * if we don't really need to cow this block
2132 * then we don't want to set the path blocking,
2133 * so we test it here
2134 */
2135 if (!should_cow_block(trans, root, b))
2136 goto cow_done;
2137
2138 /*
2139 * must have write locks on this node and the
2140 * parent
2141 */
2142 if (level > write_lock_level ||
2143 (level + 1 > write_lock_level &&
2144 level + 1 < BTRFS_MAX_LEVEL &&
2145 p->nodes[level + 1])) {
2146 write_lock_level = level + 1;
2147 btrfs_release_path(p);
2148 goto again;
2149 }
2150
2151 if (last_level)
2152 err = btrfs_cow_block(trans, root, b, NULL, 0,
2153 &b,
2154 BTRFS_NESTING_COW);
2155 else
2156 err = btrfs_cow_block(trans, root, b,
2157 p->nodes[level + 1],
2158 p->slots[level + 1], &b,
2159 BTRFS_NESTING_COW);
2160 if (err) {
2161 ret = err;
2162 goto done;
2163 }
2164 }
2165 cow_done:
2166 p->nodes[level] = b;
2167
2168 /*
2169 * we have a lock on b and as long as we aren't changing
2170 * the tree, there is no way for the items in b to change.
2171 * It is safe to drop the lock on our parent before we
2172 * go through the expensive btree search on b.
2173 *
2174 * If we're inserting or deleting (ins_len != 0), then we might
2175 * be changing slot zero, which may require changing the parent.
2176 * So, we can't drop the lock until after we know which slot
2177 * we're operating on.
2178 */ 2179 if (!ins_len && !p->keep_locks) { 2180 int u = level + 1; 2181 2182 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 2183 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 2184 p->locks[u] = 0; 2185 } 2186 } 2187 2188 if (level == 0) { 2189 if (ins_len > 0) 2190 ASSERT(write_lock_level >= 1); 2191 2192 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp); 2193 if (!p->search_for_split) 2194 unlock_up(p, level, lowest_unlock, 2195 min_write_lock_level, NULL); 2196 goto done; 2197 } 2198 2199 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot); 2200 if (ret < 0) 2201 goto done; 2202 prev_cmp = ret; 2203 2204 if (ret && slot > 0) { 2205 dec = 1; 2206 slot--; 2207 } 2208 p->slots[level] = slot; 2209 err = setup_nodes_for_search(trans, root, p, b, level, ins_len, 2210 &write_lock_level); 2211 if (err == -EAGAIN) 2212 goto again; 2213 if (err) { 2214 ret = err; 2215 goto done; 2216 } 2217 b = p->nodes[level]; 2218 slot = p->slots[level]; 2219 2220 /* 2221 * Slot 0 is special, if we change the key we have to update 2222 * the parent pointer which means we must have a write lock on 2223 * the parent 2224 */ 2225 if (slot == 0 && ins_len && write_lock_level < level + 1) { 2226 write_lock_level = level + 1; 2227 btrfs_release_path(p); 2228 goto again; 2229 } 2230 2231 unlock_up(p, level, lowest_unlock, min_write_lock_level, 2232 &write_lock_level); 2233 2234 if (level == lowest_level) { 2235 if (dec) 2236 p->slots[level]++; 2237 goto done; 2238 } 2239 2240 err = read_block_for_search(root, p, &b, slot, key); 2241 if (err == -EAGAIN && !p->nowait) 2242 goto again; 2243 if (err) { 2244 ret = err; 2245 goto done; 2246 } 2247 2248 if (!p->skip_locking) { 2249 level = btrfs_header_level(b); 2250 2251 btrfs_maybe_reset_lockdep_class(root, b); 2252 2253 if (level <= write_lock_level) { 2254 btrfs_tree_lock(b); 2255 p->locks[level] = BTRFS_WRITE_LOCK; 2256 } else { 2257 if (p->nowait) { 2258 if (!btrfs_try_tree_read_lock(b)) { 2259 free_extent_buffer(b); 2260 ret = -EAGAIN; 2261 goto done; 2262 } 2263 } else { 2264 btrfs_tree_read_lock(b); 2265 } 2266 p->locks[level] = BTRFS_READ_LOCK; 2267 } 2268 p->nodes[level] = b; 2269 } 2270 } 2271 ret = 1; 2272 done: 2273 if (ret < 0 && !p->skip_release_on_error) 2274 btrfs_release_path(p); 2275 2276 if (p->need_commit_sem) { 2277 int ret2; 2278 2279 ret2 = finish_need_commit_sem_search(p); 2280 up_read(&fs_info->commit_root_sem); 2281 if (ret2) 2282 ret = ret2; 2283 } 2284 2285 return ret; 2286 } 2287 ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO); 2288 2289 /* 2290 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the 2291 * current state of the tree together with the operations recorded in the tree 2292 * modification log to search for the key in a previous version of this tree, as 2293 * denoted by the time_seq parameter. 2294 * 2295 * Naturally, there is no support for insert, delete or cow operations. 2296 * 2297 * The resulting path and return value will be set up as if we called 2298 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 
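 *
 * Illustration only (not a call site in this file): a caller that took a
 * time_seq snapshot, e.g. via btrfs_get_tree_mod_seq(), could use it roughly
 * like this:
 *
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *	if (ret < 0)
 *		goto out;	(error, the path was already released)
 *	else if (ret == 0)
 *		the item is at path->nodes[0], slot path->slots[0]
 *	else
 *		ret == 1: the slot is where the key would be inserted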
2299 */ 2300 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 2301 struct btrfs_path *p, u64 time_seq) 2302 { 2303 struct btrfs_fs_info *fs_info = root->fs_info; 2304 struct extent_buffer *b; 2305 int slot; 2306 int ret; 2307 int err; 2308 int level; 2309 int lowest_unlock = 1; 2310 u8 lowest_level = 0; 2311 2312 lowest_level = p->lowest_level; 2313 WARN_ON(p->nodes[0] != NULL); 2314 ASSERT(!p->nowait); 2315 2316 if (p->search_commit_root) { 2317 BUG_ON(time_seq); 2318 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2319 } 2320 2321 again: 2322 b = btrfs_get_old_root(root, time_seq); 2323 if (!b) { 2324 ret = -EIO; 2325 goto done; 2326 } 2327 level = btrfs_header_level(b); 2328 p->locks[level] = BTRFS_READ_LOCK; 2329 2330 while (b) { 2331 int dec = 0; 2332 2333 level = btrfs_header_level(b); 2334 p->nodes[level] = b; 2335 2336 /* 2337 * we have a lock on b and as long as we aren't changing 2338 * the tree, there is no way to for the items in b to change. 2339 * It is safe to drop the lock on our parent before we 2340 * go through the expensive btree search on b. 2341 */ 2342 btrfs_unlock_up_safe(p, level + 1); 2343 2344 ret = btrfs_bin_search(b, 0, key, &slot); 2345 if (ret < 0) 2346 goto done; 2347 2348 if (level == 0) { 2349 p->slots[level] = slot; 2350 unlock_up(p, level, lowest_unlock, 0, NULL); 2351 goto done; 2352 } 2353 2354 if (ret && slot > 0) { 2355 dec = 1; 2356 slot--; 2357 } 2358 p->slots[level] = slot; 2359 unlock_up(p, level, lowest_unlock, 0, NULL); 2360 2361 if (level == lowest_level) { 2362 if (dec) 2363 p->slots[level]++; 2364 goto done; 2365 } 2366 2367 err = read_block_for_search(root, p, &b, slot, key); 2368 if (err == -EAGAIN && !p->nowait) 2369 goto again; 2370 if (err) { 2371 ret = err; 2372 goto done; 2373 } 2374 2375 level = btrfs_header_level(b); 2376 btrfs_tree_read_lock(b); 2377 b = btrfs_tree_mod_log_rewind(fs_info, b, time_seq); 2378 if (!b) { 2379 ret = -ENOMEM; 2380 goto done; 2381 } 2382 p->locks[level] = BTRFS_READ_LOCK; 2383 p->nodes[level] = b; 2384 } 2385 ret = 1; 2386 done: 2387 if (ret < 0) 2388 btrfs_release_path(p); 2389 2390 return ret; 2391 } 2392 2393 /* 2394 * Search the tree again to find a leaf with smaller keys. 2395 * Returns 0 if it found something. 2396 * Returns 1 if there are no smaller keys. 2397 * Returns < 0 on error. 2398 * 2399 * This may release the path, and so you may lose any locks held at the 2400 * time you call it. 2401 */ 2402 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 2403 { 2404 struct btrfs_key key; 2405 struct btrfs_key orig_key; 2406 struct btrfs_disk_key found_key; 2407 int ret; 2408 2409 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 2410 orig_key = key; 2411 2412 if (key.offset > 0) { 2413 key.offset--; 2414 } else if (key.type > 0) { 2415 key.type--; 2416 key.offset = (u64)-1; 2417 } else if (key.objectid > 0) { 2418 key.objectid--; 2419 key.type = (u8)-1; 2420 key.offset = (u64)-1; 2421 } else { 2422 return 1; 2423 } 2424 2425 btrfs_release_path(path); 2426 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2427 if (ret <= 0) 2428 return ret; 2429 2430 /* 2431 * Previous key not found. 
Even if we were at slot 0 of the leaf we had
2432 * before releasing the path and calling btrfs_search_slot(), we now may
2433 * be in a slot pointing to the same original key - this can happen if,
2434 * after we released the path, one or more items were moved from a
2435 * sibling leaf into the front of the leaf we had due to an insertion
2436 * (see push_leaf_right()).
2437 * If we hit this case and our slot is > 0, just decrement the slot
2438 * so that the caller does not process the same key again, which may or
2439 * may not break the caller, depending on its logic.
2440 */
2441 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
2442 btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
2443 ret = btrfs_comp_keys(&found_key, &orig_key);
2444 if (ret == 0) {
2445 if (path->slots[0] > 0) {
2446 path->slots[0]--;
2447 return 0;
2448 }
2449 /*
2450 * At slot 0, same key as before, it means orig_key is
2451 * the lowest, leftmost, key in the tree. We're done.
2452 */
2453 return 1;
2454 }
2455 }
2456
2457 btrfs_item_key(path->nodes[0], &found_key, 0);
2458 ret = btrfs_comp_keys(&found_key, &key);
2459 /*
2460 * We might have had an item with the previous key in the tree right
2461 * before we released our path. And after we released our path, that
2462 * item might have been pushed to the first slot (0) of the leaf we
2463 * were holding due to a tree balance. Alternatively, an item with the
2464 * previous key can exist as the only element of a leaf (big fat item).
2465 * Therefore account for these 2 cases, so that our callers (like
2466 * btrfs_previous_item) don't miss an existing item with a key matching
2467 * the previous key we computed above.
2468 */
2469 if (ret <= 0)
2470 return 0;
2471 return 1;
2472 }
2473
2474 /*
2475 * helper to use instead of search slot if no exact match is needed but
2476 * instead the next or previous item should be returned.
2477 * When find_higher is true, the next higher item is returned, the next lower
2478 * otherwise.
2479 * When return_any and find_higher are both true, and no higher item is found,
2480 * return the next lower instead.
2481 * When return_any is true and find_higher is false, and no lower item is found,
2482 * return the next higher instead.
2483 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2484 * < 0 on error
2485 */
2486 int btrfs_search_slot_for_read(struct btrfs_root *root,
2487 const struct btrfs_key *key,
2488 struct btrfs_path *p, int find_higher,
2489 int return_any)
2490 {
2491 int ret;
2492 struct extent_buffer *leaf;
2493
2494 again:
2495 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2496 if (ret <= 0)
2497 return ret;
2498 /*
2499 * a return value of 1 means the path is at the position where the
2500 * item should be inserted. Normally this is the next bigger item,
2501 * but in case the previous item is the last in a leaf, path points
2502 * to the first free slot in the previous leaf, i.e. at an invalid
2503 * item.
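 *
 * For example (illustrative numbers only): if the previous item was the
 * last of 25 items in its leaf, path->slots[0] is now 25, one past the
 * last valid slot (24), which is why we may have to step to the next or
 * previous leaf below before the slot is usable.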
2504 */ 2505 leaf = p->nodes[0]; 2506 2507 if (find_higher) { 2508 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 2509 ret = btrfs_next_leaf(root, p); 2510 if (ret <= 0) 2511 return ret; 2512 if (!return_any) 2513 return 1; 2514 /* 2515 * no higher item found, return the next 2516 * lower instead 2517 */ 2518 return_any = 0; 2519 find_higher = 0; 2520 btrfs_release_path(p); 2521 goto again; 2522 } 2523 } else { 2524 if (p->slots[0] == 0) { 2525 ret = btrfs_prev_leaf(root, p); 2526 if (ret < 0) 2527 return ret; 2528 if (!ret) { 2529 leaf = p->nodes[0]; 2530 if (p->slots[0] == btrfs_header_nritems(leaf)) 2531 p->slots[0]--; 2532 return 0; 2533 } 2534 if (!return_any) 2535 return 1; 2536 /* 2537 * no lower item found, return the next 2538 * higher instead 2539 */ 2540 return_any = 0; 2541 find_higher = 1; 2542 btrfs_release_path(p); 2543 goto again; 2544 } else { 2545 --p->slots[0]; 2546 } 2547 } 2548 return 0; 2549 } 2550 2551 /* 2552 * Execute search and call btrfs_previous_item to traverse backwards if the item 2553 * was not found. 2554 * 2555 * Return 0 if found, 1 if not found and < 0 if error. 2556 */ 2557 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key, 2558 struct btrfs_path *path) 2559 { 2560 int ret; 2561 2562 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 2563 if (ret > 0) 2564 ret = btrfs_previous_item(root, path, key->objectid, key->type); 2565 2566 if (ret == 0) 2567 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2568 2569 return ret; 2570 } 2571 2572 /* 2573 * Search for a valid slot for the given path. 2574 * 2575 * @root: The root node of the tree. 2576 * @key: Will contain a valid item if found. 2577 * @path: The starting point to validate the slot. 2578 * 2579 * Return: 0 if the item is valid 2580 * 1 if not found 2581 * <0 if error. 2582 */ 2583 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key, 2584 struct btrfs_path *path) 2585 { 2586 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 2587 int ret; 2588 2589 ret = btrfs_next_leaf(root, path); 2590 if (ret) 2591 return ret; 2592 } 2593 2594 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]); 2595 return 0; 2596 } 2597 2598 /* 2599 * adjust the pointers going up the tree, starting at level 2600 * making sure the right key of each node is points to 'key'. 2601 * This is used after shifting pointers to the left, so it stops 2602 * fixing up pointers when a given leaf/node is not in slot 0 of the 2603 * higher levels 2604 * 2605 */ 2606 static void fixup_low_keys(struct btrfs_trans_handle *trans, 2607 const struct btrfs_path *path, 2608 const struct btrfs_disk_key *key, int level) 2609 { 2610 int i; 2611 struct extent_buffer *t; 2612 int ret; 2613 2614 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2615 int tslot = path->slots[i]; 2616 2617 if (!path->nodes[i]) 2618 break; 2619 t = path->nodes[i]; 2620 ret = btrfs_tree_mod_log_insert_key(t, tslot, 2621 BTRFS_MOD_LOG_KEY_REPLACE); 2622 BUG_ON(ret < 0); 2623 btrfs_set_node_key(t, key, tslot); 2624 btrfs_mark_buffer_dirty(trans, path->nodes[i]); 2625 if (tslot != 0) 2626 break; 2627 } 2628 } 2629 2630 /* 2631 * update item key. 2632 * 2633 * This function isn't completely safe. 
It's the caller's responsibility 2634 * that the new key won't break the order 2635 */ 2636 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, 2637 const struct btrfs_path *path, 2638 const struct btrfs_key *new_key) 2639 { 2640 struct btrfs_fs_info *fs_info = trans->fs_info; 2641 struct btrfs_disk_key disk_key; 2642 struct extent_buffer *eb; 2643 int slot; 2644 2645 eb = path->nodes[0]; 2646 slot = path->slots[0]; 2647 if (slot > 0) { 2648 btrfs_item_key(eb, &disk_key, slot - 1); 2649 if (unlikely(btrfs_comp_keys(&disk_key, new_key) >= 0)) { 2650 btrfs_print_leaf(eb); 2651 btrfs_crit(fs_info, 2652 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2653 slot, btrfs_disk_key_objectid(&disk_key), 2654 btrfs_disk_key_type(&disk_key), 2655 btrfs_disk_key_offset(&disk_key), 2656 new_key->objectid, new_key->type, 2657 new_key->offset); 2658 BUG(); 2659 } 2660 } 2661 if (slot < btrfs_header_nritems(eb) - 1) { 2662 btrfs_item_key(eb, &disk_key, slot + 1); 2663 if (unlikely(btrfs_comp_keys(&disk_key, new_key) <= 0)) { 2664 btrfs_print_leaf(eb); 2665 btrfs_crit(fs_info, 2666 "slot %u key (%llu %u %llu) new key (%llu %u %llu)", 2667 slot, btrfs_disk_key_objectid(&disk_key), 2668 btrfs_disk_key_type(&disk_key), 2669 btrfs_disk_key_offset(&disk_key), 2670 new_key->objectid, new_key->type, 2671 new_key->offset); 2672 BUG(); 2673 } 2674 } 2675 2676 btrfs_cpu_key_to_disk(&disk_key, new_key); 2677 btrfs_set_item_key(eb, &disk_key, slot); 2678 btrfs_mark_buffer_dirty(trans, eb); 2679 if (slot == 0) 2680 fixup_low_keys(trans, path, &disk_key, 1); 2681 } 2682 2683 /* 2684 * Check key order of two sibling extent buffers. 2685 * 2686 * Return true if something is wrong. 2687 * Return false if everything is fine. 2688 * 2689 * Tree-checker only works inside one tree block, thus the following 2690 * corruption can not be detected by tree-checker: 2691 * 2692 * Leaf @left | Leaf @right 2693 * -------------------------------------------------------------- 2694 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 | 2695 * 2696 * Key f6 in leaf @left itself is valid, but not valid when the next 2697 * key in leaf @right is 7. 2698 * This can only be checked at tree block merge time. 2699 * And since tree checker has ensured all key order in each tree block 2700 * is correct, we only need to bother the last key of @left and the first 2701 * key of @right. 
2702 */ 2703 static bool check_sibling_keys(const struct extent_buffer *left, 2704 const struct extent_buffer *right) 2705 { 2706 struct btrfs_key left_last; 2707 struct btrfs_key right_first; 2708 int level = btrfs_header_level(left); 2709 int nr_left = btrfs_header_nritems(left); 2710 int nr_right = btrfs_header_nritems(right); 2711 2712 /* No key to check in one of the tree blocks */ 2713 if (!nr_left || !nr_right) 2714 return false; 2715 2716 if (level) { 2717 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1); 2718 btrfs_node_key_to_cpu(right, &right_first, 0); 2719 } else { 2720 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1); 2721 btrfs_item_key_to_cpu(right, &right_first, 0); 2722 } 2723 2724 if (unlikely(btrfs_comp_cpu_keys(&left_last, &right_first) >= 0)) { 2725 btrfs_crit(left->fs_info, "left extent buffer:"); 2726 btrfs_print_tree(left, false); 2727 btrfs_crit(left->fs_info, "right extent buffer:"); 2728 btrfs_print_tree(right, false); 2729 btrfs_crit(left->fs_info, 2730 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)", 2731 left_last.objectid, left_last.type, 2732 left_last.offset, right_first.objectid, 2733 right_first.type, right_first.offset); 2734 return true; 2735 } 2736 return false; 2737 } 2738 2739 /* 2740 * try to push data from one node into the next node left in the 2741 * tree. 2742 * 2743 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 2744 * error, and > 0 if there was no room in the left hand block. 2745 */ 2746 static int push_node_left(struct btrfs_trans_handle *trans, 2747 struct extent_buffer *dst, 2748 struct extent_buffer *src, int empty) 2749 { 2750 struct btrfs_fs_info *fs_info = trans->fs_info; 2751 int push_items = 0; 2752 int src_nritems; 2753 int dst_nritems; 2754 int ret = 0; 2755 2756 src_nritems = btrfs_header_nritems(src); 2757 dst_nritems = btrfs_header_nritems(dst); 2758 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2759 WARN_ON(btrfs_header_generation(src) != trans->transid); 2760 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2761 2762 if (!empty && src_nritems <= 8) 2763 return 1; 2764 2765 if (push_items <= 0) 2766 return 1; 2767 2768 if (empty) { 2769 push_items = min(src_nritems, push_items); 2770 if (push_items < src_nritems) { 2771 /* leave at least 8 pointers in the node if 2772 * we aren't going to empty it 2773 */ 2774 if (src_nritems - push_items < 8) { 2775 if (push_items <= 8) 2776 return 1; 2777 push_items -= 8; 2778 } 2779 } 2780 } else 2781 push_items = min(src_nritems - 8, push_items); 2782 2783 /* dst is the left eb, src is the middle eb */ 2784 if (check_sibling_keys(dst, src)) { 2785 ret = -EUCLEAN; 2786 btrfs_abort_transaction(trans, ret); 2787 return ret; 2788 } 2789 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items); 2790 if (ret) { 2791 btrfs_abort_transaction(trans, ret); 2792 return ret; 2793 } 2794 copy_extent_buffer(dst, src, 2795 btrfs_node_key_ptr_offset(dst, dst_nritems), 2796 btrfs_node_key_ptr_offset(src, 0), 2797 push_items * sizeof(struct btrfs_key_ptr)); 2798 2799 if (push_items < src_nritems) { 2800 /* 2801 * btrfs_tree_mod_log_eb_copy handles logging the move, so we 2802 * don't need to do an explicit tree mod log operation for it. 
2803 */ 2804 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0), 2805 btrfs_node_key_ptr_offset(src, push_items), 2806 (src_nritems - push_items) * 2807 sizeof(struct btrfs_key_ptr)); 2808 } 2809 btrfs_set_header_nritems(src, src_nritems - push_items); 2810 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2811 btrfs_mark_buffer_dirty(trans, src); 2812 btrfs_mark_buffer_dirty(trans, dst); 2813 2814 return ret; 2815 } 2816 2817 /* 2818 * try to push data from one node into the next node right in the 2819 * tree. 2820 * 2821 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 2822 * error, and > 0 if there was no room in the right hand block. 2823 * 2824 * this will only push up to 1/2 the contents of the left node over 2825 */ 2826 static int balance_node_right(struct btrfs_trans_handle *trans, 2827 struct extent_buffer *dst, 2828 struct extent_buffer *src) 2829 { 2830 struct btrfs_fs_info *fs_info = trans->fs_info; 2831 int push_items = 0; 2832 int max_push; 2833 int src_nritems; 2834 int dst_nritems; 2835 int ret = 0; 2836 2837 WARN_ON(btrfs_header_generation(src) != trans->transid); 2838 WARN_ON(btrfs_header_generation(dst) != trans->transid); 2839 2840 src_nritems = btrfs_header_nritems(src); 2841 dst_nritems = btrfs_header_nritems(dst); 2842 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems; 2843 if (push_items <= 0) 2844 return 1; 2845 2846 if (src_nritems < 4) 2847 return 1; 2848 2849 max_push = src_nritems / 2 + 1; 2850 /* don't try to empty the node */ 2851 if (max_push >= src_nritems) 2852 return 1; 2853 2854 if (max_push < push_items) 2855 push_items = max_push; 2856 2857 /* dst is the right eb, src is the middle eb */ 2858 if (check_sibling_keys(src, dst)) { 2859 ret = -EUCLEAN; 2860 btrfs_abort_transaction(trans, ret); 2861 return ret; 2862 } 2863 2864 /* 2865 * btrfs_tree_mod_log_eb_copy handles logging the move, so we don't 2866 * need to do an explicit tree mod log operation for it. 2867 */ 2868 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items), 2869 btrfs_node_key_ptr_offset(dst, 0), 2870 (dst_nritems) * 2871 sizeof(struct btrfs_key_ptr)); 2872 2873 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items, 2874 push_items); 2875 if (ret) { 2876 btrfs_abort_transaction(trans, ret); 2877 return ret; 2878 } 2879 copy_extent_buffer(dst, src, 2880 btrfs_node_key_ptr_offset(dst, 0), 2881 btrfs_node_key_ptr_offset(src, src_nritems - push_items), 2882 push_items * sizeof(struct btrfs_key_ptr)); 2883 2884 btrfs_set_header_nritems(src, src_nritems - push_items); 2885 btrfs_set_header_nritems(dst, dst_nritems + push_items); 2886 2887 btrfs_mark_buffer_dirty(trans, src); 2888 btrfs_mark_buffer_dirty(trans, dst); 2889 2890 return ret; 2891 } 2892 2893 /* 2894 * helper function to insert a new root level in the tree. 2895 * A new node is allocated, and a single item is inserted to 2896 * point to the existing root 2897 * 2898 * returns zero on success or < 0 on failure. 
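 *
 * Schematically (illustration only), for a tree whose root was node N at
 * level L:
 *
 *	before:   N  (level L, tree root)
 *
 *	after:    C  (level L + 1, new tree root with a single key pointer)
 *	          |
 *	          N  (level L, contents unchanged)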
2899 */ 2900 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 2901 struct btrfs_root *root, 2902 struct btrfs_path *path, int level) 2903 { 2904 u64 lower_gen; 2905 struct extent_buffer *lower; 2906 struct extent_buffer *c; 2907 struct extent_buffer *old; 2908 struct btrfs_disk_key lower_key; 2909 int ret; 2910 2911 BUG_ON(path->nodes[level]); 2912 BUG_ON(path->nodes[level-1] != root->node); 2913 2914 lower = path->nodes[level-1]; 2915 if (level == 1) 2916 btrfs_item_key(lower, &lower_key, 0); 2917 else 2918 btrfs_node_key(lower, &lower_key, 0); 2919 2920 c = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root), 2921 &lower_key, level, root->node->start, 0, 2922 0, BTRFS_NESTING_NEW_ROOT); 2923 if (IS_ERR(c)) 2924 return PTR_ERR(c); 2925 2926 root_add_used_bytes(root); 2927 2928 btrfs_set_header_nritems(c, 1); 2929 btrfs_set_node_key(c, &lower_key, 0); 2930 btrfs_set_node_blockptr(c, 0, lower->start); 2931 lower_gen = btrfs_header_generation(lower); 2932 WARN_ON(lower_gen != trans->transid); 2933 2934 btrfs_set_node_ptr_generation(c, 0, lower_gen); 2935 2936 btrfs_mark_buffer_dirty(trans, c); 2937 2938 old = root->node; 2939 ret = btrfs_tree_mod_log_insert_root(root->node, c, false); 2940 if (ret < 0) { 2941 int ret2; 2942 2943 ret2 = btrfs_free_tree_block(trans, btrfs_root_id(root), c, 0, 1); 2944 if (ret2 < 0) 2945 btrfs_abort_transaction(trans, ret2); 2946 btrfs_tree_unlock(c); 2947 free_extent_buffer(c); 2948 return ret; 2949 } 2950 rcu_assign_pointer(root->node, c); 2951 2952 /* the super has an extra ref to root->node */ 2953 free_extent_buffer(old); 2954 2955 add_root_to_dirty_list(root); 2956 atomic_inc(&c->refs); 2957 path->nodes[level] = c; 2958 path->locks[level] = BTRFS_WRITE_LOCK; 2959 path->slots[level] = 0; 2960 return 0; 2961 } 2962 2963 /* 2964 * worker function to insert a single pointer in a node. 2965 * the node should have enough room for the pointer already 2966 * 2967 * slot and level indicate where you want the key to go, and 2968 * blocknr is the block the key points to. 
2969 */ 2970 static int insert_ptr(struct btrfs_trans_handle *trans, 2971 const struct btrfs_path *path, 2972 const struct btrfs_disk_key *key, u64 bytenr, 2973 int slot, int level) 2974 { 2975 struct extent_buffer *lower; 2976 int nritems; 2977 int ret; 2978 2979 BUG_ON(!path->nodes[level]); 2980 btrfs_assert_tree_write_locked(path->nodes[level]); 2981 lower = path->nodes[level]; 2982 nritems = btrfs_header_nritems(lower); 2983 BUG_ON(slot > nritems); 2984 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info)); 2985 if (slot != nritems) { 2986 if (level) { 2987 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1, 2988 slot, nritems - slot); 2989 if (ret < 0) { 2990 btrfs_abort_transaction(trans, ret); 2991 return ret; 2992 } 2993 } 2994 memmove_extent_buffer(lower, 2995 btrfs_node_key_ptr_offset(lower, slot + 1), 2996 btrfs_node_key_ptr_offset(lower, slot), 2997 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 2998 } 2999 if (level) { 3000 ret = btrfs_tree_mod_log_insert_key(lower, slot, 3001 BTRFS_MOD_LOG_KEY_ADD); 3002 if (ret < 0) { 3003 btrfs_abort_transaction(trans, ret); 3004 return ret; 3005 } 3006 } 3007 btrfs_set_node_key(lower, key, slot); 3008 btrfs_set_node_blockptr(lower, slot, bytenr); 3009 WARN_ON(trans->transid == 0); 3010 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 3011 btrfs_set_header_nritems(lower, nritems + 1); 3012 btrfs_mark_buffer_dirty(trans, lower); 3013 3014 return 0; 3015 } 3016 3017 /* 3018 * split the node at the specified level in path in two. 3019 * The path is corrected to point to the appropriate node after the split 3020 * 3021 * Before splitting this tries to make some room in the node by pushing 3022 * left and right, if either one works, it returns right away. 3023 * 3024 * returns 0 on success and < 0 on failure 3025 */ 3026 static noinline int split_node(struct btrfs_trans_handle *trans, 3027 struct btrfs_root *root, 3028 struct btrfs_path *path, int level) 3029 { 3030 struct btrfs_fs_info *fs_info = root->fs_info; 3031 struct extent_buffer *c; 3032 struct extent_buffer *split; 3033 struct btrfs_disk_key disk_key; 3034 int mid; 3035 int ret; 3036 u32 c_nritems; 3037 3038 c = path->nodes[level]; 3039 WARN_ON(btrfs_header_generation(c) != trans->transid); 3040 if (c == root->node) { 3041 /* 3042 * trying to split the root, lets make a new one 3043 * 3044 * tree mod log: We don't log_removal old root in 3045 * insert_new_root, because that root buffer will be kept as a 3046 * normal node. We are going to log removal of half of the 3047 * elements below with btrfs_tree_mod_log_eb_copy(). We're 3048 * holding a tree lock on the buffer, which is why we cannot 3049 * race with other tree_mod_log users. 
3050 */ 3051 ret = insert_new_root(trans, root, path, level + 1); 3052 if (ret) 3053 return ret; 3054 } else { 3055 ret = push_nodes_for_insert(trans, root, path, level); 3056 c = path->nodes[level]; 3057 if (!ret && btrfs_header_nritems(c) < 3058 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) 3059 return 0; 3060 if (ret < 0) 3061 return ret; 3062 } 3063 3064 c_nritems = btrfs_header_nritems(c); 3065 mid = (c_nritems + 1) / 2; 3066 btrfs_node_key(c, &disk_key, mid); 3067 3068 split = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root), 3069 &disk_key, level, c->start, 0, 3070 0, BTRFS_NESTING_SPLIT); 3071 if (IS_ERR(split)) 3072 return PTR_ERR(split); 3073 3074 root_add_used_bytes(root); 3075 ASSERT(btrfs_header_level(c) == level); 3076 3077 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid); 3078 if (ret) { 3079 btrfs_tree_unlock(split); 3080 free_extent_buffer(split); 3081 btrfs_abort_transaction(trans, ret); 3082 return ret; 3083 } 3084 copy_extent_buffer(split, c, 3085 btrfs_node_key_ptr_offset(split, 0), 3086 btrfs_node_key_ptr_offset(c, mid), 3087 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 3088 btrfs_set_header_nritems(split, c_nritems - mid); 3089 btrfs_set_header_nritems(c, mid); 3090 3091 btrfs_mark_buffer_dirty(trans, c); 3092 btrfs_mark_buffer_dirty(trans, split); 3093 3094 ret = insert_ptr(trans, path, &disk_key, split->start, 3095 path->slots[level + 1] + 1, level + 1); 3096 if (ret < 0) { 3097 btrfs_tree_unlock(split); 3098 free_extent_buffer(split); 3099 return ret; 3100 } 3101 3102 if (path->slots[level] >= mid) { 3103 path->slots[level] -= mid; 3104 btrfs_tree_unlock(c); 3105 free_extent_buffer(c); 3106 path->nodes[level] = split; 3107 path->slots[level + 1] += 1; 3108 } else { 3109 btrfs_tree_unlock(split); 3110 free_extent_buffer(split); 3111 } 3112 return 0; 3113 } 3114 3115 /* 3116 * how many bytes are required to store the items in a leaf. start 3117 * and nr indicate which items in the leaf to check. This totals up the 3118 * space used both by the item structs and the item data 3119 */ 3120 static int leaf_space_used(const struct extent_buffer *l, int start, int nr) 3121 { 3122 int data_len; 3123 int nritems = btrfs_header_nritems(l); 3124 int end = min(nritems, start + nr) - 1; 3125 3126 if (!nr) 3127 return 0; 3128 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start); 3129 data_len = data_len - btrfs_item_offset(l, end); 3130 data_len += sizeof(struct btrfs_item) * nr; 3131 WARN_ON(data_len < 0); 3132 return data_len; 3133 } 3134 3135 /* 3136 * The space between the end of the leaf items and 3137 * the start of the leaf data. IOW, how much room 3138 * the leaf has left for both items and data 3139 */ 3140 int btrfs_leaf_free_space(const struct extent_buffer *leaf) 3141 { 3142 struct btrfs_fs_info *fs_info = leaf->fs_info; 3143 int nritems = btrfs_header_nritems(leaf); 3144 int ret; 3145 3146 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems); 3147 if (ret < 0) { 3148 btrfs_crit(fs_info, 3149 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 3150 ret, 3151 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info), 3152 leaf_space_used(leaf, 0, nritems), nritems); 3153 } 3154 return ret; 3155 } 3156 3157 /* 3158 * min slot controls the lowest index we're willing to push to the 3159 * right. 
We'll push up to and including min_slot, but no lower 3160 */ 3161 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, 3162 struct btrfs_path *path, 3163 int data_size, int empty, 3164 struct extent_buffer *right, 3165 int free_space, u32 left_nritems, 3166 u32 min_slot) 3167 { 3168 struct btrfs_fs_info *fs_info = right->fs_info; 3169 struct extent_buffer *left = path->nodes[0]; 3170 struct extent_buffer *upper = path->nodes[1]; 3171 struct btrfs_map_token token; 3172 struct btrfs_disk_key disk_key; 3173 int slot; 3174 u32 i; 3175 int push_space = 0; 3176 int push_items = 0; 3177 u32 nr; 3178 u32 right_nritems; 3179 u32 data_end; 3180 u32 this_item_size; 3181 3182 if (empty) 3183 nr = 0; 3184 else 3185 nr = max_t(u32, 1, min_slot); 3186 3187 if (path->slots[0] >= left_nritems) 3188 push_space += data_size; 3189 3190 slot = path->slots[1]; 3191 i = left_nritems - 1; 3192 while (i >= nr) { 3193 if (!empty && push_items > 0) { 3194 if (path->slots[0] > i) 3195 break; 3196 if (path->slots[0] == i) { 3197 int space = btrfs_leaf_free_space(left); 3198 3199 if (space + push_space * 2 > free_space) 3200 break; 3201 } 3202 } 3203 3204 if (path->slots[0] == i) 3205 push_space += data_size; 3206 3207 this_item_size = btrfs_item_size(left, i); 3208 if (this_item_size + sizeof(struct btrfs_item) + 3209 push_space > free_space) 3210 break; 3211 3212 push_items++; 3213 push_space += this_item_size + sizeof(struct btrfs_item); 3214 if (i == 0) 3215 break; 3216 i--; 3217 } 3218 3219 if (push_items == 0) 3220 goto out_unlock; 3221 3222 WARN_ON(!empty && push_items == left_nritems); 3223 3224 /* push left to right */ 3225 right_nritems = btrfs_header_nritems(right); 3226 3227 push_space = btrfs_item_data_end(left, left_nritems - push_items); 3228 push_space -= leaf_data_end(left); 3229 3230 /* make room in the right data area */ 3231 data_end = leaf_data_end(right); 3232 memmove_leaf_data(right, data_end - push_space, data_end, 3233 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end); 3234 3235 /* copy from the left data area */ 3236 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3237 leaf_data_end(left), push_space); 3238 3239 memmove_leaf_items(right, push_items, 0, right_nritems); 3240 3241 /* copy the items from left to right */ 3242 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items); 3243 3244 /* update the item pointers */ 3245 btrfs_init_map_token(&token, right); 3246 right_nritems += push_items; 3247 btrfs_set_header_nritems(right, right_nritems); 3248 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3249 for (i = 0; i < right_nritems; i++) { 3250 push_space -= btrfs_token_item_size(&token, i); 3251 btrfs_set_token_item_offset(&token, i, push_space); 3252 } 3253 3254 left_nritems -= push_items; 3255 btrfs_set_header_nritems(left, left_nritems); 3256 3257 if (left_nritems) 3258 btrfs_mark_buffer_dirty(trans, left); 3259 else 3260 btrfs_clear_buffer_dirty(trans, left); 3261 3262 btrfs_mark_buffer_dirty(trans, right); 3263 3264 btrfs_item_key(right, &disk_key, 0); 3265 btrfs_set_node_key(upper, &disk_key, slot + 1); 3266 btrfs_mark_buffer_dirty(trans, upper); 3267 3268 /* then fixup the leaf pointer in the path */ 3269 if (path->slots[0] >= left_nritems) { 3270 path->slots[0] -= left_nritems; 3271 if (btrfs_header_nritems(path->nodes[0]) == 0) 3272 btrfs_clear_buffer_dirty(trans, path->nodes[0]); 3273 btrfs_tree_unlock(path->nodes[0]); 3274 free_extent_buffer(path->nodes[0]); 3275 path->nodes[0] = right; 3276 path->slots[1] += 1; 3277 } else { 3278 
btrfs_tree_unlock(right); 3279 free_extent_buffer(right); 3280 } 3281 return 0; 3282 3283 out_unlock: 3284 btrfs_tree_unlock(right); 3285 free_extent_buffer(right); 3286 return 1; 3287 } 3288 3289 /* 3290 * push some data in the path leaf to the right, trying to free up at 3291 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3292 * 3293 * returns 1 if the push failed because the other node didn't have enough 3294 * room, 0 if everything worked out and < 0 if there were major errors. 3295 * 3296 * this will push starting from min_slot to the end of the leaf. It won't 3297 * push any slot lower than min_slot 3298 */ 3299 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3300 *root, struct btrfs_path *path, 3301 int min_data_size, int data_size, 3302 int empty, u32 min_slot) 3303 { 3304 struct extent_buffer *left = path->nodes[0]; 3305 struct extent_buffer *right; 3306 struct extent_buffer *upper; 3307 int slot; 3308 int free_space; 3309 u32 left_nritems; 3310 int ret; 3311 3312 if (!path->nodes[1]) 3313 return 1; 3314 3315 slot = path->slots[1]; 3316 upper = path->nodes[1]; 3317 if (slot >= btrfs_header_nritems(upper) - 1) 3318 return 1; 3319 3320 btrfs_assert_tree_write_locked(path->nodes[1]); 3321 3322 right = btrfs_read_node_slot(upper, slot + 1); 3323 if (IS_ERR(right)) 3324 return PTR_ERR(right); 3325 3326 btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT); 3327 3328 free_space = btrfs_leaf_free_space(right); 3329 if (free_space < data_size) 3330 goto out_unlock; 3331 3332 ret = btrfs_cow_block(trans, root, right, upper, 3333 slot + 1, &right, BTRFS_NESTING_RIGHT_COW); 3334 if (ret) 3335 goto out_unlock; 3336 3337 left_nritems = btrfs_header_nritems(left); 3338 if (left_nritems == 0) 3339 goto out_unlock; 3340 3341 if (check_sibling_keys(left, right)) { 3342 ret = -EUCLEAN; 3343 btrfs_abort_transaction(trans, ret); 3344 btrfs_tree_unlock(right); 3345 free_extent_buffer(right); 3346 return ret; 3347 } 3348 if (path->slots[0] == left_nritems && !empty) { 3349 /* Key greater than all keys in the leaf, right neighbor has 3350 * enough room for it and we're not emptying our leaf to delete 3351 * it, therefore use right neighbor to insert the new item and 3352 * no need to touch/dirty our left leaf. */ 3353 btrfs_tree_unlock(left); 3354 free_extent_buffer(left); 3355 path->nodes[0] = right; 3356 path->slots[0] = 0; 3357 path->slots[1]++; 3358 return 0; 3359 } 3360 3361 return __push_leaf_right(trans, path, min_data_size, empty, right, 3362 free_space, left_nritems, min_slot); 3363 out_unlock: 3364 btrfs_tree_unlock(right); 3365 free_extent_buffer(right); 3366 return 1; 3367 } 3368 3369 /* 3370 * push some data in the path leaf to the left, trying to free up at 3371 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3372 * 3373 * max_slot can put a limit on how far into the leaf we'll push items. The 3374 * item at 'max_slot' won't be touched. 
Use (u32)-1 to make us do all the 3375 * items 3376 */ 3377 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, 3378 struct btrfs_path *path, int data_size, 3379 int empty, struct extent_buffer *left, 3380 int free_space, u32 right_nritems, 3381 u32 max_slot) 3382 { 3383 struct btrfs_fs_info *fs_info = left->fs_info; 3384 struct btrfs_disk_key disk_key; 3385 struct extent_buffer *right = path->nodes[0]; 3386 int i; 3387 int push_space = 0; 3388 int push_items = 0; 3389 u32 old_left_nritems; 3390 u32 nr; 3391 int ret = 0; 3392 u32 this_item_size; 3393 u32 old_left_item_size; 3394 struct btrfs_map_token token; 3395 3396 if (empty) 3397 nr = min(right_nritems, max_slot); 3398 else 3399 nr = min(right_nritems - 1, max_slot); 3400 3401 for (i = 0; i < nr; i++) { 3402 if (!empty && push_items > 0) { 3403 if (path->slots[0] < i) 3404 break; 3405 if (path->slots[0] == i) { 3406 int space = btrfs_leaf_free_space(right); 3407 3408 if (space + push_space * 2 > free_space) 3409 break; 3410 } 3411 } 3412 3413 if (path->slots[0] == i) 3414 push_space += data_size; 3415 3416 this_item_size = btrfs_item_size(right, i); 3417 if (this_item_size + sizeof(struct btrfs_item) + push_space > 3418 free_space) 3419 break; 3420 3421 push_items++; 3422 push_space += this_item_size + sizeof(struct btrfs_item); 3423 } 3424 3425 if (push_items == 0) { 3426 ret = 1; 3427 goto out; 3428 } 3429 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 3430 3431 /* push data from right to left */ 3432 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items); 3433 3434 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) - 3435 btrfs_item_offset(right, push_items - 1); 3436 3437 copy_leaf_data(left, right, leaf_data_end(left) - push_space, 3438 btrfs_item_offset(right, push_items - 1), push_space); 3439 old_left_nritems = btrfs_header_nritems(left); 3440 BUG_ON(old_left_nritems <= 0); 3441 3442 btrfs_init_map_token(&token, left); 3443 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1); 3444 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3445 u32 ioff; 3446 3447 ioff = btrfs_token_item_offset(&token, i); 3448 btrfs_set_token_item_offset(&token, i, 3449 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size)); 3450 } 3451 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3452 3453 /* fixup right node */ 3454 if (push_items > right_nritems) 3455 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3456 right_nritems); 3457 3458 if (push_items < right_nritems) { 3459 push_space = btrfs_item_offset(right, push_items - 1) - 3460 leaf_data_end(right); 3461 memmove_leaf_data(right, 3462 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space, 3463 leaf_data_end(right), push_space); 3464 3465 memmove_leaf_items(right, 0, push_items, 3466 btrfs_header_nritems(right) - push_items); 3467 } 3468 3469 btrfs_init_map_token(&token, right); 3470 right_nritems -= push_items; 3471 btrfs_set_header_nritems(right, right_nritems); 3472 push_space = BTRFS_LEAF_DATA_SIZE(fs_info); 3473 for (i = 0; i < right_nritems; i++) { 3474 push_space = push_space - btrfs_token_item_size(&token, i); 3475 btrfs_set_token_item_offset(&token, i, push_space); 3476 } 3477 3478 btrfs_mark_buffer_dirty(trans, left); 3479 if (right_nritems) 3480 btrfs_mark_buffer_dirty(trans, right); 3481 else 3482 btrfs_clear_buffer_dirty(trans, right); 3483 3484 btrfs_item_key(right, &disk_key, 0); 3485 fixup_low_keys(trans, path, &disk_key, 1); 3486 3487 /* then fixup the leaf pointer in the path */ 3488 if 
(path->slots[0] < push_items) { 3489 path->slots[0] += old_left_nritems; 3490 btrfs_tree_unlock(path->nodes[0]); 3491 free_extent_buffer(path->nodes[0]); 3492 path->nodes[0] = left; 3493 path->slots[1] -= 1; 3494 } else { 3495 btrfs_tree_unlock(left); 3496 free_extent_buffer(left); 3497 path->slots[0] -= push_items; 3498 } 3499 BUG_ON(path->slots[0] < 0); 3500 return ret; 3501 out: 3502 btrfs_tree_unlock(left); 3503 free_extent_buffer(left); 3504 return ret; 3505 } 3506 3507 /* 3508 * push some data in the path leaf to the left, trying to free up at 3509 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3510 * 3511 * max_slot can put a limit on how far into the leaf we'll push items. The 3512 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the 3513 * items 3514 */ 3515 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3516 *root, struct btrfs_path *path, int min_data_size, 3517 int data_size, int empty, u32 max_slot) 3518 { 3519 struct extent_buffer *right = path->nodes[0]; 3520 struct extent_buffer *left; 3521 int slot; 3522 int free_space; 3523 u32 right_nritems; 3524 int ret = 0; 3525 3526 slot = path->slots[1]; 3527 if (slot == 0) 3528 return 1; 3529 if (!path->nodes[1]) 3530 return 1; 3531 3532 right_nritems = btrfs_header_nritems(right); 3533 if (right_nritems == 0) 3534 return 1; 3535 3536 btrfs_assert_tree_write_locked(path->nodes[1]); 3537 3538 left = btrfs_read_node_slot(path->nodes[1], slot - 1); 3539 if (IS_ERR(left)) 3540 return PTR_ERR(left); 3541 3542 btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT); 3543 3544 free_space = btrfs_leaf_free_space(left); 3545 if (free_space < data_size) { 3546 ret = 1; 3547 goto out; 3548 } 3549 3550 ret = btrfs_cow_block(trans, root, left, 3551 path->nodes[1], slot - 1, &left, 3552 BTRFS_NESTING_LEFT_COW); 3553 if (ret) { 3554 /* we hit -ENOSPC, but it isn't fatal here */ 3555 if (ret == -ENOSPC) 3556 ret = 1; 3557 goto out; 3558 } 3559 3560 if (check_sibling_keys(left, right)) { 3561 ret = -EUCLEAN; 3562 btrfs_abort_transaction(trans, ret); 3563 goto out; 3564 } 3565 return __push_leaf_left(trans, path, min_data_size, empty, left, 3566 free_space, right_nritems, max_slot); 3567 out: 3568 btrfs_tree_unlock(left); 3569 free_extent_buffer(left); 3570 return ret; 3571 } 3572 3573 /* 3574 * split the path's leaf in two, making sure there is at least data_size 3575 * available for the resulting leaf level of the path. 
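 *
 * Sketch of the item movement (illustration only): with nritems items in
 * leaf 'l' and a split point 'mid',
 *
 *	l     keeps items [0, mid)
 *	right gets  items [mid, nritems)
 *
 * and the path is switched to whichever of the two leaves now holds the
 * original slot.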
3576 */ 3577 static noinline int copy_for_split(struct btrfs_trans_handle *trans, 3578 struct btrfs_path *path, 3579 struct extent_buffer *l, 3580 struct extent_buffer *right, 3581 int slot, int mid, int nritems) 3582 { 3583 struct btrfs_fs_info *fs_info = trans->fs_info; 3584 int data_copy_size; 3585 int rt_data_off; 3586 int i; 3587 int ret; 3588 struct btrfs_disk_key disk_key; 3589 struct btrfs_map_token token; 3590 3591 nritems = nritems - mid; 3592 btrfs_set_header_nritems(right, nritems); 3593 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l); 3594 3595 copy_leaf_items(right, l, 0, mid, nritems); 3596 3597 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size, 3598 leaf_data_end(l), data_copy_size); 3599 3600 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid); 3601 3602 btrfs_init_map_token(&token, right); 3603 for (i = 0; i < nritems; i++) { 3604 u32 ioff; 3605 3606 ioff = btrfs_token_item_offset(&token, i); 3607 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off); 3608 } 3609 3610 btrfs_set_header_nritems(l, mid); 3611 btrfs_item_key(right, &disk_key, 0); 3612 ret = insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1); 3613 if (ret < 0) 3614 return ret; 3615 3616 btrfs_mark_buffer_dirty(trans, right); 3617 btrfs_mark_buffer_dirty(trans, l); 3618 BUG_ON(path->slots[0] != slot); 3619 3620 if (mid <= slot) { 3621 btrfs_tree_unlock(path->nodes[0]); 3622 free_extent_buffer(path->nodes[0]); 3623 path->nodes[0] = right; 3624 path->slots[0] -= mid; 3625 path->slots[1] += 1; 3626 } else { 3627 btrfs_tree_unlock(right); 3628 free_extent_buffer(right); 3629 } 3630 3631 BUG_ON(path->slots[0] < 0); 3632 3633 return 0; 3634 } 3635 3636 /* 3637 * double splits happen when we need to insert a big item in the middle 3638 * of a leaf. A double split can leave us with 3 mostly empty leaves: 3639 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 3640 * A B C 3641 * 3642 * We avoid this by trying to push the items on either side of our target 3643 * into the adjacent leaves. If all goes well we can avoid the double split 3644 * completely. 3645 */ 3646 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 3647 struct btrfs_root *root, 3648 struct btrfs_path *path, 3649 int data_size) 3650 { 3651 int ret; 3652 int progress = 0; 3653 int slot; 3654 u32 nritems; 3655 int space_needed = data_size; 3656 3657 slot = path->slots[0]; 3658 if (slot < btrfs_header_nritems(path->nodes[0])) 3659 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3660 3661 /* 3662 * try to push all the items after our slot into the 3663 * right leaf 3664 */ 3665 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 3666 if (ret < 0) 3667 return ret; 3668 3669 if (ret == 0) 3670 progress++; 3671 3672 nritems = btrfs_header_nritems(path->nodes[0]); 3673 /* 3674 * our goal is to get our slot at the start or end of a leaf. 
If 3675 * we've done so we're done 3676 */ 3677 if (path->slots[0] == 0 || path->slots[0] == nritems) 3678 return 0; 3679 3680 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3681 return 0; 3682 3683 /* try to push all the items before our slot into the next leaf */ 3684 slot = path->slots[0]; 3685 space_needed = data_size; 3686 if (slot > 0) 3687 space_needed -= btrfs_leaf_free_space(path->nodes[0]); 3688 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 3689 if (ret < 0) 3690 return ret; 3691 3692 if (ret == 0) 3693 progress++; 3694 3695 if (progress) 3696 return 0; 3697 return 1; 3698 } 3699 3700 /* 3701 * split the path's leaf in two, making sure there is at least data_size 3702 * available for the resulting leaf level of the path. 3703 * 3704 * returns 0 if all went well and < 0 on failure. 3705 */ 3706 static noinline int split_leaf(struct btrfs_trans_handle *trans, 3707 struct btrfs_root *root, 3708 const struct btrfs_key *ins_key, 3709 struct btrfs_path *path, int data_size, 3710 int extend) 3711 { 3712 struct btrfs_disk_key disk_key; 3713 struct extent_buffer *l; 3714 u32 nritems; 3715 int mid; 3716 int slot; 3717 struct extent_buffer *right; 3718 struct btrfs_fs_info *fs_info = root->fs_info; 3719 int ret = 0; 3720 int wret; 3721 int split; 3722 int num_doubles = 0; 3723 int tried_avoid_double = 0; 3724 3725 l = path->nodes[0]; 3726 slot = path->slots[0]; 3727 if (extend && data_size + btrfs_item_size(l, slot) + 3728 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info)) 3729 return -EOVERFLOW; 3730 3731 /* first try to make some room by pushing left and right */ 3732 if (data_size && path->nodes[1]) { 3733 int space_needed = data_size; 3734 3735 if (slot < btrfs_header_nritems(l)) 3736 space_needed -= btrfs_leaf_free_space(l); 3737 3738 wret = push_leaf_right(trans, root, path, space_needed, 3739 space_needed, 0, 0); 3740 if (wret < 0) 3741 return wret; 3742 if (wret) { 3743 space_needed = data_size; 3744 if (slot > 0) 3745 space_needed -= btrfs_leaf_free_space(l); 3746 wret = push_leaf_left(trans, root, path, space_needed, 3747 space_needed, 0, (u32)-1); 3748 if (wret < 0) 3749 return wret; 3750 } 3751 l = path->nodes[0]; 3752 3753 /* did the pushes work? 
*/
3754 if (btrfs_leaf_free_space(l) >= data_size)
3755 return 0;
3756 }
3757
3758 if (!path->nodes[1]) {
3759 ret = insert_new_root(trans, root, path, 1);
3760 if (ret)
3761 return ret;
3762 }
3763 again:
3764 split = 1;
3765 l = path->nodes[0];
3766 slot = path->slots[0];
3767 nritems = btrfs_header_nritems(l);
3768 mid = (nritems + 1) / 2;
3769
3770 if (mid <= slot) {
3771 if (nritems == 1 ||
3772 leaf_space_used(l, mid, nritems - mid) + data_size >
3773 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3774 if (slot >= nritems) {
3775 split = 0;
3776 } else {
3777 mid = slot;
3778 if (mid != nritems &&
3779 leaf_space_used(l, mid, nritems - mid) +
3780 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3781 if (data_size && !tried_avoid_double)
3782 goto push_for_double;
3783 split = 2;
3784 }
3785 }
3786 }
3787 } else {
3788 if (leaf_space_used(l, 0, mid) + data_size >
3789 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3790 if (!extend && data_size && slot == 0) {
3791 split = 0;
3792 } else if ((extend || !data_size) && slot == 0) {
3793 mid = 1;
3794 } else {
3795 mid = slot;
3796 if (mid != nritems &&
3797 leaf_space_used(l, mid, nritems - mid) +
3798 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3799 if (data_size && !tried_avoid_double)
3800 goto push_for_double;
3801 split = 2;
3802 }
3803 }
3804 }
3805 }
3806
3807 if (split == 0)
3808 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3809 else
3810 btrfs_item_key(l, &disk_key, mid);
3811
3812 /*
3813 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double
3814 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
3815 * subclasses, which is 8 at the time of this patch, and we've maxed it
3816 * out. In the future we could add a
3817 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
3818 * use BTRFS_NESTING_NEW_ROOT.
3819 */
3820 right = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root),
3821 &disk_key, 0, l->start, 0, 0,
3822 num_doubles ? BTRFS_NESTING_NEW_ROOT :
3823 BTRFS_NESTING_SPLIT);
3824 if (IS_ERR(right))
3825 return PTR_ERR(right);
3826
3827 root_add_used_bytes(root);
3828
3829 if (split == 0) {
3830 if (mid <= slot) {
3831 btrfs_set_header_nritems(right, 0);
3832 ret = insert_ptr(trans, path, &disk_key,
3833 right->start, path->slots[1] + 1, 1);
3834 if (ret < 0) {
3835 btrfs_tree_unlock(right);
3836 free_extent_buffer(right);
3837 return ret;
3838 }
3839 btrfs_tree_unlock(path->nodes[0]);
3840 free_extent_buffer(path->nodes[0]);
3841 path->nodes[0] = right;
3842 path->slots[0] = 0;
3843 path->slots[1] += 1;
3844 } else {
3845 btrfs_set_header_nritems(right, 0);
3846 ret = insert_ptr(trans, path, &disk_key,
3847 right->start, path->slots[1], 1);
3848 if (ret < 0) {
3849 btrfs_tree_unlock(right);
3850 free_extent_buffer(right);
3851 return ret;
3852 }
3853 btrfs_tree_unlock(path->nodes[0]);
3854 free_extent_buffer(path->nodes[0]);
3855 path->nodes[0] = right;
3856 path->slots[0] = 0;
3857 if (path->slots[1] == 0)
3858 fixup_low_keys(trans, path, &disk_key, 1);
3859 }
3860 /*
3861 * We create a new leaf 'right' for the required ins_len and
3862 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
3863 * the new item of ins_len bytes into 'right'.
3864 */ 3865 return ret; 3866 } 3867 3868 ret = copy_for_split(trans, path, l, right, slot, mid, nritems); 3869 if (ret < 0) { 3870 btrfs_tree_unlock(right); 3871 free_extent_buffer(right); 3872 return ret; 3873 } 3874 3875 if (split == 2) { 3876 BUG_ON(num_doubles != 0); 3877 num_doubles++; 3878 goto again; 3879 } 3880 3881 return 0; 3882 3883 push_for_double: 3884 push_for_double_split(trans, root, path, data_size); 3885 tried_avoid_double = 1; 3886 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size) 3887 return 0; 3888 goto again; 3889 } 3890 3891 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 3892 struct btrfs_root *root, 3893 struct btrfs_path *path, int ins_len) 3894 { 3895 struct btrfs_key key; 3896 struct extent_buffer *leaf; 3897 struct btrfs_file_extent_item *fi; 3898 u64 extent_len = 0; 3899 u32 item_size; 3900 int ret; 3901 3902 leaf = path->nodes[0]; 3903 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3904 3905 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 3906 key.type != BTRFS_EXTENT_CSUM_KEY); 3907 3908 if (btrfs_leaf_free_space(leaf) >= ins_len) 3909 return 0; 3910 3911 item_size = btrfs_item_size(leaf, path->slots[0]); 3912 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3913 fi = btrfs_item_ptr(leaf, path->slots[0], 3914 struct btrfs_file_extent_item); 3915 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 3916 } 3917 btrfs_release_path(path); 3918 3919 path->keep_locks = 1; 3920 path->search_for_split = 1; 3921 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 3922 path->search_for_split = 0; 3923 if (ret > 0) 3924 ret = -EAGAIN; 3925 if (ret < 0) 3926 goto err; 3927 3928 ret = -EAGAIN; 3929 leaf = path->nodes[0]; 3930 /* if our item isn't there, return now */ 3931 if (item_size != btrfs_item_size(leaf, path->slots[0])) 3932 goto err; 3933 3934 /* the leaf has changed, it now has room. return now */ 3935 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len) 3936 goto err; 3937 3938 if (key.type == BTRFS_EXTENT_DATA_KEY) { 3939 fi = btrfs_item_ptr(leaf, path->slots[0], 3940 struct btrfs_file_extent_item); 3941 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 3942 goto err; 3943 } 3944 3945 ret = split_leaf(trans, root, &key, path, ins_len, 1); 3946 if (ret) 3947 goto err; 3948 3949 path->keep_locks = 0; 3950 btrfs_unlock_up_safe(path, 1); 3951 return 0; 3952 err: 3953 path->keep_locks = 0; 3954 return ret; 3955 } 3956 3957 static noinline int split_item(struct btrfs_trans_handle *trans, 3958 struct btrfs_path *path, 3959 const struct btrfs_key *new_key, 3960 unsigned long split_offset) 3961 { 3962 struct extent_buffer *leaf; 3963 int orig_slot, slot; 3964 char *buf; 3965 u32 nritems; 3966 u32 item_size; 3967 u32 orig_offset; 3968 struct btrfs_disk_key disk_key; 3969 3970 leaf = path->nodes[0]; 3971 /* 3972 * Shouldn't happen because the caller must have previously called 3973 * setup_leaf_for_split() to make room for the new item in the leaf. 
3974 */
3975 if (WARN_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item)))
3976 return -ENOSPC;
3977
3978 orig_slot = path->slots[0];
3979 orig_offset = btrfs_item_offset(leaf, path->slots[0]);
3980 item_size = btrfs_item_size(leaf, path->slots[0]);
3981
3982 buf = kmalloc(item_size, GFP_NOFS);
3983 if (!buf)
3984 return -ENOMEM;
3985
3986 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3987 path->slots[0]), item_size);
3988
3989 slot = path->slots[0] + 1;
3990 nritems = btrfs_header_nritems(leaf);
3991 if (slot != nritems) {
3992 /* shift the items */
3993 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot);
3994 }
3995
3996 btrfs_cpu_key_to_disk(&disk_key, new_key);
3997 btrfs_set_item_key(leaf, &disk_key, slot);
3998
3999 btrfs_set_item_offset(leaf, slot, orig_offset);
4000 btrfs_set_item_size(leaf, slot, item_size - split_offset);
4001
4002 btrfs_set_item_offset(leaf, orig_slot,
4003 orig_offset + item_size - split_offset);
4004 btrfs_set_item_size(leaf, orig_slot, split_offset);
4005
4006 btrfs_set_header_nritems(leaf, nritems + 1);
4007
4008 /* write the data for the start of the original item */
4009 write_extent_buffer(leaf, buf,
4010 btrfs_item_ptr_offset(leaf, path->slots[0]),
4011 split_offset);
4012
4013 /* write the data for the new item */
4014 write_extent_buffer(leaf, buf + split_offset,
4015 btrfs_item_ptr_offset(leaf, slot),
4016 item_size - split_offset);
4017 btrfs_mark_buffer_dirty(trans, leaf);
4018
4019 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
4020 kfree(buf);
4021 return 0;
4022 }
4023
4024 /*
4025 * This function splits a single item into two items,
4026 * giving 'new_key' to the new item and splitting the
4027 * old one at split_offset (from the start of the item).
4028 *
4029 * The path may be released by this operation. After
4030 * the split, the path is pointing to the old item. The
4031 * new item is going to be in the same node as the old one.
4032 *
4033 * Note, the item being split must be small enough to live alone on
4034 * a tree block with room for one extra struct btrfs_item
4035 *
4036 * This allows us to split the item in place, keeping a lock on the
4037 * leaf the entire time.
4038 */
4039 int btrfs_split_item(struct btrfs_trans_handle *trans,
4040 struct btrfs_root *root,
4041 struct btrfs_path *path,
4042 const struct btrfs_key *new_key,
4043 unsigned long split_offset)
4044 {
4045 int ret;
4046 ret = setup_leaf_for_split(trans, root, path,
4047 sizeof(struct btrfs_item));
4048 if (ret)
4049 return ret;
4050
4051 ret = split_item(trans, path, new_key, split_offset);
4052 return ret;
4053 }
4054
4055 /*
4056 * make the item pointed to by the path smaller. new_size indicates
4057 * how small to make it, and from_end tells us if we just chop bytes
4058 * off the end of the item or if we shift the item to chop bytes off
4059 * the front.
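*
* The mechanics are the same in both cases: because leaf data is packed at the
* end of the block, shrinking an item means sliding everything at lower
* offsets (later items plus the kept part of this item) up by the difference
* and adding that difference to the affected item offsets. A stand-alone
* sketch of the from_end case on plain arrays (hedged toy model, not kernel
* code; needs <string.h> when compiled on its own):
*
*	static void toy_truncate_from_end(char *data, unsigned int *offs,
*					  unsigned int *sizes, unsigned int nr,
*					  unsigned int slot, unsigned int new_size,
*					  unsigned int data_end)
*	{
*		unsigned int diff = sizes[slot] - new_size;
*
*		// slide the kept bytes and every later item's data upwards
*		memmove(data + data_end + diff, data + data_end,
*			offs[slot] + new_size - data_end);
*		for (unsigned int i = slot; i < nr; i++)
*			offs[i] += diff;	// the data area shrank by diff
*		sizes[slot] = new_size;
*	}
*
* Chopping from the front (!from_end) additionally rewrites the item's key
* offset, which is what the disk_key handling below takes care of.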
4060 */ 4061 void btrfs_truncate_item(struct btrfs_trans_handle *trans, 4062 const struct btrfs_path *path, u32 new_size, int from_end) 4063 { 4064 int slot; 4065 struct extent_buffer *leaf; 4066 u32 nritems; 4067 unsigned int data_end; 4068 unsigned int old_data_start; 4069 unsigned int old_size; 4070 unsigned int size_diff; 4071 int i; 4072 struct btrfs_map_token token; 4073 4074 leaf = path->nodes[0]; 4075 slot = path->slots[0]; 4076 4077 old_size = btrfs_item_size(leaf, slot); 4078 if (old_size == new_size) 4079 return; 4080 4081 nritems = btrfs_header_nritems(leaf); 4082 data_end = leaf_data_end(leaf); 4083 4084 old_data_start = btrfs_item_offset(leaf, slot); 4085 4086 size_diff = old_size - new_size; 4087 4088 BUG_ON(slot < 0); 4089 BUG_ON(slot >= nritems); 4090 4091 /* 4092 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4093 */ 4094 /* first correct the data pointers */ 4095 btrfs_init_map_token(&token, leaf); 4096 for (i = slot; i < nritems; i++) { 4097 u32 ioff; 4098 4099 ioff = btrfs_token_item_offset(&token, i); 4100 btrfs_set_token_item_offset(&token, i, ioff + size_diff); 4101 } 4102 4103 /* shift the data */ 4104 if (from_end) { 4105 memmove_leaf_data(leaf, data_end + size_diff, data_end, 4106 old_data_start + new_size - data_end); 4107 } else { 4108 struct btrfs_disk_key disk_key; 4109 u64 offset; 4110 4111 btrfs_item_key(leaf, &disk_key, slot); 4112 4113 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) { 4114 unsigned long ptr; 4115 struct btrfs_file_extent_item *fi; 4116 4117 fi = btrfs_item_ptr(leaf, slot, 4118 struct btrfs_file_extent_item); 4119 fi = (struct btrfs_file_extent_item *)( 4120 (unsigned long)fi - size_diff); 4121 4122 if (btrfs_file_extent_type(leaf, fi) == 4123 BTRFS_FILE_EXTENT_INLINE) { 4124 ptr = btrfs_item_ptr_offset(leaf, slot); 4125 memmove_extent_buffer(leaf, ptr, 4126 (unsigned long)fi, 4127 BTRFS_FILE_EXTENT_INLINE_DATA_START); 4128 } 4129 } 4130 4131 memmove_leaf_data(leaf, data_end + size_diff, data_end, 4132 old_data_start - data_end); 4133 4134 offset = btrfs_disk_key_offset(&disk_key); 4135 btrfs_set_disk_key_offset(&disk_key, offset + size_diff); 4136 btrfs_set_item_key(leaf, &disk_key, slot); 4137 if (slot == 0) 4138 fixup_low_keys(trans, path, &disk_key, 1); 4139 } 4140 4141 btrfs_set_item_size(leaf, slot, new_size); 4142 btrfs_mark_buffer_dirty(trans, leaf); 4143 4144 if (btrfs_leaf_free_space(leaf) < 0) { 4145 btrfs_print_leaf(leaf); 4146 BUG(); 4147 } 4148 } 4149 4150 /* 4151 * make the item pointed to by the path bigger, data_size is the added size. 4152 */ 4153 void btrfs_extend_item(struct btrfs_trans_handle *trans, 4154 const struct btrfs_path *path, u32 data_size) 4155 { 4156 int slot; 4157 struct extent_buffer *leaf; 4158 u32 nritems; 4159 unsigned int data_end; 4160 unsigned int old_data; 4161 unsigned int old_size; 4162 int i; 4163 struct btrfs_map_token token; 4164 4165 leaf = path->nodes[0]; 4166 4167 nritems = btrfs_header_nritems(leaf); 4168 data_end = leaf_data_end(leaf); 4169 4170 if (btrfs_leaf_free_space(leaf) < data_size) { 4171 btrfs_print_leaf(leaf); 4172 BUG(); 4173 } 4174 slot = path->slots[0]; 4175 old_data = btrfs_item_data_end(leaf, slot); 4176 4177 BUG_ON(slot < 0); 4178 if (slot >= nritems) { 4179 btrfs_print_leaf(leaf); 4180 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d", 4181 slot, nritems); 4182 BUG(); 4183 } 4184 4185 /* 4186 * item0..itemN ... dataN.offset..dataN.size .. 
data0.size 4187 */ 4188 /* first correct the data pointers */ 4189 btrfs_init_map_token(&token, leaf); 4190 for (i = slot; i < nritems; i++) { 4191 u32 ioff; 4192 4193 ioff = btrfs_token_item_offset(&token, i); 4194 btrfs_set_token_item_offset(&token, i, ioff - data_size); 4195 } 4196 4197 /* shift the data */ 4198 memmove_leaf_data(leaf, data_end - data_size, data_end, 4199 old_data - data_end); 4200 4201 data_end = old_data; 4202 old_size = btrfs_item_size(leaf, slot); 4203 btrfs_set_item_size(leaf, slot, old_size + data_size); 4204 btrfs_mark_buffer_dirty(trans, leaf); 4205 4206 if (btrfs_leaf_free_space(leaf) < 0) { 4207 btrfs_print_leaf(leaf); 4208 BUG(); 4209 } 4210 } 4211 4212 /* 4213 * Make space in the node before inserting one or more items. 4214 * 4215 * @trans: transaction handle 4216 * @root: root we are inserting items to 4217 * @path: points to the leaf/slot where we are going to insert new items 4218 * @batch: information about the batch of items to insert 4219 * 4220 * Main purpose is to save stack depth by doing the bulk of the work in a 4221 * function that doesn't call btrfs_search_slot 4222 */ 4223 static void setup_items_for_insert(struct btrfs_trans_handle *trans, 4224 struct btrfs_root *root, struct btrfs_path *path, 4225 const struct btrfs_item_batch *batch) 4226 { 4227 struct btrfs_fs_info *fs_info = root->fs_info; 4228 int i; 4229 u32 nritems; 4230 unsigned int data_end; 4231 struct btrfs_disk_key disk_key; 4232 struct extent_buffer *leaf; 4233 int slot; 4234 struct btrfs_map_token token; 4235 u32 total_size; 4236 4237 /* 4238 * Before anything else, update keys in the parent and other ancestors 4239 * if needed, then release the write locks on them, so that other tasks 4240 * can use them while we modify the leaf. 4241 */ 4242 if (path->slots[0] == 0) { 4243 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]); 4244 fixup_low_keys(trans, path, &disk_key, 1); 4245 } 4246 btrfs_unlock_up_safe(path, 1); 4247 4248 leaf = path->nodes[0]; 4249 slot = path->slots[0]; 4250 4251 nritems = btrfs_header_nritems(leaf); 4252 data_end = leaf_data_end(leaf); 4253 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 4254 4255 if (btrfs_leaf_free_space(leaf) < total_size) { 4256 btrfs_print_leaf(leaf); 4257 btrfs_crit(fs_info, "not enough freespace need %u have %d", 4258 total_size, btrfs_leaf_free_space(leaf)); 4259 BUG(); 4260 } 4261 4262 btrfs_init_map_token(&token, leaf); 4263 if (slot != nritems) { 4264 unsigned int old_data = btrfs_item_data_end(leaf, slot); 4265 4266 if (old_data < data_end) { 4267 btrfs_print_leaf(leaf); 4268 btrfs_crit(fs_info, 4269 "item at slot %d with data offset %u beyond data end of leaf %u", 4270 slot, old_data, data_end); 4271 BUG(); 4272 } 4273 /* 4274 * item0..itemN ... dataN.offset..dataN.size .. 
data0.size 4275 */ 4276 /* first correct the data pointers */ 4277 for (i = slot; i < nritems; i++) { 4278 u32 ioff; 4279 4280 ioff = btrfs_token_item_offset(&token, i); 4281 btrfs_set_token_item_offset(&token, i, 4282 ioff - batch->total_data_size); 4283 } 4284 /* shift the items */ 4285 memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot); 4286 4287 /* shift the data */ 4288 memmove_leaf_data(leaf, data_end - batch->total_data_size, 4289 data_end, old_data - data_end); 4290 data_end = old_data; 4291 } 4292 4293 /* setup the item for the new data */ 4294 for (i = 0; i < batch->nr; i++) { 4295 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]); 4296 btrfs_set_item_key(leaf, &disk_key, slot + i); 4297 data_end -= batch->data_sizes[i]; 4298 btrfs_set_token_item_offset(&token, slot + i, data_end); 4299 btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]); 4300 } 4301 4302 btrfs_set_header_nritems(leaf, nritems + batch->nr); 4303 btrfs_mark_buffer_dirty(trans, leaf); 4304 4305 if (btrfs_leaf_free_space(leaf) < 0) { 4306 btrfs_print_leaf(leaf); 4307 BUG(); 4308 } 4309 } 4310 4311 /* 4312 * Insert a new item into a leaf. 4313 * 4314 * @trans: Transaction handle. 4315 * @root: The root of the btree. 4316 * @path: A path pointing to the target leaf and slot. 4317 * @key: The key of the new item. 4318 * @data_size: The size of the data associated with the new key. 4319 */ 4320 void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans, 4321 struct btrfs_root *root, 4322 struct btrfs_path *path, 4323 const struct btrfs_key *key, 4324 u32 data_size) 4325 { 4326 struct btrfs_item_batch batch; 4327 4328 batch.keys = key; 4329 batch.data_sizes = &data_size; 4330 batch.total_data_size = data_size; 4331 batch.nr = 1; 4332 4333 setup_items_for_insert(trans, root, path, &batch); 4334 } 4335 4336 /* 4337 * Given a key and some data, insert items into the tree. 4338 * This does all the path init required, making room in the tree if needed. 4339 * 4340 * Returns: 0 on success 4341 * -EEXIST if the first key already exists 4342 * < 0 on other errors 4343 */ 4344 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 4345 struct btrfs_root *root, 4346 struct btrfs_path *path, 4347 const struct btrfs_item_batch *batch) 4348 { 4349 int ret = 0; 4350 int slot; 4351 u32 total_size; 4352 4353 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item)); 4354 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1); 4355 if (ret == 0) 4356 return -EEXIST; 4357 if (ret < 0) 4358 return ret; 4359 4360 slot = path->slots[0]; 4361 BUG_ON(slot < 0); 4362 4363 setup_items_for_insert(trans, root, path, batch); 4364 return 0; 4365 } 4366 4367 /* 4368 * Given a key and some data, insert an item into the tree. 4369 * This does all the path init required, making room in the tree if needed. 
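*
* A hedged usage sketch (error handling trimmed; 'trans', 'root', 'objectid',
* 'payload' and 'payload_len' are placeholders rather than values taken from a
* real caller, and the key type is only illustrative):
*
*	struct btrfs_key key = {
*		.objectid = objectid,
*		.type = BTRFS_TEMPORARY_ITEM_KEY,	// illustrative key type
*		.offset = 0,
*	};
*	int ret;
*
*	ret = btrfs_insert_item(trans, root, &key, payload, payload_len);
*	if (ret == -EEXIST) {
*		// an item with exactly this key already exists
*	} else if (ret < 0) {
*		// allocation, ENOSPC or COW failure; the caller decides
*		// whether the transaction must be aborted
*	}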
4370 */ 4371 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4372 const struct btrfs_key *cpu_key, void *data, 4373 u32 data_size) 4374 { 4375 int ret = 0; 4376 struct btrfs_path *path; 4377 struct extent_buffer *leaf; 4378 unsigned long ptr; 4379 4380 path = btrfs_alloc_path(); 4381 if (!path) 4382 return -ENOMEM; 4383 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); 4384 if (!ret) { 4385 leaf = path->nodes[0]; 4386 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 4387 write_extent_buffer(leaf, data, ptr, data_size); 4388 btrfs_mark_buffer_dirty(trans, leaf); 4389 } 4390 btrfs_free_path(path); 4391 return ret; 4392 } 4393 4394 /* 4395 * This function duplicates an item, giving 'new_key' to the new item. 4396 * It guarantees both items live in the same tree leaf and the new item is 4397 * contiguous with the original item. 4398 * 4399 * This allows us to split a file extent in place, keeping a lock on the leaf 4400 * the entire time. 4401 */ 4402 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 4403 struct btrfs_root *root, 4404 struct btrfs_path *path, 4405 const struct btrfs_key *new_key) 4406 { 4407 struct extent_buffer *leaf; 4408 int ret; 4409 u32 item_size; 4410 4411 leaf = path->nodes[0]; 4412 item_size = btrfs_item_size(leaf, path->slots[0]); 4413 ret = setup_leaf_for_split(trans, root, path, 4414 item_size + sizeof(struct btrfs_item)); 4415 if (ret) 4416 return ret; 4417 4418 path->slots[0]++; 4419 btrfs_setup_item_for_insert(trans, root, path, new_key, item_size); 4420 leaf = path->nodes[0]; 4421 memcpy_extent_buffer(leaf, 4422 btrfs_item_ptr_offset(leaf, path->slots[0]), 4423 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), 4424 item_size); 4425 return 0; 4426 } 4427 4428 /* 4429 * delete the pointer from a given node. 4430 * 4431 * the tree should have been previously balanced so the deletion does not 4432 * empty a node. 4433 * 4434 * This is exported for use inside btrfs-progs, don't un-export it. 4435 */ 4436 int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4437 struct btrfs_path *path, int level, int slot) 4438 { 4439 struct extent_buffer *parent = path->nodes[level]; 4440 u32 nritems; 4441 int ret; 4442 4443 nritems = btrfs_header_nritems(parent); 4444 if (slot != nritems - 1) { 4445 if (level) { 4446 ret = btrfs_tree_mod_log_insert_move(parent, slot, 4447 slot + 1, nritems - slot - 1); 4448 if (ret < 0) { 4449 btrfs_abort_transaction(trans, ret); 4450 return ret; 4451 } 4452 } 4453 memmove_extent_buffer(parent, 4454 btrfs_node_key_ptr_offset(parent, slot), 4455 btrfs_node_key_ptr_offset(parent, slot + 1), 4456 sizeof(struct btrfs_key_ptr) * 4457 (nritems - slot - 1)); 4458 } else if (level) { 4459 ret = btrfs_tree_mod_log_insert_key(parent, slot, 4460 BTRFS_MOD_LOG_KEY_REMOVE); 4461 if (ret < 0) { 4462 btrfs_abort_transaction(trans, ret); 4463 return ret; 4464 } 4465 } 4466 4467 nritems--; 4468 btrfs_set_header_nritems(parent, nritems); 4469 if (nritems == 0 && parent == root->node) { 4470 BUG_ON(btrfs_header_level(root->node) != 1); 4471 /* just turn the root into a leaf and break */ 4472 btrfs_set_header_level(root->node, 0); 4473 } else if (slot == 0) { 4474 struct btrfs_disk_key disk_key; 4475 4476 btrfs_node_key(parent, &disk_key, 0); 4477 fixup_low_keys(trans, path, &disk_key, level + 1); 4478 } 4479 btrfs_mark_buffer_dirty(trans, parent); 4480 return 0; 4481 } 4482 4483 /* 4484 * a helper function to delete the leaf pointed to by path->slots[1] and 4485 * path->nodes[1]. 
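*
* The first half of the job is plain array surgery in the parent node, done by
* btrfs_del_ptr() above: the key pointers after the victim slot are moved down
* by one record. A stand-alone sketch with toy types (needs <string.h> when
* compiled on its own):
*
*	struct toy_key_ptr { unsigned long long key, blockptr, generation; };
*
*	static void toy_del_ptr(struct toy_key_ptr *ptrs, unsigned int *nritems,
*				unsigned int slot)
*	{
*		memmove(&ptrs[slot], &ptrs[slot + 1],
*			(*nritems - slot - 1) * sizeof(*ptrs));
*		(*nritems)--;
*	}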
4486 * 4487 * This deletes the pointer in path->nodes[1] and frees the leaf 4488 * block extent. zero is returned if it all worked out, < 0 otherwise. 4489 * 4490 * The path must have already been setup for deleting the leaf, including 4491 * all the proper balancing. path->nodes[1] must be locked. 4492 */ 4493 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans, 4494 struct btrfs_root *root, 4495 struct btrfs_path *path, 4496 struct extent_buffer *leaf) 4497 { 4498 int ret; 4499 4500 WARN_ON(btrfs_header_generation(leaf) != trans->transid); 4501 ret = btrfs_del_ptr(trans, root, path, 1, path->slots[1]); 4502 if (ret < 0) 4503 return ret; 4504 4505 /* 4506 * btrfs_free_extent is expensive, we want to make sure we 4507 * aren't holding any locks when we call it 4508 */ 4509 btrfs_unlock_up_safe(path, 0); 4510 4511 root_sub_used_bytes(root); 4512 4513 atomic_inc(&leaf->refs); 4514 ret = btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1); 4515 free_extent_buffer_stale(leaf); 4516 if (ret < 0) 4517 btrfs_abort_transaction(trans, ret); 4518 4519 return ret; 4520 } 4521 /* 4522 * delete the item at the leaf level in path. If that empties 4523 * the leaf, remove it from the tree 4524 */ 4525 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4526 struct btrfs_path *path, int slot, int nr) 4527 { 4528 struct btrfs_fs_info *fs_info = root->fs_info; 4529 struct extent_buffer *leaf; 4530 int ret = 0; 4531 int wret; 4532 u32 nritems; 4533 4534 leaf = path->nodes[0]; 4535 nritems = btrfs_header_nritems(leaf); 4536 4537 if (slot + nr != nritems) { 4538 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1); 4539 const int data_end = leaf_data_end(leaf); 4540 struct btrfs_map_token token; 4541 u32 dsize = 0; 4542 int i; 4543 4544 for (i = 0; i < nr; i++) 4545 dsize += btrfs_item_size(leaf, slot + i); 4546 4547 memmove_leaf_data(leaf, data_end + dsize, data_end, 4548 last_off - data_end); 4549 4550 btrfs_init_map_token(&token, leaf); 4551 for (i = slot + nr; i < nritems; i++) { 4552 u32 ioff; 4553 4554 ioff = btrfs_token_item_offset(&token, i); 4555 btrfs_set_token_item_offset(&token, i, ioff + dsize); 4556 } 4557 4558 memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr); 4559 } 4560 btrfs_set_header_nritems(leaf, nritems - nr); 4561 nritems -= nr; 4562 4563 /* delete the leaf if we've emptied it */ 4564 if (nritems == 0) { 4565 if (leaf == root->node) { 4566 btrfs_set_header_level(leaf, 0); 4567 } else { 4568 btrfs_clear_buffer_dirty(trans, leaf); 4569 ret = btrfs_del_leaf(trans, root, path, leaf); 4570 if (ret < 0) 4571 return ret; 4572 } 4573 } else { 4574 int used = leaf_space_used(leaf, 0, nritems); 4575 if (slot == 0) { 4576 struct btrfs_disk_key disk_key; 4577 4578 btrfs_item_key(leaf, &disk_key, 0); 4579 fixup_low_keys(trans, path, &disk_key, 1); 4580 } 4581 4582 /* 4583 * Try to delete the leaf if it is mostly empty. We do this by 4584 * trying to move all its items into its left and right neighbours. 4585 * If we can't move all the items, then we don't delete it - it's 4586 * not ideal, but future insertions might fill the leaf with more 4587 * items, or items from other leaves might be moved later into our 4588 * leaf due to deletions on those leaves. 4589 */ 4590 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) { 4591 u32 min_push_space; 4592 4593 /* push_leaf_left fixes the path. 
4594 * make sure the path still points to our leaf
4595 * for possible call to btrfs_del_ptr below
4596 */
4597 slot = path->slots[1];
4598 atomic_inc(&leaf->refs);
4599 /*
4600 * We want to be able to at least push one item to the
4601 * left neighbour leaf, and that's the first item.
4602 */
4603 min_push_space = sizeof(struct btrfs_item) +
4604 btrfs_item_size(leaf, 0);
4605 wret = push_leaf_left(trans, root, path, 0,
4606 min_push_space, 1, (u32)-1);
4607 if (wret < 0 && wret != -ENOSPC)
4608 ret = wret;
4609
4610 if (path->nodes[0] == leaf &&
4611 btrfs_header_nritems(leaf)) {
4612 /*
4613 * If we were not able to push all items from our
4614 * leaf to its left neighbour, then attempt to
4615 * either push all the remaining items to the
4616 * right neighbour or none. There's no advantage
4617 * in pushing only some items, instead of all, as
4618 * it's pointless to end up with a leaf having
4619 * too few items while the neighbours can be full
4620 * or nearly full.
4621 */
4622 nritems = btrfs_header_nritems(leaf);
4623 min_push_space = leaf_space_used(leaf, 0, nritems);
4624 wret = push_leaf_right(trans, root, path, 0,
4625 min_push_space, 1, 0);
4626 if (wret < 0 && wret != -ENOSPC)
4627 ret = wret;
4628 }
4629
4630 if (btrfs_header_nritems(leaf) == 0) {
4631 path->slots[1] = slot;
4632 ret = btrfs_del_leaf(trans, root, path, leaf);
4633 if (ret < 0)
4634 return ret;
4635 free_extent_buffer(leaf);
4636 ret = 0;
4637 } else {
4638 /* if we're still in the path, make sure
4639 * we're dirty. Otherwise, one of the
4640 * push_leaf functions must have already
4641 * dirtied this buffer
4642 */
4643 if (path->nodes[0] == leaf)
4644 btrfs_mark_buffer_dirty(trans, leaf);
4645 free_extent_buffer(leaf);
4646 }
4647 } else {
4648 btrfs_mark_buffer_dirty(trans, leaf);
4649 }
4650 }
4651 return ret;
4652 }
4653
4654 /*
4655 * A helper function to walk down the tree starting at min_key, and looking
4656 * for nodes or leaves that have a minimum transaction id.
4657 * This is used by the btree defrag code and tree logging.
4658 *
4659 * This does not cow, but it does stuff the starting key it finds back
4660 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4661 * key and get a writable path.
4662 *
4663 * This honors path->lowest_level to prevent descent past a given level
4664 * of the tree.
4665 *
4666 * min_trans indicates the oldest transaction that you are interested
4667 * in walking through. Any nodes or leaves older than min_trans are
4668 * skipped over (without reading them).
4669 *
4670 * returns zero if something useful was found, < 0 on error and 1 if there
4671 * was nothing in the tree that matched the search criteria.
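*
* A hedged sketch of the usual calling pattern (a defrag/tree-log style scan;
* 'root', 'path' and 'min_trans' are placeholders and the per-item processing
* is elided):
*
*	while (1) {
*		ret = btrfs_search_forward(root, &min_key, path, min_trans);
*		if (ret)
*			break;		// > 0: nothing new enough left, < 0: error
*
*		// process path->nodes[0] / path->slots[0] here
*
*		btrfs_release_path(path);
*
*		// advance min_key past the key we just processed
*		if (min_key.offset < (u64)-1) {
*			min_key.offset++;
*		} else if (min_key.type < (u8)-1) {
*			min_key.type++;
*			min_key.offset = 0;
*		} else if (min_key.objectid < (u64)-1) {
*			min_key.objectid++;
*			min_key.type = 0;
*			min_key.offset = 0;
*		} else {
*			break;
*		}
*	}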
4672 */ 4673 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 4674 struct btrfs_path *path, 4675 u64 min_trans) 4676 { 4677 struct extent_buffer *cur; 4678 struct btrfs_key found_key; 4679 int slot; 4680 int sret; 4681 u32 nritems; 4682 int level; 4683 int ret = 1; 4684 int keep_locks = path->keep_locks; 4685 4686 ASSERT(!path->nowait); 4687 path->keep_locks = 1; 4688 again: 4689 cur = btrfs_read_lock_root_node(root); 4690 level = btrfs_header_level(cur); 4691 WARN_ON(path->nodes[level]); 4692 path->nodes[level] = cur; 4693 path->locks[level] = BTRFS_READ_LOCK; 4694 4695 if (btrfs_header_generation(cur) < min_trans) { 4696 ret = 1; 4697 goto out; 4698 } 4699 while (1) { 4700 nritems = btrfs_header_nritems(cur); 4701 level = btrfs_header_level(cur); 4702 sret = btrfs_bin_search(cur, 0, min_key, &slot); 4703 if (sret < 0) { 4704 ret = sret; 4705 goto out; 4706 } 4707 4708 /* at the lowest level, we're done, setup the path and exit */ 4709 if (level == path->lowest_level) { 4710 if (slot >= nritems) 4711 goto find_next_key; 4712 ret = 0; 4713 path->slots[level] = slot; 4714 btrfs_item_key_to_cpu(cur, &found_key, slot); 4715 goto out; 4716 } 4717 if (sret && slot > 0) 4718 slot--; 4719 /* 4720 * check this node pointer against the min_trans parameters. 4721 * If it is too old, skip to the next one. 4722 */ 4723 while (slot < nritems) { 4724 u64 gen; 4725 4726 gen = btrfs_node_ptr_generation(cur, slot); 4727 if (gen < min_trans) { 4728 slot++; 4729 continue; 4730 } 4731 break; 4732 } 4733 find_next_key: 4734 /* 4735 * we didn't find a candidate key in this node, walk forward 4736 * and find another one 4737 */ 4738 if (slot >= nritems) { 4739 path->slots[level] = slot; 4740 sret = btrfs_find_next_key(root, path, min_key, level, 4741 min_trans); 4742 if (sret == 0) { 4743 btrfs_release_path(path); 4744 goto again; 4745 } else { 4746 goto out; 4747 } 4748 } 4749 /* save our key for returning back */ 4750 btrfs_node_key_to_cpu(cur, &found_key, slot); 4751 path->slots[level] = slot; 4752 if (level == path->lowest_level) { 4753 ret = 0; 4754 goto out; 4755 } 4756 cur = btrfs_read_node_slot(cur, slot); 4757 if (IS_ERR(cur)) { 4758 ret = PTR_ERR(cur); 4759 goto out; 4760 } 4761 4762 btrfs_tree_read_lock(cur); 4763 4764 path->locks[level - 1] = BTRFS_READ_LOCK; 4765 path->nodes[level - 1] = cur; 4766 unlock_up(path, level, 1, 0, NULL); 4767 } 4768 out: 4769 path->keep_locks = keep_locks; 4770 if (ret == 0) { 4771 btrfs_unlock_up_safe(path, path->lowest_level + 1); 4772 memcpy(min_key, &found_key, sizeof(found_key)); 4773 } 4774 return ret; 4775 } 4776 4777 /* 4778 * this is similar to btrfs_next_leaf, but does not try to preserve 4779 * and fixup the path. It looks for and returns the next key in the 4780 * tree based on the current path and the min_trans parameters. 4781 * 4782 * 0 is returned if another key is found, < 0 if there are any errors 4783 * and 1 is returned if there are no higher keys in the tree 4784 * 4785 * path->keep_locks should be set to 1 on the search made before 4786 * calling this function. 
4787 */ 4788 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 4789 struct btrfs_key *key, int level, u64 min_trans) 4790 { 4791 int slot; 4792 struct extent_buffer *c; 4793 4794 WARN_ON(!path->keep_locks && !path->skip_locking); 4795 while (level < BTRFS_MAX_LEVEL) { 4796 if (!path->nodes[level]) 4797 return 1; 4798 4799 slot = path->slots[level] + 1; 4800 c = path->nodes[level]; 4801 next: 4802 if (slot >= btrfs_header_nritems(c)) { 4803 int ret; 4804 int orig_lowest; 4805 struct btrfs_key cur_key; 4806 if (level + 1 >= BTRFS_MAX_LEVEL || 4807 !path->nodes[level + 1]) 4808 return 1; 4809 4810 if (path->locks[level + 1] || path->skip_locking) { 4811 level++; 4812 continue; 4813 } 4814 4815 slot = btrfs_header_nritems(c) - 1; 4816 if (level == 0) 4817 btrfs_item_key_to_cpu(c, &cur_key, slot); 4818 else 4819 btrfs_node_key_to_cpu(c, &cur_key, slot); 4820 4821 orig_lowest = path->lowest_level; 4822 btrfs_release_path(path); 4823 path->lowest_level = level; 4824 ret = btrfs_search_slot(NULL, root, &cur_key, path, 4825 0, 0); 4826 path->lowest_level = orig_lowest; 4827 if (ret < 0) 4828 return ret; 4829 4830 c = path->nodes[level]; 4831 slot = path->slots[level]; 4832 if (ret == 0) 4833 slot++; 4834 goto next; 4835 } 4836 4837 if (level == 0) 4838 btrfs_item_key_to_cpu(c, key, slot); 4839 else { 4840 u64 gen = btrfs_node_ptr_generation(c, slot); 4841 4842 if (gen < min_trans) { 4843 slot++; 4844 goto next; 4845 } 4846 btrfs_node_key_to_cpu(c, key, slot); 4847 } 4848 return 0; 4849 } 4850 return 1; 4851 } 4852 4853 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 4854 u64 time_seq) 4855 { 4856 int slot; 4857 int level; 4858 struct extent_buffer *c; 4859 struct extent_buffer *next; 4860 struct btrfs_fs_info *fs_info = root->fs_info; 4861 struct btrfs_key key; 4862 bool need_commit_sem = false; 4863 u32 nritems; 4864 int ret; 4865 int i; 4866 4867 /* 4868 * The nowait semantics are used only for write paths, where we don't 4869 * use the tree mod log and sequence numbers. 4870 */ 4871 if (time_seq) 4872 ASSERT(!path->nowait); 4873 4874 nritems = btrfs_header_nritems(path->nodes[0]); 4875 if (nritems == 0) 4876 return 1; 4877 4878 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); 4879 again: 4880 level = 1; 4881 next = NULL; 4882 btrfs_release_path(path); 4883 4884 path->keep_locks = 1; 4885 4886 if (time_seq) { 4887 ret = btrfs_search_old_slot(root, &key, path, time_seq); 4888 } else { 4889 if (path->need_commit_sem) { 4890 path->need_commit_sem = 0; 4891 need_commit_sem = true; 4892 if (path->nowait) { 4893 if (!down_read_trylock(&fs_info->commit_root_sem)) { 4894 ret = -EAGAIN; 4895 goto done; 4896 } 4897 } else { 4898 down_read(&fs_info->commit_root_sem); 4899 } 4900 } 4901 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4902 } 4903 path->keep_locks = 0; 4904 4905 if (ret < 0) 4906 goto done; 4907 4908 nritems = btrfs_header_nritems(path->nodes[0]); 4909 /* 4910 * by releasing the path above we dropped all our locks. A balance 4911 * could have added more items next to the key that used to be 4912 * at the very end of the block. So, check again here and 4913 * advance the path if there are now more items available. 
4914 */
4915 if (nritems > 0 && path->slots[0] < nritems - 1) {
4916 if (ret == 0)
4917 path->slots[0]++;
4918 ret = 0;
4919 goto done;
4920 }
4921 /*
4922 * So the above check misses one case:
4923 * - after releasing the path above, someone has removed the item that
4924 * used to be at the very end of the block, and a balance between leaves
4925 * gets another one with bigger key.offset to replace it.
4926 *
4927 * This one should be returned as well, or we can get leaf corruption
4928 * later (esp. in __btrfs_drop_extents()).
4929 *
4930 * And a bit more explanation about this check:
4931 * with ret > 0, the key wasn't found, the path points to the slot
4932 * where it would be inserted, so the path->slots[0] item must be the
4933 * bigger one.
4934 */
4935 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
4936 ret = 0;
4937 goto done;
4938 }
4939
4940 while (level < BTRFS_MAX_LEVEL) {
4941 if (!path->nodes[level]) {
4942 ret = 1;
4943 goto done;
4944 }
4945
4946 slot = path->slots[level] + 1;
4947 c = path->nodes[level];
4948 if (slot >= btrfs_header_nritems(c)) {
4949 level++;
4950 if (level == BTRFS_MAX_LEVEL) {
4951 ret = 1;
4952 goto done;
4953 }
4954 continue;
4955 }
4956
4957
4958 /*
4959 * Our current level is where we're going to start from, and to
4960 * make sure lockdep doesn't complain we need to drop our locks
4961 * and nodes from 0 to our current level.
4962 */
4963 for (i = 0; i < level; i++) {
4964 if (path->locks[level]) {
4965 btrfs_tree_read_unlock(path->nodes[i]);
4966 path->locks[i] = 0;
4967 }
4968 free_extent_buffer(path->nodes[i]);
4969 path->nodes[i] = NULL;
4970 }
4971
4972 next = c;
4973 ret = read_block_for_search(root, path, &next, slot, &key);
4974 if (ret == -EAGAIN && !path->nowait)
4975 goto again;
4976
4977 if (ret < 0) {
4978 btrfs_release_path(path);
4979 goto done;
4980 }
4981
4982 if (!path->skip_locking) {
4983 ret = btrfs_try_tree_read_lock(next);
4984 if (!ret && path->nowait) {
4985 ret = -EAGAIN;
4986 goto done;
4987 }
4988 if (!ret && time_seq) {
4989 /*
4990 * If we don't get the lock, we may be racing
4991 * with push_leaf_left, which holds that lock
4992 * while itself waiting for the leaf we've
4993 * currently locked. To solve this situation, we
4994 * give up on our lock and cycle.
4995 */
4996 free_extent_buffer(next);
4997 btrfs_release_path(path);
4998 cond_resched();
4999 goto again;
5000 }
5001 if (!ret)
5002 btrfs_tree_read_lock(next);
5003 }
5004 break;
5005 }
5006 path->slots[level] = slot;
5007 while (1) {
5008 level--;
5009 path->nodes[level] = next;
5010 path->slots[level] = 0;
5011 if (!path->skip_locking)
5012 path->locks[level] = BTRFS_READ_LOCK;
5013 if (!level)
5014 break;
5015
5016 ret = read_block_for_search(root, path, &next, 0, &key);
5017 if (ret == -EAGAIN && !path->nowait)
5018 goto again;
5019
5020 if (ret < 0) {
5021 btrfs_release_path(path);
5022 goto done;
5023 }
5024
5025 if (!path->skip_locking) {
5026 if (path->nowait) {
5027 if (!btrfs_try_tree_read_lock(next)) {
5028 ret = -EAGAIN;
5029 goto done;
5030 }
5031 } else {
5032 btrfs_tree_read_lock(next);
5033 }
5034 }
5035 }
5036 ret = 0;
5037 done:
5038 unlock_up(path, 0, 1, 0, NULL);
5039 if (need_commit_sem) {
5040 int ret2;
5041
5042 path->need_commit_sem = 1;
5043 ret2 = finish_need_commit_sem_search(path);
5044 up_read(&fs_info->commit_root_sem);
5045 if (ret2)
5046 ret = ret2;
5047 }
5048
5049 return ret;
5050 }
5051
5052 int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq)
5053 {
5054 path->slots[0]++;
5055 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
5056 return btrfs_next_old_leaf(root, path, time_seq);
5057 return 0;
5058 }
5059
5060 /*
5061 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5062 * searching until it gets past min_objectid or finds an item of 'type'
5063 *
5064 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5065 */
5066 int btrfs_previous_item(struct btrfs_root *root,
5067 struct btrfs_path *path, u64 min_objectid,
5068 int type)
5069 {
5070 struct btrfs_key found_key;
5071 struct extent_buffer *leaf;
5072 u32 nritems;
5073 int ret;
5074
5075 while (1) {
5076 if (path->slots[0] == 0) {
5077 ret = btrfs_prev_leaf(root, path);
5078 if (ret != 0)
5079 return ret;
5080 } else {
5081 path->slots[0]--;
5082 }
5083 leaf = path->nodes[0];
5084 nritems = btrfs_header_nritems(leaf);
5085 if (nritems == 0)
5086 return 1;
5087 if (path->slots[0] == nritems)
5088 path->slots[0]--;
5089
5090 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5091 if (found_key.objectid < min_objectid)
5092 break;
5093 if (found_key.type == type)
5094 return 0;
5095 if (found_key.objectid == min_objectid &&
5096 found_key.type < type)
5097 break;
5098 }
5099 return 1;
5100 }
5101
5102 /*
5103 * search in the extent tree to find a previous Metadata/Data extent item with
5104 * min objectid.
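* This follows the same backwards walk as btrfs_previous_item() above, only
* with the two extent item types accepted instead of a single caller-supplied
* type. A hedged usage sketch ('extent_root', 'path', 'bytenr' and 'key' are
* placeholders; the path is assumed to already point at or after 'bytenr'):
*
*	ret = btrfs_previous_extent_item(extent_root, path, bytenr);
*	if (ret == 0) {
*		// path->nodes[0] / path->slots[0] now point at the closest
*		// preceding EXTENT_ITEM or METADATA_ITEM key
*		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
*	}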
5105 * 5106 * returns 0 if something is found, 1 if nothing was found and < 0 on error 5107 */ 5108 int btrfs_previous_extent_item(struct btrfs_root *root, 5109 struct btrfs_path *path, u64 min_objectid) 5110 { 5111 struct btrfs_key found_key; 5112 struct extent_buffer *leaf; 5113 u32 nritems; 5114 int ret; 5115 5116 while (1) { 5117 if (path->slots[0] == 0) { 5118 ret = btrfs_prev_leaf(root, path); 5119 if (ret != 0) 5120 return ret; 5121 } else { 5122 path->slots[0]--; 5123 } 5124 leaf = path->nodes[0]; 5125 nritems = btrfs_header_nritems(leaf); 5126 if (nritems == 0) 5127 return 1; 5128 if (path->slots[0] == nritems) 5129 path->slots[0]--; 5130 5131 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5132 if (found_key.objectid < min_objectid) 5133 break; 5134 if (found_key.type == BTRFS_EXTENT_ITEM_KEY || 5135 found_key.type == BTRFS_METADATA_ITEM_KEY) 5136 return 0; 5137 if (found_key.objectid == min_objectid && 5138 found_key.type < BTRFS_EXTENT_ITEM_KEY) 5139 break; 5140 } 5141 return 1; 5142 } 5143 5144 int __init btrfs_ctree_init(void) 5145 { 5146 btrfs_path_cachep = KMEM_CACHE(btrfs_path, 0); 5147 if (!btrfs_path_cachep) 5148 return -ENOMEM; 5149 return 0; 5150 } 5151 5152 void __cold btrfs_ctree_exit(void) 5153 { 5154 kmem_cache_destroy(btrfs_path_cachep); 5155 } 5156
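/*
 * The btrfs_path objects used throughout this file come from the
 * btrfs_path_cachep cache set up above. A hedged sketch of the usual
 * lifecycle around a read-only lookup ('root' and 'key' are placeholders):
 *
 *	struct btrfs_path *path;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		// exact match at path->nodes[0] / path->slots[0]
 *	} else if (ret > 0) {
 *		// not found; the slot is where the key would be inserted
 *	}
 *	btrfs_free_path(path);	// drops the locks and extent buffer references
 *	return ret < 0 ? ret : 0;
 */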