/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				struct extent_buffer *eb);

struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
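/*
 * A short orientation note, summarizing the behavior of the helpers above
 * and below: every lock slot in a path is either unlocked (locks[i] == 0),
 * spinning (BTRFS_READ_LOCK / BTRFS_WRITE_LOCK) or blocking
 * (BTRFS_READ_LOCK_BLOCKING / BTRFS_WRITE_LOCK_BLOCKING).  Spinning locks
 * must not be held across anything that might schedule, so callers flip
 * them to the blocking variant first and flip them back afterwards.
 */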
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cowonly roots (everything not a reference counted cow subvolume) just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&root->fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &root->fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
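/*
 * The structures below implement the tree modification log.  In broad
 * strokes (a summary; see the individual helpers for the details): every
 * change to a tree node that a tree_mod_seq holder might care about is
 * recorded as a tree_mod_elem in an rb-tree, keyed by the block's shifted
 * logical address plus a monotonically increasing sequence number.  Readers
 * that hold a sequence number can later replay those records backwards to
 * reconstruct ("rewind") the state a node had at that point in time.
 */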
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}

/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set.  So when a caller
 * expects to record tree modifications, it should set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no
 * new blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return elem->seq;
}

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}
/*
 * key order of the log:
 *       index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 *
 * Note: must be called with write lock (tree_mod_log_write_lock).
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	BUG_ON(!tm);

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}

/*
 * Determines if logging can be omitted.  Returns 1 if it can.  Otherwise, it
 * returns zero with the tree_mod_log_lock acquired.  The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * call tree_mod_log_write_unlock() to release.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	return 0;
}
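/*
 * The usual logging pattern in the helpers below is, roughly: check
 * tree_mod_need_log() unlocked as a fast path, allocate the log elements
 * while no lock is held, then re-check under the write lock via
 * tree_mod_dont_log() before actually inserting.  This keeps allocations
 * out of the locked section, at the cost of sometimes allocating elements
 * that are immediately freed again.
 */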
/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}

static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}

static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			struct extent_buffer *eb, int slot,
			enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		kfree(tm);

	return ret;
}

static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we overwrite something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	if (ret)
		goto free_tms;
	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);
	kfree(tm);

	return ret;
}

static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}

static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags,
			 int log_removal)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
				  flags);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}
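/*
 * Search semantics for the rb-tree, sketched here for orientation: entries
 * for one block share the same index (its logical address shifted by
 * PAGE_CACHE_SHIFT) and differ only in seq.  Passing smallest == 1 below
 * returns the oldest entry with seq >= min_seq; smallest == 0 returns the
 * newest entry, still ignoring anything older than min_seq.
 */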
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return ret;
}

static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}

static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key(fs_info, eb, slot,
					MOD_LOG_KEY_REPLACE,
					atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}

static noinline int
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}

static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node,
			      int log_removal)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS, log_removal);
	BUG_ON(ret < 0);
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}
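/*
 * Note on *last_ref above (a reading aid, not new behavior): it is set to 1
 * only when the original buffer drops its last reference as part of the COW,
 * i.e. when no other tree still points at it.  The caller uses this to
 * decide whether __btrfs_cow_block() may hand the old block back to the
 * allocator via btrfs_free_tree_block().
 */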
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret)
			return ret;
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(root->fs_info, buf);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we
		 * simply return it. this should only happen if that (old)
		 * root is at level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
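/*
 * An illustrative walk-through of the loop above (hypothetical history):
 * suppose root A was replaced by B, and B later by C.  The log then holds
 * ROOT_REPLACE entries indexed by B (old_root = A) and by C (old_root = B).
 * Starting from C we follow old_root.logical back through B to A; the last
 * ROOT_REPLACE seen on the way is returned, and its old_root names the
 * oldest root version still covered by time_seq.
 */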
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	tree_mod_log_read_lock(fs_info);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no longer a root. we simply ignore that
			 * operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->index != first_tm->index)
			break;
	}
	tree_mod_log_read_unlock(fs_info);
	btrfs_set_header_nritems(eb, n);
}

/*
 * Called with eb read locked. If the buffer cannot be rewound, the same
 * buffer is returned. If rewind operations happen, a fresh buffer is
 * returned. The returned buffer is always read-locked. If the returned
 * buffer is not the input buffer, the lock on the input buffer is released
 * and the input buffer is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));

	return eb_rewin;
}

/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = eb_root->start;
	}

	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(root, logical, 0);
		if (WARN_ON(!old || !extent_buffer_uptodate(old))) {
			free_extent_buffer(old);
			btrfs_warn(root->fs_info,
				"failed to read tree block %llu from get_old_root", logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(root->fs_info, logical);
	} else {
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));

	return eb;
}

int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_test_is_dummy_root(root))
		return 0;

	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create a snapshot while committing the transaction,
	 *    after we've finished copying the src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       root->fs_info->running_transaction->transid);

	if (trans->transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, root->fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
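/*
 * For example (hypothetical keys, purely illustrative): the comparison is
 * lexicographic over (objectid, type, offset), so
 *   {objectid = 5, type = 1, offset = 0} < {objectid = 5, type = 2, offset = 0}
 * and any key with objectid 4 sorts before every key with objectid 5,
 * regardless of type and offset.
 */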
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != root->fs_info->running_transaction);
	WARN_ON(trans->transid != root->fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = root->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(root, blocknr, gen);
				if (!cur || !extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}

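/*
 * A rough sketch of the leaf layout leaf_data_end() relies on (illustrative
 * only): item headers grow forward from the start of the data area while
 * item data grows backwards from the end, and the two meet in the middle:
 *
 *   [item 0][item 1]...[item N-1] <--free--> [data N-1]...[data 1][data 0]
 *
 * btrfs_item_offset_nr(leaf, nr - 1) is therefore the lowest used data
 * offset, i.e. the "stop" of the data stack.
 */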
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;

	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
			     btrfs_node_ptr_generation(parent, slot));
	if (eb && !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		eb = NULL;
	}

	return eb;
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		if (!child) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		tree_mod_log_set_root_pointer(root, child, 1);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
btrfs_free_tree_block(trans, root, right, 0, 1); 2005 free_extent_buffer_stale(right); 2006 right = NULL; 2007 } else { 2008 struct btrfs_disk_key right_key; 2009 btrfs_node_key(right, &right_key, 0); 2010 tree_mod_log_set_node_key(root->fs_info, parent, 2011 pslot + 1, 0); 2012 btrfs_set_node_key(parent, &right_key, pslot + 1); 2013 btrfs_mark_buffer_dirty(parent); 2014 } 2015 } 2016 if (btrfs_header_nritems(mid) == 1) { 2017 /* 2018 * we're not allowed to leave a node with one item in the 2019 * tree during a delete. A deletion from lower in the tree 2020 * could try to delete the only pointer in this node. 2021 * So, pull some keys from the left. 2022 * There has to be a left pointer at this point because 2023 * otherwise we would have pulled some pointers from the 2024 * right 2025 */ 2026 if (!left) { 2027 ret = -EROFS; 2028 btrfs_std_error(root->fs_info, ret); 2029 goto enospc; 2030 } 2031 wret = balance_node_right(trans, root, mid, left); 2032 if (wret < 0) { 2033 ret = wret; 2034 goto enospc; 2035 } 2036 if (wret == 1) { 2037 wret = push_node_left(trans, root, left, mid, 1); 2038 if (wret < 0) 2039 ret = wret; 2040 } 2041 BUG_ON(wret == 1); 2042 } 2043 if (btrfs_header_nritems(mid) == 0) { 2044 clean_tree_block(trans, root, mid); 2045 btrfs_tree_unlock(mid); 2046 del_ptr(root, path, level + 1, pslot); 2047 root_sub_used(root, mid->len); 2048 btrfs_free_tree_block(trans, root, mid, 0, 1); 2049 free_extent_buffer_stale(mid); 2050 mid = NULL; 2051 } else { 2052 /* update the parent key to reflect our changes */ 2053 struct btrfs_disk_key mid_key; 2054 btrfs_node_key(mid, &mid_key, 0); 2055 tree_mod_log_set_node_key(root->fs_info, parent, 2056 pslot, 0); 2057 btrfs_set_node_key(parent, &mid_key, pslot); 2058 btrfs_mark_buffer_dirty(parent); 2059 } 2060 2061 /* update the path */ 2062 if (left) { 2063 if (btrfs_header_nritems(left) > orig_slot) { 2064 extent_buffer_get(left); 2065 /* left was locked after cow */ 2066 path->nodes[level] = left; 2067 path->slots[level + 1] -= 1; 2068 path->slots[level] = orig_slot; 2069 if (mid) { 2070 btrfs_tree_unlock(mid); 2071 free_extent_buffer(mid); 2072 } 2073 } else { 2074 orig_slot -= btrfs_header_nritems(left); 2075 path->slots[level] = orig_slot; 2076 } 2077 } 2078 /* double check we haven't messed things up */ 2079 if (orig_ptr != 2080 btrfs_node_blockptr(path->nodes[level], path->slots[level])) 2081 BUG(); 2082 enospc: 2083 if (right) { 2084 btrfs_tree_unlock(right); 2085 free_extent_buffer(right); 2086 } 2087 if (left) { 2088 if (path->nodes[level] != left) 2089 btrfs_tree_unlock(left); 2090 free_extent_buffer(left); 2091 } 2092 return ret; 2093 } 2094 2095 /* Node balancing for insertion. Here we only split or push nodes around 2096 * when they are completely full. This is also done top down, so we 2097 * have to be pessimistic. 
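 *
 * A sketch of the left push (fan-out shrunk to 4 for illustration; the
 * real limit is BTRFS_NODEPTRS_PER_BLOCK):
 *
 *	before:  left [A B]      mid [C D E F]
 *	after:   left [A B C]    mid [D E F]
 *
 * a path slot that pointed at C now lives in 'left', which is why
 * orig_slot is rebased by left_nr in the code below.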
2098 */ 2099 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, 2100 struct btrfs_root *root, 2101 struct btrfs_path *path, int level) 2102 { 2103 struct extent_buffer *right = NULL; 2104 struct extent_buffer *mid; 2105 struct extent_buffer *left = NULL; 2106 struct extent_buffer *parent = NULL; 2107 int ret = 0; 2108 int wret; 2109 int pslot; 2110 int orig_slot = path->slots[level]; 2111 2112 if (level == 0) 2113 return 1; 2114 2115 mid = path->nodes[level]; 2116 WARN_ON(btrfs_header_generation(mid) != trans->transid); 2117 2118 if (level < BTRFS_MAX_LEVEL - 1) { 2119 parent = path->nodes[level + 1]; 2120 pslot = path->slots[level + 1]; 2121 } 2122 2123 if (!parent) 2124 return 1; 2125 2126 left = read_node_slot(root, parent, pslot - 1); 2127 2128 /* first, try to make some room in the middle buffer */ 2129 if (left) { 2130 u32 left_nr; 2131 2132 btrfs_tree_lock(left); 2133 btrfs_set_lock_blocking(left); 2134 2135 left_nr = btrfs_header_nritems(left); 2136 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { 2137 wret = 1; 2138 } else { 2139 ret = btrfs_cow_block(trans, root, left, parent, 2140 pslot - 1, &left); 2141 if (ret) 2142 wret = 1; 2143 else { 2144 wret = push_node_left(trans, root, 2145 left, mid, 0); 2146 } 2147 } 2148 if (wret < 0) 2149 ret = wret; 2150 if (wret == 0) { 2151 struct btrfs_disk_key disk_key; 2152 orig_slot += left_nr; 2153 btrfs_node_key(mid, &disk_key, 0); 2154 tree_mod_log_set_node_key(root->fs_info, parent, 2155 pslot, 0); 2156 btrfs_set_node_key(parent, &disk_key, pslot); 2157 btrfs_mark_buffer_dirty(parent); 2158 if (btrfs_header_nritems(left) > orig_slot) { 2159 path->nodes[level] = left; 2160 path->slots[level + 1] -= 1; 2161 path->slots[level] = orig_slot; 2162 btrfs_tree_unlock(mid); 2163 free_extent_buffer(mid); 2164 } else { 2165 orig_slot -= 2166 btrfs_header_nritems(left); 2167 path->slots[level] = orig_slot; 2168 btrfs_tree_unlock(left); 2169 free_extent_buffer(left); 2170 } 2171 return 0; 2172 } 2173 btrfs_tree_unlock(left); 2174 free_extent_buffer(left); 2175 } 2176 right = read_node_slot(root, parent, pslot + 1); 2177 2178 /* 2179 * then try to empty the right most buffer into the middle 2180 */ 2181 if (right) { 2182 u32 right_nr; 2183 2184 btrfs_tree_lock(right); 2185 btrfs_set_lock_blocking(right); 2186 2187 right_nr = btrfs_header_nritems(right); 2188 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { 2189 wret = 1; 2190 } else { 2191 ret = btrfs_cow_block(trans, root, right, 2192 parent, pslot + 1, 2193 &right); 2194 if (ret) 2195 wret = 1; 2196 else { 2197 wret = balance_node_right(trans, root, 2198 right, mid); 2199 } 2200 } 2201 if (wret < 0) 2202 ret = wret; 2203 if (wret == 0) { 2204 struct btrfs_disk_key disk_key; 2205 2206 btrfs_node_key(right, &disk_key, 0); 2207 tree_mod_log_set_node_key(root->fs_info, parent, 2208 pslot + 1, 0); 2209 btrfs_set_node_key(parent, &disk_key, pslot + 1); 2210 btrfs_mark_buffer_dirty(parent); 2211 2212 if (btrfs_header_nritems(mid) <= orig_slot) { 2213 path->nodes[level] = right; 2214 path->slots[level + 1] += 1; 2215 path->slots[level] = orig_slot - 2216 btrfs_header_nritems(mid); 2217 btrfs_tree_unlock(mid); 2218 free_extent_buffer(mid); 2219 } else { 2220 btrfs_tree_unlock(right); 2221 free_extent_buffer(right); 2222 } 2223 return 0; 2224 } 2225 btrfs_tree_unlock(right); 2226 free_extent_buffer(right); 2227 } 2228 return 1; 2229 } 2230 2231 /* 2232 * readahead one full node of leaves, finding things that are close 2233 * to the block in 'slot', and triggering ra on them. 
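 *
 * The heuristic is deliberately cheap: starting from 'slot' we walk the
 * node in the direction given by path->reada and only readahead child
 * blocks whose disk address lies within 64KiB of the starting block,
 * giving up after 64KiB worth of readahead or 32 slots scanned.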
2234 */ 2235 static void reada_for_search(struct btrfs_root *root, 2236 struct btrfs_path *path, 2237 int level, int slot, u64 objectid) 2238 { 2239 struct extent_buffer *node; 2240 struct btrfs_disk_key disk_key; 2241 u32 nritems; 2242 u64 search; 2243 u64 target; 2244 u64 nread = 0; 2245 u64 gen; 2246 int direction = path->reada; 2247 struct extent_buffer *eb; 2248 u32 nr; 2249 u32 blocksize; 2250 u32 nscan = 0; 2251 2252 if (level != 1) 2253 return; 2254 2255 if (!path->nodes[level]) 2256 return; 2257 2258 node = path->nodes[level]; 2259 2260 search = btrfs_node_blockptr(node, slot); 2261 blocksize = root->nodesize; 2262 eb = btrfs_find_tree_block(root, search); 2263 if (eb) { 2264 free_extent_buffer(eb); 2265 return; 2266 } 2267 2268 target = search; 2269 2270 nritems = btrfs_header_nritems(node); 2271 nr = slot; 2272 2273 while (1) { 2274 if (direction < 0) { 2275 if (nr == 0) 2276 break; 2277 nr--; 2278 } else if (direction > 0) { 2279 nr++; 2280 if (nr >= nritems) 2281 break; 2282 } 2283 if (path->reada < 0 && objectid) { 2284 btrfs_node_key(node, &disk_key, nr); 2285 if (btrfs_disk_key_objectid(&disk_key) != objectid) 2286 break; 2287 } 2288 search = btrfs_node_blockptr(node, nr); 2289 if ((search <= target && target - search <= 65536) || 2290 (search > target && search - target <= 65536)) { 2291 gen = btrfs_node_ptr_generation(node, nr); 2292 readahead_tree_block(root, search); 2293 nread += blocksize; 2294 } 2295 nscan++; 2296 if ((nread > 65536 || nscan > 32)) 2297 break; 2298 } 2299 } 2300 2301 static noinline void reada_for_balance(struct btrfs_root *root, 2302 struct btrfs_path *path, int level) 2303 { 2304 int slot; 2305 int nritems; 2306 struct extent_buffer *parent; 2307 struct extent_buffer *eb; 2308 u64 gen; 2309 u64 block1 = 0; 2310 u64 block2 = 0; 2311 2312 parent = path->nodes[level + 1]; 2313 if (!parent) 2314 return; 2315 2316 nritems = btrfs_header_nritems(parent); 2317 slot = path->slots[level + 1]; 2318 2319 if (slot > 0) { 2320 block1 = btrfs_node_blockptr(parent, slot - 1); 2321 gen = btrfs_node_ptr_generation(parent, slot - 1); 2322 eb = btrfs_find_tree_block(root, block1); 2323 /* 2324 * if we get -eagain from btrfs_buffer_uptodate, we 2325 * don't want to return eagain here. That will loop 2326 * forever 2327 */ 2328 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) 2329 block1 = 0; 2330 free_extent_buffer(eb); 2331 } 2332 if (slot + 1 < nritems) { 2333 block2 = btrfs_node_blockptr(parent, slot + 1); 2334 gen = btrfs_node_ptr_generation(parent, slot + 1); 2335 eb = btrfs_find_tree_block(root, block2); 2336 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) 2337 block2 = 0; 2338 free_extent_buffer(eb); 2339 } 2340 2341 if (block1) 2342 readahead_tree_block(root, block1); 2343 if (block2) 2344 readahead_tree_block(root, block2); 2345 } 2346 2347 2348 /* 2349 * when we walk down the tree, it is usually safe to unlock the higher layers 2350 * in the tree. The exceptions are when our path goes through slot 0, because 2351 * operations on the tree might require changing key pointers higher up in the 2352 * tree. 2353 * 2354 * callers might also have set path->keep_locks, which tells this code to keep 2355 * the lock if the path points to the last slot in the block. This is part of 2356 * walking through the tree, and selecting the next slot in the higher block. 2357 * 2358 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. 
so 2359 * if lowest_unlock is 1, level 0 won't be unlocked 2360 */ 2361 static noinline void unlock_up(struct btrfs_path *path, int level, 2362 int lowest_unlock, int min_write_lock_level, 2363 int *write_lock_level) 2364 { 2365 int i; 2366 int skip_level = level; 2367 int no_skips = 0; 2368 struct extent_buffer *t; 2369 2370 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2371 if (!path->nodes[i]) 2372 break; 2373 if (!path->locks[i]) 2374 break; 2375 if (!no_skips && path->slots[i] == 0) { 2376 skip_level = i + 1; 2377 continue; 2378 } 2379 if (!no_skips && path->keep_locks) { 2380 u32 nritems; 2381 t = path->nodes[i]; 2382 nritems = btrfs_header_nritems(t); 2383 if (nritems < 1 || path->slots[i] >= nritems - 1) { 2384 skip_level = i + 1; 2385 continue; 2386 } 2387 } 2388 if (skip_level < i && i >= lowest_unlock) 2389 no_skips = 1; 2390 2391 t = path->nodes[i]; 2392 if (i >= lowest_unlock && i > skip_level && path->locks[i]) { 2393 btrfs_tree_unlock_rw(t, path->locks[i]); 2394 path->locks[i] = 0; 2395 if (write_lock_level && 2396 i > min_write_lock_level && 2397 i <= *write_lock_level) { 2398 *write_lock_level = i - 1; 2399 } 2400 } 2401 } 2402 } 2403 2404 /* 2405 * This releases any locks held in the path starting at level and 2406 * going all the way up to the root. 2407 * 2408 * btrfs_search_slot will keep the lock held on higher nodes in a few 2409 * corner cases, such as COW of the block at slot zero in the node. This 2410 * ignores those rules, and it should only be called when there are no 2411 * more updates to be done higher up in the tree. 2412 */ 2413 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level) 2414 { 2415 int i; 2416 2417 if (path->keep_locks) 2418 return; 2419 2420 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 2421 if (!path->nodes[i]) 2422 continue; 2423 if (!path->locks[i]) 2424 continue; 2425 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); 2426 path->locks[i] = 0; 2427 } 2428 } 2429 2430 /* 2431 * helper function for btrfs_search_slot. The goal is to find a block 2432 * in cache without setting the path to blocking. If we find the block 2433 * we return zero and the path is unchanged. 2434 * 2435 * If we can't find the block, we set the path blocking and do some 2436 * reada. -EAGAIN is returned and the search must be repeated. 2437 */ 2438 static int 2439 read_block_for_search(struct btrfs_trans_handle *trans, 2440 struct btrfs_root *root, struct btrfs_path *p, 2441 struct extent_buffer **eb_ret, int level, int slot, 2442 struct btrfs_key *key, u64 time_seq) 2443 { 2444 u64 blocknr; 2445 u64 gen; 2446 struct extent_buffer *b = *eb_ret; 2447 struct extent_buffer *tmp; 2448 int ret; 2449 2450 blocknr = btrfs_node_blockptr(b, slot); 2451 gen = btrfs_node_ptr_generation(b, slot); 2452 2453 tmp = btrfs_find_tree_block(root, blocknr); 2454 if (tmp) { 2455 /* first we do an atomic uptodate check */ 2456 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) { 2457 *eb_ret = tmp; 2458 return 0; 2459 } 2460 2461 /* the pages were up to date, but we failed 2462 * the generation number check. Do a full 2463 * read for the generation number that is correct. 
2464 * We must do this without dropping locks so 2465 * we can trust our generation number 2466 */ 2467 btrfs_set_path_blocking(p); 2468 2469 /* now we're allowed to do a blocking uptodate check */ 2470 ret = btrfs_read_buffer(tmp, gen); 2471 if (!ret) { 2472 *eb_ret = tmp; 2473 return 0; 2474 } 2475 free_extent_buffer(tmp); 2476 btrfs_release_path(p); 2477 return -EIO; 2478 } 2479 2480 /* 2481 * reduce lock contention at high levels 2482 * of the btree by dropping locks before 2483 * we read. Don't release the lock on the current 2484 * level because we need to walk this node to figure 2485 * out which blocks to read. 2486 */ 2487 btrfs_unlock_up_safe(p, level + 1); 2488 btrfs_set_path_blocking(p); 2489 2490 free_extent_buffer(tmp); 2491 if (p->reada) 2492 reada_for_search(root, p, level, slot, key->objectid); 2493 2494 btrfs_release_path(p); 2495 2496 ret = -EAGAIN; 2497 tmp = read_tree_block(root, blocknr, 0); 2498 if (tmp) { 2499 /* 2500 * If the read above didn't mark this buffer up to date, 2501 * it will never end up being up to date. Set ret to EIO now 2502 * and give up so that our caller doesn't loop forever 2503 * on our EAGAINs. 2504 */ 2505 if (!btrfs_buffer_uptodate(tmp, 0, 0)) 2506 ret = -EIO; 2507 free_extent_buffer(tmp); 2508 } 2509 return ret; 2510 } 2511 2512 /* 2513 * helper function for btrfs_search_slot. This does all of the checks 2514 * for node-level blocks and does any balancing required based on 2515 * the ins_len. 2516 * 2517 * If no extra work was required, zero is returned. If we had to 2518 * drop the path, -EAGAIN is returned and btrfs_search_slot must 2519 * start over 2520 */ 2521 static int 2522 setup_nodes_for_search(struct btrfs_trans_handle *trans, 2523 struct btrfs_root *root, struct btrfs_path *p, 2524 struct extent_buffer *b, int level, int ins_len, 2525 int *write_lock_level) 2526 { 2527 int ret; 2528 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >= 2529 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) { 2530 int sret; 2531 2532 if (*write_lock_level < level + 1) { 2533 *write_lock_level = level + 1; 2534 btrfs_release_path(p); 2535 goto again; 2536 } 2537 2538 btrfs_set_path_blocking(p); 2539 reada_for_balance(root, p, level); 2540 sret = split_node(trans, root, p, level); 2541 btrfs_clear_path_blocking(p, NULL, 0); 2542 2543 BUG_ON(sret > 0); 2544 if (sret) { 2545 ret = sret; 2546 goto done; 2547 } 2548 b = p->nodes[level]; 2549 } else if (ins_len < 0 && btrfs_header_nritems(b) < 2550 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) { 2551 int sret; 2552 2553 if (*write_lock_level < level + 1) { 2554 *write_lock_level = level + 1; 2555 btrfs_release_path(p); 2556 goto again; 2557 } 2558 2559 btrfs_set_path_blocking(p); 2560 reada_for_balance(root, p, level); 2561 sret = balance_level(trans, root, p, level); 2562 btrfs_clear_path_blocking(p, NULL, 0); 2563 2564 if (sret) { 2565 ret = sret; 2566 goto done; 2567 } 2568 b = p->nodes[level]; 2569 if (!b) { 2570 btrfs_release_path(p); 2571 goto again; 2572 } 2573 BUG_ON(btrfs_header_nritems(b) == 1); 2574 } 2575 return 0; 2576 2577 again: 2578 ret = -EAGAIN; 2579 done: 2580 return ret; 2581 } 2582 2583 static void key_search_validate(struct extent_buffer *b, 2584 struct btrfs_key *key, 2585 int level) 2586 { 2587 #ifdef CONFIG_BTRFS_ASSERT 2588 struct btrfs_disk_key disk_key; 2589 2590 btrfs_cpu_key_to_disk(&disk_key, key); 2591 2592 if (level == 0) 2593 ASSERT(!memcmp_extent_buffer(b, &disk_key, 2594 offsetof(struct btrfs_leaf, items[0].key), 2595 sizeof(disk_key))); 2596 else 2597 
ASSERT(!memcmp_extent_buffer(b, &disk_key, 2598 offsetof(struct btrfs_node, ptrs[0].key), 2599 sizeof(disk_key))); 2600 #endif 2601 } 2602 2603 static int key_search(struct extent_buffer *b, struct btrfs_key *key, 2604 int level, int *prev_cmp, int *slot) 2605 { 2606 if (*prev_cmp != 0) { 2607 *prev_cmp = bin_search(b, key, level, slot); 2608 return *prev_cmp; 2609 } 2610 2611 key_search_validate(b, key, level); 2612 *slot = 0; 2613 2614 return 0; 2615 } 2616 2617 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, 2618 u64 iobjectid, u64 ioff, u8 key_type, 2619 struct btrfs_key *found_key) 2620 { 2621 int ret; 2622 struct btrfs_key key; 2623 struct extent_buffer *eb; 2624 2625 ASSERT(path); 2626 ASSERT(found_key); 2627 2628 key.type = key_type; 2629 key.objectid = iobjectid; 2630 key.offset = ioff; 2631 2632 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); 2633 if (ret < 0) 2634 return ret; 2635 2636 eb = path->nodes[0]; 2637 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { 2638 ret = btrfs_next_leaf(fs_root, path); 2639 if (ret) 2640 return ret; 2641 eb = path->nodes[0]; 2642 } 2643 2644 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); 2645 if (found_key->type != key.type || 2646 found_key->objectid != key.objectid) 2647 return 1; 2648 2649 return 0; 2650 } 2651 2652 /* 2653 * look for key in the tree. path is filled in with nodes along the way 2654 * if key is found, we return zero and you can find the item in the leaf 2655 * level of the path (level 0) 2656 * 2657 * If the key isn't found, the path points to the slot where it should 2658 * be inserted, and 1 is returned. If there are other errors during the 2659 * search a negative error number is returned. 2660 * 2661 * if ins_len > 0, nodes and leaves will be split as we walk down the 2662 * tree. 
if ins_len < 0, nodes will be merged as we walk down the tree (if 2663 * possible) 2664 */ 2665 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root 2666 *root, struct btrfs_key *key, struct btrfs_path *p, int 2667 ins_len, int cow) 2668 { 2669 struct extent_buffer *b; 2670 int slot; 2671 int ret; 2672 int err; 2673 int level; 2674 int lowest_unlock = 1; 2675 int root_lock; 2676 /* everything at write_lock_level or lower must be write locked */ 2677 int write_lock_level = 0; 2678 u8 lowest_level = 0; 2679 int min_write_lock_level; 2680 int prev_cmp; 2681 2682 lowest_level = p->lowest_level; 2683 WARN_ON(lowest_level && ins_len > 0); 2684 WARN_ON(p->nodes[0] != NULL); 2685 BUG_ON(!cow && ins_len); 2686 2687 if (ins_len < 0) { 2688 lowest_unlock = 2; 2689 2690 /* when we are removing items, we might have to go up to level 2691 * two as we update tree pointers. Make sure we keep write locks 2692 * for those levels as well 2693 */ 2694 write_lock_level = 2; 2695 } else if (ins_len > 0) { 2696 /* 2697 * for inserting items, make sure we have a write lock on 2698 * level 1 so we can update keys 2699 */ 2700 write_lock_level = 1; 2701 } 2702 2703 if (!cow) 2704 write_lock_level = -1; 2705 2706 if (cow && (p->keep_locks || p->lowest_level)) 2707 write_lock_level = BTRFS_MAX_LEVEL; 2708 2709 min_write_lock_level = write_lock_level; 2710 2711 again: 2712 prev_cmp = -1; 2713 /* 2714 * we try very hard to do read locks on the root 2715 */ 2716 root_lock = BTRFS_READ_LOCK; 2717 level = 0; 2718 if (p->search_commit_root) { 2719 /* 2720 * the commit roots are read only 2721 * so we always do read locks 2722 */ 2723 if (p->need_commit_sem) 2724 down_read(&root->fs_info->commit_root_sem); 2725 b = root->commit_root; 2726 extent_buffer_get(b); 2727 level = btrfs_header_level(b); 2728 if (p->need_commit_sem) 2729 up_read(&root->fs_info->commit_root_sem); 2730 if (!p->skip_locking) 2731 btrfs_tree_read_lock(b); 2732 } else { 2733 if (p->skip_locking) { 2734 b = btrfs_root_node(root); 2735 level = btrfs_header_level(b); 2736 } else { 2737 /* we don't know the level of the root node 2738 * until we actually have it read locked 2739 */ 2740 b = btrfs_read_lock_root_node(root); 2741 level = btrfs_header_level(b); 2742 if (level <= write_lock_level) { 2743 /* whoops, must trade for write lock */ 2744 btrfs_tree_read_unlock(b); 2745 free_extent_buffer(b); 2746 b = btrfs_lock_root_node(root); 2747 root_lock = BTRFS_WRITE_LOCK; 2748 2749 /* the level might have changed, check again */ 2750 level = btrfs_header_level(b); 2751 } 2752 } 2753 } 2754 p->nodes[level] = b; 2755 if (!p->skip_locking) 2756 p->locks[level] = root_lock; 2757 2758 while (b) { 2759 level = btrfs_header_level(b); 2760 2761 /* 2762 * setup the path here so we can release it under lock 2763 * contention with the cow code 2764 */ 2765 if (cow) { 2766 /* 2767 * if we don't really need to cow this block 2768 * then we don't want to set the path blocking, 2769 * so we test it here 2770 */ 2771 if (!should_cow_block(trans, root, b)) 2772 goto cow_done; 2773 2774 /* 2775 * must have write locks on this node and the 2776 * parent 2777 */ 2778 if (level > write_lock_level || 2779 (level + 1 > write_lock_level && 2780 level + 1 < BTRFS_MAX_LEVEL && 2781 p->nodes[level + 1])) { 2782 write_lock_level = level + 1; 2783 btrfs_release_path(p); 2784 goto again; 2785 } 2786 2787 btrfs_set_path_blocking(p); 2788 err = btrfs_cow_block(trans, root, b, 2789 p->nodes[level + 1], 2790 p->slots[level + 1], &b); 2791 if (err) { 2792 ret = err; 2793 goto done;
2794 } 2795 } 2796 cow_done: 2797 p->nodes[level] = b; 2798 btrfs_clear_path_blocking(p, NULL, 0); 2799 2800 /* 2801 * we have a lock on b and as long as we aren't changing 2802 * the tree, there is no way for the items in b to change. 2803 * It is safe to drop the lock on our parent before we 2804 * go through the expensive btree search on b. 2805 * 2806 * If we're inserting or deleting (ins_len != 0), then we might 2807 * be changing slot zero, which may require changing the parent. 2808 * So, we can't drop the lock until after we know which slot 2809 * we're operating on. 2810 */ 2811 if (!ins_len && !p->keep_locks) { 2812 int u = level + 1; 2813 2814 if (u < BTRFS_MAX_LEVEL && p->locks[u]) { 2815 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]); 2816 p->locks[u] = 0; 2817 } 2818 } 2819 2820 ret = key_search(b, key, level, &prev_cmp, &slot); 2821 2822 if (level != 0) { 2823 int dec = 0; 2824 if (ret && slot > 0) { 2825 dec = 1; 2826 slot -= 1; 2827 } 2828 p->slots[level] = slot; 2829 err = setup_nodes_for_search(trans, root, p, b, level, 2830 ins_len, &write_lock_level); 2831 if (err == -EAGAIN) 2832 goto again; 2833 if (err) { 2834 ret = err; 2835 goto done; 2836 } 2837 b = p->nodes[level]; 2838 slot = p->slots[level]; 2839 2840 /* 2841 * slot 0 is special: if we change the key 2842 * we have to update the parent pointer 2843 * which means we must have a write lock 2844 * on the parent 2845 */ 2846 if (slot == 0 && ins_len && 2847 write_lock_level < level + 1) { 2848 write_lock_level = level + 1; 2849 btrfs_release_path(p); 2850 goto again; 2851 } 2852 2853 unlock_up(p, level, lowest_unlock, 2854 min_write_lock_level, &write_lock_level); 2855 2856 if (level == lowest_level) { 2857 if (dec) 2858 p->slots[level]++; 2859 goto done; 2860 } 2861 2862 err = read_block_for_search(trans, root, p, 2863 &b, level, slot, key, 0); 2864 if (err == -EAGAIN) 2865 goto again; 2866 if (err) { 2867 ret = err; 2868 goto done; 2869 } 2870 2871 if (!p->skip_locking) { 2872 level = btrfs_header_level(b); 2873 if (level <= write_lock_level) { 2874 err = btrfs_try_tree_write_lock(b); 2875 if (!err) { 2876 btrfs_set_path_blocking(p); 2877 btrfs_tree_lock(b); 2878 btrfs_clear_path_blocking(p, b, 2879 BTRFS_WRITE_LOCK); 2880 } 2881 p->locks[level] = BTRFS_WRITE_LOCK; 2882 } else { 2883 err = btrfs_tree_read_lock_atomic(b); 2884 if (!err) { 2885 btrfs_set_path_blocking(p); 2886 btrfs_tree_read_lock(b); 2887 btrfs_clear_path_blocking(p, b, 2888 BTRFS_READ_LOCK); 2889 } 2890 p->locks[level] = BTRFS_READ_LOCK; 2891 } 2892 p->nodes[level] = b; 2893 } 2894 } else { 2895 p->slots[level] = slot; 2896 if (ins_len > 0 && 2897 btrfs_leaf_free_space(root, b) < ins_len) { 2898 if (write_lock_level < 1) { 2899 write_lock_level = 1; 2900 btrfs_release_path(p); 2901 goto again; 2902 } 2903 2904 btrfs_set_path_blocking(p); 2905 err = split_leaf(trans, root, key, 2906 p, ins_len, ret == 0); 2907 btrfs_clear_path_blocking(p, NULL, 0); 2908 2909 BUG_ON(err > 0); 2910 if (err) { 2911 ret = err; 2912 goto done; 2913 } 2914 } 2915 if (!p->search_for_split) 2916 unlock_up(p, level, lowest_unlock, 2917 min_write_lock_level, &write_lock_level); 2918 goto done; 2919 } 2920 } 2921 ret = 1; 2922 done: 2923 /* 2924 * we don't really know what they plan on doing with the path 2925 * from here on, so for now just mark it as blocking 2926 */ 2927 if (!p->leave_spinning) 2928 btrfs_set_path_blocking(p); 2929 if (ret < 0 && !p->skip_release_on_error) 2930 btrfs_release_path(p); 2931 return ret; 2932 } 2933 2934 /* 2935 * Like btrfs_search_slot,
this looks for a key in the given tree. It uses the 2936 * current state of the tree together with the operations recorded in the tree 2937 * modification log to search for the key in a previous version of this tree, as 2938 * denoted by the time_seq parameter. 2939 * 2940 * Naturally, there is no support for insert, delete or cow operations. 2941 * 2942 * The resulting path and return value will be set up as if we called 2943 * btrfs_search_slot at that point in time with ins_len and cow both set to 0. 2944 */ 2945 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, 2946 struct btrfs_path *p, u64 time_seq) 2947 { 2948 struct extent_buffer *b; 2949 int slot; 2950 int ret; 2951 int err; 2952 int level; 2953 int lowest_unlock = 1; 2954 u8 lowest_level = 0; 2955 int prev_cmp = -1; 2956 2957 lowest_level = p->lowest_level; 2958 WARN_ON(p->nodes[0] != NULL); 2959 2960 if (p->search_commit_root) { 2961 BUG_ON(time_seq); 2962 return btrfs_search_slot(NULL, root, key, p, 0, 0); 2963 } 2964 2965 again: 2966 b = get_old_root(root, time_seq); 2967 level = btrfs_header_level(b); 2968 p->locks[level] = BTRFS_READ_LOCK; 2969 2970 while (b) { 2971 level = btrfs_header_level(b); 2972 p->nodes[level] = b; 2973 btrfs_clear_path_blocking(p, NULL, 0); 2974 2975 /* 2976 * we have a lock on b and as long as we aren't changing 2977 * the tree, there is no way for the items in b to change. 2978 * It is safe to drop the lock on our parent before we 2979 * go through the expensive btree search on b. 2980 */ 2981 btrfs_unlock_up_safe(p, level + 1); 2982 2983 /* 2984 * Since we can unwind eb's we want to do a real search every 2985 * time. 2986 */ 2987 prev_cmp = -1; 2988 ret = key_search(b, key, level, &prev_cmp, &slot); 2989 2990 if (level != 0) { 2991 int dec = 0; 2992 if (ret && slot > 0) { 2993 dec = 1; 2994 slot -= 1; 2995 } 2996 p->slots[level] = slot; 2997 unlock_up(p, level, lowest_unlock, 0, NULL); 2998 2999 if (level == lowest_level) { 3000 if (dec) 3001 p->slots[level]++; 3002 goto done; 3003 } 3004 3005 err = read_block_for_search(NULL, root, p, &b, level, 3006 slot, key, time_seq); 3007 if (err == -EAGAIN) 3008 goto again; 3009 if (err) { 3010 ret = err; 3011 goto done; 3012 } 3013 3014 level = btrfs_header_level(b); 3015 err = btrfs_tree_read_lock_atomic(b); 3016 if (!err) { 3017 btrfs_set_path_blocking(p); 3018 btrfs_tree_read_lock(b); 3019 btrfs_clear_path_blocking(p, b, 3020 BTRFS_READ_LOCK); 3021 } 3022 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq); 3023 if (!b) { 3024 ret = -ENOMEM; 3025 goto done; 3026 } 3027 p->locks[level] = BTRFS_READ_LOCK; 3028 p->nodes[level] = b; 3029 } else { 3030 p->slots[level] = slot; 3031 unlock_up(p, level, lowest_unlock, 0, NULL); 3032 goto done; 3033 } 3034 } 3035 ret = 1; 3036 done: 3037 if (!p->leave_spinning) 3038 btrfs_set_path_blocking(p); 3039 if (ret < 0) 3040 btrfs_release_path(p); 3041 3042 return ret; 3043 } 3044 3045 /* 3046 * helper to use instead of search slot if no exact match is needed but 3047 * instead the next or previous item should be returned. 3048 * When find_higher is true, the next higher item is returned, the next lower 3049 * otherwise. 3050 * When return_any and find_higher are both true, and no higher item is found, 3051 * return the next lower instead. 3052 * When return_any is true and find_higher is false, and no lower item is found, 3053 * return the next higher instead.
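 *
 * A minimal caller-side sketch (key values purely illustrative):
 *
 *	key.objectid = ino;
 *	key.type = BTRFS_EXTENT_DATA_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 *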
3054 * It returns 0 if any item is found, 1 if none is found (tree empty), and 3055 * < 0 on error 3056 */ 3057 int btrfs_search_slot_for_read(struct btrfs_root *root, 3058 struct btrfs_key *key, struct btrfs_path *p, 3059 int find_higher, int return_any) 3060 { 3061 int ret; 3062 struct extent_buffer *leaf; 3063 3064 again: 3065 ret = btrfs_search_slot(NULL, root, key, p, 0, 0); 3066 if (ret <= 0) 3067 return ret; 3068 /* 3069 * a return value of 1 means the path is at the position where the 3070 * item should be inserted. Normally this is the next bigger item, 3071 * but in case the previous item is the last in a leaf, path points 3072 * to the first free slot in the previous leaf, i.e. at an invalid 3073 * item. 3074 */ 3075 leaf = p->nodes[0]; 3076 3077 if (find_higher) { 3078 if (p->slots[0] >= btrfs_header_nritems(leaf)) { 3079 ret = btrfs_next_leaf(root, p); 3080 if (ret <= 0) 3081 return ret; 3082 if (!return_any) 3083 return 1; 3084 /* 3085 * no higher item found, return the next 3086 * lower instead 3087 */ 3088 return_any = 0; 3089 find_higher = 0; 3090 btrfs_release_path(p); 3091 goto again; 3092 } 3093 } else { 3094 if (p->slots[0] == 0) { 3095 ret = btrfs_prev_leaf(root, p); 3096 if (ret < 0) 3097 return ret; 3098 if (!ret) { 3099 leaf = p->nodes[0]; 3100 if (p->slots[0] == btrfs_header_nritems(leaf)) 3101 p->slots[0]--; 3102 return 0; 3103 } 3104 if (!return_any) 3105 return 1; 3106 /* 3107 * no lower item found, return the next 3108 * higher instead 3109 */ 3110 return_any = 0; 3111 find_higher = 1; 3112 btrfs_release_path(p); 3113 goto again; 3114 } else { 3115 --p->slots[0]; 3116 } 3117 } 3118 return 0; 3119 } 3120 3121 /* 3122 * adjust the pointers going up the tree, starting at level 3123 * making sure the right key of each node points to 'key'. 3124 * This is used after shifting pointers to the left, so it stops 3125 * fixing up pointers when a given leaf/node is not in slot 0 of the 3126 * higher levels 3127 * 3128 */ 3129 static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path, 3130 struct btrfs_disk_key *key, int level) 3131 { 3132 int i; 3133 struct extent_buffer *t; 3134 3135 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 3136 int tslot = path->slots[i]; 3137 if (!path->nodes[i]) 3138 break; 3139 t = path->nodes[i]; 3140 tree_mod_log_set_node_key(root->fs_info, t, tslot, 1); 3141 btrfs_set_node_key(t, key, tslot); 3142 btrfs_mark_buffer_dirty(path->nodes[i]); 3143 if (tslot != 0) 3144 break; 3145 } 3146 } 3147 3148 /* 3149 * update item key. 3150 * 3151 * This function isn't completely safe. It's the caller's responsibility to ensure 3152 * that the new key won't break the order 3153 */ 3154 void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path, 3155 struct btrfs_key *new_key) 3156 { 3157 struct btrfs_disk_key disk_key; 3158 struct extent_buffer *eb; 3159 int slot; 3160 3161 eb = path->nodes[0]; 3162 slot = path->slots[0]; 3163 if (slot > 0) { 3164 btrfs_item_key(eb, &disk_key, slot - 1); 3165 BUG_ON(comp_keys(&disk_key, new_key) >= 0); 3166 } 3167 if (slot < btrfs_header_nritems(eb) - 1) { 3168 btrfs_item_key(eb, &disk_key, slot + 1); 3169 BUG_ON(comp_keys(&disk_key, new_key) <= 0); 3170 } 3171 3172 btrfs_cpu_key_to_disk(&disk_key, new_key); 3173 btrfs_set_item_key(eb, &disk_key, slot); 3174 btrfs_mark_buffer_dirty(eb); 3175 if (slot == 0) 3176 fixup_low_keys(root, path, &disk_key, 1); 3177 } 3178 3179 /* 3180 * try to push data from one node into the next node left in the 3181 * tree.
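 * Unless 'empty' is set, the source node is never drained below 8
 * pointers; presumably this hysteresis keeps nodes that sit near the
 * balance threshold from bouncing between their neighbors on every
 * insert and delete.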
3182 * 3183 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible 3184 * error, and > 0 if there was no room in the left hand block. 3185 */ 3186 static int push_node_left(struct btrfs_trans_handle *trans, 3187 struct btrfs_root *root, struct extent_buffer *dst, 3188 struct extent_buffer *src, int empty) 3189 { 3190 int push_items = 0; 3191 int src_nritems; 3192 int dst_nritems; 3193 int ret = 0; 3194 3195 src_nritems = btrfs_header_nritems(src); 3196 dst_nritems = btrfs_header_nritems(dst); 3197 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems; 3198 WARN_ON(btrfs_header_generation(src) != trans->transid); 3199 WARN_ON(btrfs_header_generation(dst) != trans->transid); 3200 3201 if (!empty && src_nritems <= 8) 3202 return 1; 3203 3204 if (push_items <= 0) 3205 return 1; 3206 3207 if (empty) { 3208 push_items = min(src_nritems, push_items); 3209 if (push_items < src_nritems) { 3210 /* leave at least 8 pointers in the node if 3211 * we aren't going to empty it 3212 */ 3213 if (src_nritems - push_items < 8) { 3214 if (push_items <= 8) 3215 return 1; 3216 push_items -= 8; 3217 } 3218 } 3219 } else 3220 push_items = min(src_nritems - 8, push_items); 3221 3222 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0, 3223 push_items); 3224 if (ret) { 3225 btrfs_abort_transaction(trans, root, ret); 3226 return ret; 3227 } 3228 copy_extent_buffer(dst, src, 3229 btrfs_node_key_ptr_offset(dst_nritems), 3230 btrfs_node_key_ptr_offset(0), 3231 push_items * sizeof(struct btrfs_key_ptr)); 3232 3233 if (push_items < src_nritems) { 3234 /* 3235 * don't call tree_mod_log_eb_move here, key removal was already 3236 * fully logged by tree_mod_log_eb_copy above. 3237 */ 3238 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0), 3239 btrfs_node_key_ptr_offset(push_items), 3240 (src_nritems - push_items) * 3241 sizeof(struct btrfs_key_ptr)); 3242 } 3243 btrfs_set_header_nritems(src, src_nritems - push_items); 3244 btrfs_set_header_nritems(dst, dst_nritems + push_items); 3245 btrfs_mark_buffer_dirty(src); 3246 btrfs_mark_buffer_dirty(dst); 3247 3248 return ret; 3249 } 3250 3251 /* 3252 * try to push data from one node into the next node right in the 3253 * tree. 3254 * 3255 * returns 0 if some ptrs were pushed, < 0 if there was some horrible 3256 * error, and > 0 if there was no room in the right hand block. 
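 * Note the direction of the copy: pointers move from the end of 'src'
 * into the front of 'dst', so dst's existing entries are shifted right
 * first to make room.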
3257 * 3258 * this will only push up to 1/2 the contents of the left node over 3259 */ 3260 static int balance_node_right(struct btrfs_trans_handle *trans, 3261 struct btrfs_root *root, 3262 struct extent_buffer *dst, 3263 struct extent_buffer *src) 3264 { 3265 int push_items = 0; 3266 int max_push; 3267 int src_nritems; 3268 int dst_nritems; 3269 int ret = 0; 3270 3271 WARN_ON(btrfs_header_generation(src) != trans->transid); 3272 WARN_ON(btrfs_header_generation(dst) != trans->transid); 3273 3274 src_nritems = btrfs_header_nritems(src); 3275 dst_nritems = btrfs_header_nritems(dst); 3276 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems; 3277 if (push_items <= 0) 3278 return 1; 3279 3280 if (src_nritems < 4) 3281 return 1; 3282 3283 max_push = src_nritems / 2 + 1; 3284 /* don't try to empty the node */ 3285 if (max_push >= src_nritems) 3286 return 1; 3287 3288 if (max_push < push_items) 3289 push_items = max_push; 3290 3291 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems); 3292 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items), 3293 btrfs_node_key_ptr_offset(0), 3294 (dst_nritems) * 3295 sizeof(struct btrfs_key_ptr)); 3296 3297 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0, 3298 src_nritems - push_items, push_items); 3299 if (ret) { 3300 btrfs_abort_transaction(trans, root, ret); 3301 return ret; 3302 } 3303 copy_extent_buffer(dst, src, 3304 btrfs_node_key_ptr_offset(0), 3305 btrfs_node_key_ptr_offset(src_nritems - push_items), 3306 push_items * sizeof(struct btrfs_key_ptr)); 3307 3308 btrfs_set_header_nritems(src, src_nritems - push_items); 3309 btrfs_set_header_nritems(dst, dst_nritems + push_items); 3310 3311 btrfs_mark_buffer_dirty(src); 3312 btrfs_mark_buffer_dirty(dst); 3313 3314 return ret; 3315 } 3316 3317 /* 3318 * helper function to insert a new root level in the tree. 3319 * A new node is allocated, and a single item is inserted to 3320 * point to the existing root 3321 * 3322 * returns zero on success or < 0 on failure. 
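 *
 * Illustrative before/after (keys abbreviated):
 *
 *	before:  root->node = N [k0 k1 k2]
 *	after:   root->node = C [k0] --> N [k0 k1 k2]
 *
 * the single pointer in the new root C reuses the first key of the old
 * root, so lookups descend exactly as before.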
3323 */ 3324 static noinline int insert_new_root(struct btrfs_trans_handle *trans, 3325 struct btrfs_root *root, 3326 struct btrfs_path *path, int level) 3327 { 3328 u64 lower_gen; 3329 struct extent_buffer *lower; 3330 struct extent_buffer *c; 3331 struct extent_buffer *old; 3332 struct btrfs_disk_key lower_key; 3333 3334 BUG_ON(path->nodes[level]); 3335 BUG_ON(path->nodes[level-1] != root->node); 3336 3337 lower = path->nodes[level-1]; 3338 if (level == 1) 3339 btrfs_item_key(lower, &lower_key, 0); 3340 else 3341 btrfs_node_key(lower, &lower_key, 0); 3342 3343 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3344 &lower_key, level, root->node->start, 0); 3345 if (IS_ERR(c)) 3346 return PTR_ERR(c); 3347 3348 root_add_used(root, root->nodesize); 3349 3350 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header)); 3351 btrfs_set_header_nritems(c, 1); 3352 btrfs_set_header_level(c, level); 3353 btrfs_set_header_bytenr(c, c->start); 3354 btrfs_set_header_generation(c, trans->transid); 3355 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV); 3356 btrfs_set_header_owner(c, root->root_key.objectid); 3357 3358 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(), 3359 BTRFS_FSID_SIZE); 3360 3361 write_extent_buffer(c, root->fs_info->chunk_tree_uuid, 3362 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE); 3363 3364 btrfs_set_node_key(c, &lower_key, 0); 3365 btrfs_set_node_blockptr(c, 0, lower->start); 3366 lower_gen = btrfs_header_generation(lower); 3367 WARN_ON(lower_gen != trans->transid); 3368 3369 btrfs_set_node_ptr_generation(c, 0, lower_gen); 3370 3371 btrfs_mark_buffer_dirty(c); 3372 3373 old = root->node; 3374 tree_mod_log_set_root_pointer(root, c, 0); 3375 rcu_assign_pointer(root->node, c); 3376 3377 /* the super has an extra ref to root->node */ 3378 free_extent_buffer(old); 3379 3380 add_root_to_dirty_list(root); 3381 extent_buffer_get(c); 3382 path->nodes[level] = c; 3383 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; 3384 path->slots[level] = 0; 3385 return 0; 3386 } 3387 3388 /* 3389 * worker function to insert a single pointer in a node. 3390 * the node should have enough room for the pointer already 3391 * 3392 * slot and level indicate where you want the key to go, and 3393 * blocknr is the block the key points to. 
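 *
 * e.g. inserting at slot 1 of a node holding [A B C] first shifts B and
 * C one position to the right, then writes the new key/blockptr pair
 * into the gap, leaving [A new B C].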
3394 */ 3395 static void insert_ptr(struct btrfs_trans_handle *trans, 3396 struct btrfs_root *root, struct btrfs_path *path, 3397 struct btrfs_disk_key *key, u64 bytenr, 3398 int slot, int level) 3399 { 3400 struct extent_buffer *lower; 3401 int nritems; 3402 int ret; 3403 3404 BUG_ON(!path->nodes[level]); 3405 btrfs_assert_tree_locked(path->nodes[level]); 3406 lower = path->nodes[level]; 3407 nritems = btrfs_header_nritems(lower); 3408 BUG_ON(slot > nritems); 3409 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root)); 3410 if (slot != nritems) { 3411 if (level) 3412 tree_mod_log_eb_move(root->fs_info, lower, slot + 1, 3413 slot, nritems - slot); 3414 memmove_extent_buffer(lower, 3415 btrfs_node_key_ptr_offset(slot + 1), 3416 btrfs_node_key_ptr_offset(slot), 3417 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 3418 } 3419 if (level) { 3420 ret = tree_mod_log_insert_key(root->fs_info, lower, slot, 3421 MOD_LOG_KEY_ADD, GFP_NOFS); 3422 BUG_ON(ret < 0); 3423 } 3424 btrfs_set_node_key(lower, key, slot); 3425 btrfs_set_node_blockptr(lower, slot, bytenr); 3426 WARN_ON(trans->transid == 0); 3427 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 3428 btrfs_set_header_nritems(lower, nritems + 1); 3429 btrfs_mark_buffer_dirty(lower); 3430 } 3431 3432 /* 3433 * split the node at the specified level in path in two. 3434 * The path is corrected to point to the appropriate node after the split 3435 * 3436 * Before splitting this tries to make some room in the node by pushing 3437 * left and right; if either one works, it returns right away. 3438 * 3439 * returns 0 on success and < 0 on failure 3440 */ 3441 static noinline int split_node(struct btrfs_trans_handle *trans, 3442 struct btrfs_root *root, 3443 struct btrfs_path *path, int level) 3444 { 3445 struct extent_buffer *c; 3446 struct extent_buffer *split; 3447 struct btrfs_disk_key disk_key; 3448 int mid; 3449 int ret; 3450 u32 c_nritems; 3451 3452 c = path->nodes[level]; 3453 WARN_ON(btrfs_header_generation(c) != trans->transid); 3454 if (c == root->node) { 3455 /* 3456 * trying to split the root, let's make a new one 3457 * 3458 * tree mod log: We don't log removal of the old root in 3459 * insert_new_root, because that root buffer will be kept as a 3460 * normal node. We are going to log removal of half of the 3461 * elements below with tree_mod_log_eb_copy. We're holding a 3462 * tree lock on the buffer, which is why we cannot race with 3463 * other tree_mod_log users.
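 * (put differently: the mod log only needs to see the pointers that
 * actually move, and the tree_mod_log_eb_copy call below records
 * exactly that half.)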
3464 */ 3465 ret = insert_new_root(trans, root, path, level + 1); 3466 if (ret) 3467 return ret; 3468 } else { 3469 ret = push_nodes_for_insert(trans, root, path, level); 3470 c = path->nodes[level]; 3471 if (!ret && btrfs_header_nritems(c) < 3472 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) 3473 return 0; 3474 if (ret < 0) 3475 return ret; 3476 } 3477 3478 c_nritems = btrfs_header_nritems(c); 3479 mid = (c_nritems + 1) / 2; 3480 btrfs_node_key(c, &disk_key, mid); 3481 3482 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3483 &disk_key, level, c->start, 0); 3484 if (IS_ERR(split)) 3485 return PTR_ERR(split); 3486 3487 root_add_used(root, root->nodesize); 3488 3489 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header)); 3490 btrfs_set_header_level(split, btrfs_header_level(c)); 3491 btrfs_set_header_bytenr(split, split->start); 3492 btrfs_set_header_generation(split, trans->transid); 3493 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV); 3494 btrfs_set_header_owner(split, root->root_key.objectid); 3495 write_extent_buffer(split, root->fs_info->fsid, 3496 btrfs_header_fsid(), BTRFS_FSID_SIZE); 3497 write_extent_buffer(split, root->fs_info->chunk_tree_uuid, 3498 btrfs_header_chunk_tree_uuid(split), 3499 BTRFS_UUID_SIZE); 3500 3501 ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0, 3502 mid, c_nritems - mid); 3503 if (ret) { 3504 btrfs_abort_transaction(trans, root, ret); 3505 return ret; 3506 } 3507 copy_extent_buffer(split, c, 3508 btrfs_node_key_ptr_offset(0), 3509 btrfs_node_key_ptr_offset(mid), 3510 (c_nritems - mid) * sizeof(struct btrfs_key_ptr)); 3511 btrfs_set_header_nritems(split, c_nritems - mid); 3512 btrfs_set_header_nritems(c, mid); 3513 ret = 0; 3514 3515 btrfs_mark_buffer_dirty(c); 3516 btrfs_mark_buffer_dirty(split); 3517 3518 insert_ptr(trans, root, path, &disk_key, split->start, 3519 path->slots[level + 1] + 1, level + 1); 3520 3521 if (path->slots[level] >= mid) { 3522 path->slots[level] -= mid; 3523 btrfs_tree_unlock(c); 3524 free_extent_buffer(c); 3525 path->nodes[level] = split; 3526 path->slots[level + 1] += 1; 3527 } else { 3528 btrfs_tree_unlock(split); 3529 free_extent_buffer(split); 3530 } 3531 return ret; 3532 } 3533 3534 /* 3535 * how many bytes are required to store the items in a leaf. start 3536 * and nr indicate which items in the leaf to check. This totals up the 3537 * space used both by the item structs and the item data 3538 */ 3539 static int leaf_space_used(struct extent_buffer *l, int start, int nr) 3540 { 3541 struct btrfs_item *start_item; 3542 struct btrfs_item *end_item; 3543 struct btrfs_map_token token; 3544 int data_len; 3545 int nritems = btrfs_header_nritems(l); 3546 int end = min(nritems, start + nr) - 1; 3547 3548 if (!nr) 3549 return 0; 3550 btrfs_init_map_token(&token); 3551 start_item = btrfs_item_nr(start); 3552 end_item = btrfs_item_nr(end); 3553 data_len = btrfs_token_item_offset(l, start_item, &token) + 3554 btrfs_token_item_size(l, start_item, &token); 3555 data_len = data_len - btrfs_token_item_offset(l, end_item, &token); 3556 data_len += sizeof(struct btrfs_item) * nr; 3557 WARN_ON(data_len < 0); 3558 return data_len; 3559 } 3560 3561 /* 3562 * The space between the end of the leaf items and 3563 * the start of the leaf data. 
IOW, how much room 3564 * the leaf has left for both items and data 3565 */ 3566 noinline int btrfs_leaf_free_space(struct btrfs_root *root, 3567 struct extent_buffer *leaf) 3568 { 3569 int nritems = btrfs_header_nritems(leaf); 3570 int ret; 3571 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems); 3572 if (ret < 0) { 3573 btrfs_crit(root->fs_info, 3574 "leaf free space ret %d, leaf data size %lu, used %d nritems %d", 3575 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root), 3576 leaf_space_used(leaf, 0, nritems), nritems); 3577 } 3578 return ret; 3579 } 3580 3581 /* 3582 * min slot controls the lowest index we're willing to push to the 3583 * right. We'll push up to and including min_slot, but no lower 3584 */ 3585 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, 3586 struct btrfs_root *root, 3587 struct btrfs_path *path, 3588 int data_size, int empty, 3589 struct extent_buffer *right, 3590 int free_space, u32 left_nritems, 3591 u32 min_slot) 3592 { 3593 struct extent_buffer *left = path->nodes[0]; 3594 struct extent_buffer *upper = path->nodes[1]; 3595 struct btrfs_map_token token; 3596 struct btrfs_disk_key disk_key; 3597 int slot; 3598 u32 i; 3599 int push_space = 0; 3600 int push_items = 0; 3601 struct btrfs_item *item; 3602 u32 nr; 3603 u32 right_nritems; 3604 u32 data_end; 3605 u32 this_item_size; 3606 3607 btrfs_init_map_token(&token); 3608 3609 if (empty) 3610 nr = 0; 3611 else 3612 nr = max_t(u32, 1, min_slot); 3613 3614 if (path->slots[0] >= left_nritems) 3615 push_space += data_size; 3616 3617 slot = path->slots[1]; 3618 i = left_nritems - 1; 3619 while (i >= nr) { 3620 item = btrfs_item_nr(i); 3621 3622 if (!empty && push_items > 0) { 3623 if (path->slots[0] > i) 3624 break; 3625 if (path->slots[0] == i) { 3626 int space = btrfs_leaf_free_space(root, left); 3627 if (space + push_space * 2 > free_space) 3628 break; 3629 } 3630 } 3631 3632 if (path->slots[0] == i) 3633 push_space += data_size; 3634 3635 this_item_size = btrfs_item_size(left, item); 3636 if (this_item_size + sizeof(*item) + push_space > free_space) 3637 break; 3638 3639 push_items++; 3640 push_space += this_item_size + sizeof(*item); 3641 if (i == 0) 3642 break; 3643 i--; 3644 } 3645 3646 if (push_items == 0) 3647 goto out_unlock; 3648 3649 WARN_ON(!empty && push_items == left_nritems); 3650 3651 /* push left to right */ 3652 right_nritems = btrfs_header_nritems(right); 3653 3654 push_space = btrfs_item_end_nr(left, left_nritems - push_items); 3655 push_space -= leaf_data_end(root, left); 3656 3657 /* make room in the right data area */ 3658 data_end = leaf_data_end(root, right); 3659 memmove_extent_buffer(right, 3660 btrfs_leaf_data(right) + data_end - push_space, 3661 btrfs_leaf_data(right) + data_end, 3662 BTRFS_LEAF_DATA_SIZE(root) - data_end); 3663 3664 /* copy from the left data area */ 3665 copy_extent_buffer(right, left, btrfs_leaf_data(right) + 3666 BTRFS_LEAF_DATA_SIZE(root) - push_space, 3667 btrfs_leaf_data(left) + leaf_data_end(root, left), 3668 push_space); 3669 3670 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items), 3671 btrfs_item_nr_offset(0), 3672 right_nritems * sizeof(struct btrfs_item)); 3673 3674 /* copy the items from left to right */ 3675 copy_extent_buffer(right, left, btrfs_item_nr_offset(0), 3676 btrfs_item_nr_offset(left_nritems - push_items), 3677 push_items * sizeof(struct btrfs_item)); 3678 3679 /* update the item pointers */ 3680 right_nritems += push_items; 3681 btrfs_set_header_nritems(right, right_nritems); 3682 push_space = 
BTRFS_LEAF_DATA_SIZE(root); 3683 for (i = 0; i < right_nritems; i++) { 3684 item = btrfs_item_nr(i); 3685 push_space -= btrfs_token_item_size(right, item, &token); 3686 btrfs_set_token_item_offset(right, item, push_space, &token); 3687 } 3688 3689 left_nritems -= push_items; 3690 btrfs_set_header_nritems(left, left_nritems); 3691 3692 if (left_nritems) 3693 btrfs_mark_buffer_dirty(left); 3694 else 3695 clean_tree_block(trans, root, left); 3696 3697 btrfs_mark_buffer_dirty(right); 3698 3699 btrfs_item_key(right, &disk_key, 0); 3700 btrfs_set_node_key(upper, &disk_key, slot + 1); 3701 btrfs_mark_buffer_dirty(upper); 3702 3703 /* then fixup the leaf pointer in the path */ 3704 if (path->slots[0] >= left_nritems) { 3705 path->slots[0] -= left_nritems; 3706 if (btrfs_header_nritems(path->nodes[0]) == 0) 3707 clean_tree_block(trans, root, path->nodes[0]); 3708 btrfs_tree_unlock(path->nodes[0]); 3709 free_extent_buffer(path->nodes[0]); 3710 path->nodes[0] = right; 3711 path->slots[1] += 1; 3712 } else { 3713 btrfs_tree_unlock(right); 3714 free_extent_buffer(right); 3715 } 3716 return 0; 3717 3718 out_unlock: 3719 btrfs_tree_unlock(right); 3720 free_extent_buffer(right); 3721 return 1; 3722 } 3723 3724 /* 3725 * push some data in the path leaf to the right, trying to free up at 3726 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3727 * 3728 * returns 1 if the push failed because the other node didn't have enough 3729 * room, 0 if everything worked out and < 0 if there were major errors. 3730 * 3731 * this will push starting from min_slot to the end of the leaf. It won't 3732 * push any slot lower than min_slot 3733 */ 3734 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root 3735 *root, struct btrfs_path *path, 3736 int min_data_size, int data_size, 3737 int empty, u32 min_slot) 3738 { 3739 struct extent_buffer *left = path->nodes[0]; 3740 struct extent_buffer *right; 3741 struct extent_buffer *upper; 3742 int slot; 3743 int free_space; 3744 u32 left_nritems; 3745 int ret; 3746 3747 if (!path->nodes[1]) 3748 return 1; 3749 3750 slot = path->slots[1]; 3751 upper = path->nodes[1]; 3752 if (slot >= btrfs_header_nritems(upper) - 1) 3753 return 1; 3754 3755 btrfs_assert_tree_locked(path->nodes[1]); 3756 3757 right = read_node_slot(root, upper, slot + 1); 3758 if (right == NULL) 3759 return 1; 3760 3761 btrfs_tree_lock(right); 3762 btrfs_set_lock_blocking(right); 3763 3764 free_space = btrfs_leaf_free_space(root, right); 3765 if (free_space < data_size) 3766 goto out_unlock; 3767 3768 /* cow and double check */ 3769 ret = btrfs_cow_block(trans, root, right, upper, 3770 slot + 1, &right); 3771 if (ret) 3772 goto out_unlock; 3773 3774 free_space = btrfs_leaf_free_space(root, right); 3775 if (free_space < data_size) 3776 goto out_unlock; 3777 3778 left_nritems = btrfs_header_nritems(left); 3779 if (left_nritems == 0) 3780 goto out_unlock; 3781 3782 if (path->slots[0] == left_nritems && !empty) { 3783 /* Key greater than all keys in the leaf, right neighbor has 3784 * enough room for it and we're not emptying our leaf to delete 3785 * it, therefore use right neighbor to insert the new item and 3786 * no need to touch/dirty our left leaf.
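 * (this is the common case for ascending-key workloads, where every
 * new item lands just past the last slot of the rightmost leaf.)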
*/ 3787 btrfs_tree_unlock(left); 3788 free_extent_buffer(left); 3789 path->nodes[0] = right; 3790 path->slots[0] = 0; 3791 path->slots[1]++; 3792 return 0; 3793 } 3794 3795 return __push_leaf_right(trans, root, path, min_data_size, empty, 3796 right, free_space, left_nritems, min_slot); 3797 out_unlock: 3798 btrfs_tree_unlock(right); 3799 free_extent_buffer(right); 3800 return 1; 3801 } 3802 3803 /* 3804 * push some data in the path leaf to the left, trying to free up at 3805 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3806 * 3807 * max_slot can put a limit on how far into the leaf we'll push items. The 3808 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the 3809 * items 3810 */ 3811 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, 3812 struct btrfs_root *root, 3813 struct btrfs_path *path, int data_size, 3814 int empty, struct extent_buffer *left, 3815 int free_space, u32 right_nritems, 3816 u32 max_slot) 3817 { 3818 struct btrfs_disk_key disk_key; 3819 struct extent_buffer *right = path->nodes[0]; 3820 int i; 3821 int push_space = 0; 3822 int push_items = 0; 3823 struct btrfs_item *item; 3824 u32 old_left_nritems; 3825 u32 nr; 3826 int ret = 0; 3827 u32 this_item_size; 3828 u32 old_left_item_size; 3829 struct btrfs_map_token token; 3830 3831 btrfs_init_map_token(&token); 3832 3833 if (empty) 3834 nr = min(right_nritems, max_slot); 3835 else 3836 nr = min(right_nritems - 1, max_slot); 3837 3838 for (i = 0; i < nr; i++) { 3839 item = btrfs_item_nr(i); 3840 3841 if (!empty && push_items > 0) { 3842 if (path->slots[0] < i) 3843 break; 3844 if (path->slots[0] == i) { 3845 int space = btrfs_leaf_free_space(root, right); 3846 if (space + push_space * 2 > free_space) 3847 break; 3848 } 3849 } 3850 3851 if (path->slots[0] == i) 3852 push_space += data_size; 3853 3854 this_item_size = btrfs_item_size(right, item); 3855 if (this_item_size + sizeof(*item) + push_space > free_space) 3856 break; 3857 3858 push_items++; 3859 push_space += this_item_size + sizeof(*item); 3860 } 3861 3862 if (push_items == 0) { 3863 ret = 1; 3864 goto out; 3865 } 3866 WARN_ON(!empty && push_items == btrfs_header_nritems(right)); 3867 3868 /* push data from right to left */ 3869 copy_extent_buffer(left, right, 3870 btrfs_item_nr_offset(btrfs_header_nritems(left)), 3871 btrfs_item_nr_offset(0), 3872 push_items * sizeof(struct btrfs_item)); 3873 3874 push_space = BTRFS_LEAF_DATA_SIZE(root) - 3875 btrfs_item_offset_nr(right, push_items - 1); 3876 3877 copy_extent_buffer(left, right, btrfs_leaf_data(left) + 3878 leaf_data_end(root, left) - push_space, 3879 btrfs_leaf_data(right) + 3880 btrfs_item_offset_nr(right, push_items - 1), 3881 push_space); 3882 old_left_nritems = btrfs_header_nritems(left); 3883 BUG_ON(old_left_nritems <= 0); 3884 3885 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1); 3886 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) { 3887 u32 ioff; 3888 3889 item = btrfs_item_nr(i); 3890 3891 ioff = btrfs_token_item_offset(left, item, &token); 3892 btrfs_set_token_item_offset(left, item, 3893 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size), 3894 &token); 3895 } 3896 btrfs_set_header_nritems(left, old_left_nritems + push_items); 3897 3898 /* fixup right node */ 3899 if (push_items > right_nritems) 3900 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items, 3901 right_nritems); 3902 3903 if (push_items < right_nritems) { 3904 push_space = btrfs_item_offset_nr(right, push_items - 1) - 3905 
leaf_data_end(root, right); 3906 memmove_extent_buffer(right, btrfs_leaf_data(right) + 3907 BTRFS_LEAF_DATA_SIZE(root) - push_space, 3908 btrfs_leaf_data(right) + 3909 leaf_data_end(root, right), push_space); 3910 3911 memmove_extent_buffer(right, btrfs_item_nr_offset(0), 3912 btrfs_item_nr_offset(push_items), 3913 (btrfs_header_nritems(right) - push_items) * 3914 sizeof(struct btrfs_item)); 3915 } 3916 right_nritems -= push_items; 3917 btrfs_set_header_nritems(right, right_nritems); 3918 push_space = BTRFS_LEAF_DATA_SIZE(root); 3919 for (i = 0; i < right_nritems; i++) { 3920 item = btrfs_item_nr(i); 3921 3922 push_space = push_space - btrfs_token_item_size(right, 3923 item, &token); 3924 btrfs_set_token_item_offset(right, item, push_space, &token); 3925 } 3926 3927 btrfs_mark_buffer_dirty(left); 3928 if (right_nritems) 3929 btrfs_mark_buffer_dirty(right); 3930 else 3931 clean_tree_block(trans, root, right); 3932 3933 btrfs_item_key(right, &disk_key, 0); 3934 fixup_low_keys(root, path, &disk_key, 1); 3935 3936 /* then fixup the leaf pointer in the path */ 3937 if (path->slots[0] < push_items) { 3938 path->slots[0] += old_left_nritems; 3939 btrfs_tree_unlock(path->nodes[0]); 3940 free_extent_buffer(path->nodes[0]); 3941 path->nodes[0] = left; 3942 path->slots[1] -= 1; 3943 } else { 3944 btrfs_tree_unlock(left); 3945 free_extent_buffer(left); 3946 path->slots[0] -= push_items; 3947 } 3948 BUG_ON(path->slots[0] < 0); 3949 return ret; 3950 out: 3951 btrfs_tree_unlock(left); 3952 free_extent_buffer(left); 3953 return ret; 3954 } 3955 3956 /* 3957 * push some data in the path leaf to the left, trying to free up at 3958 * least data_size bytes. returns zero if the push worked, nonzero otherwise 3959 * 3960 * max_slot can put a limit on how far into the leaf we'll push items. The 3961 * item at 'max_slot' won't be touched. 
Use (u32)-1 to make us push all the 3962 * items 3963 */ 3964 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root 3965 *root, struct btrfs_path *path, int min_data_size, 3966 int data_size, int empty, u32 max_slot) 3967 { 3968 struct extent_buffer *right = path->nodes[0]; 3969 struct extent_buffer *left; 3970 int slot; 3971 int free_space; 3972 u32 right_nritems; 3973 int ret = 0; 3974 3975 slot = path->slots[1]; 3976 if (slot == 0) 3977 return 1; 3978 if (!path->nodes[1]) 3979 return 1; 3980 3981 right_nritems = btrfs_header_nritems(right); 3982 if (right_nritems == 0) 3983 return 1; 3984 3985 btrfs_assert_tree_locked(path->nodes[1]); 3986 3987 left = read_node_slot(root, path->nodes[1], slot - 1); 3988 if (left == NULL) 3989 return 1; 3990 3991 btrfs_tree_lock(left); 3992 btrfs_set_lock_blocking(left); 3993 3994 free_space = btrfs_leaf_free_space(root, left); 3995 if (free_space < data_size) { 3996 ret = 1; 3997 goto out; 3998 } 3999 4000 /* cow and double check */ 4001 ret = btrfs_cow_block(trans, root, left, 4002 path->nodes[1], slot - 1, &left); 4003 if (ret) { 4004 /* we hit -ENOSPC, but it isn't fatal here */ 4005 if (ret == -ENOSPC) 4006 ret = 1; 4007 goto out; 4008 } 4009 4010 free_space = btrfs_leaf_free_space(root, left); 4011 if (free_space < data_size) { 4012 ret = 1; 4013 goto out; 4014 } 4015 4016 return __push_leaf_left(trans, root, path, min_data_size, 4017 empty, left, free_space, right_nritems, 4018 max_slot); 4019 out: 4020 btrfs_tree_unlock(left); 4021 free_extent_buffer(left); 4022 return ret; 4023 } 4024 4025 /* 4026 * split the path's leaf in two, making sure there is at least data_size 4027 * available for the resulting leaf level of the path. 4028 */ 4029 static noinline void copy_for_split(struct btrfs_trans_handle *trans, 4030 struct btrfs_root *root, 4031 struct btrfs_path *path, 4032 struct extent_buffer *l, 4033 struct extent_buffer *right, 4034 int slot, int mid, int nritems) 4035 { 4036 int data_copy_size; 4037 int rt_data_off; 4038 int i; 4039 struct btrfs_disk_key disk_key; 4040 struct btrfs_map_token token; 4041 4042 btrfs_init_map_token(&token); 4043 4044 nritems = nritems - mid; 4045 btrfs_set_header_nritems(right, nritems); 4046 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l); 4047 4048 copy_extent_buffer(right, l, btrfs_item_nr_offset(0), 4049 btrfs_item_nr_offset(mid), 4050 nritems * sizeof(struct btrfs_item)); 4051 4052 copy_extent_buffer(right, l, 4053 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) - 4054 data_copy_size, btrfs_leaf_data(l) + 4055 leaf_data_end(root, l), data_copy_size); 4056 4057 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) - 4058 btrfs_item_end_nr(l, mid); 4059 4060 for (i = 0; i < nritems; i++) { 4061 struct btrfs_item *item = btrfs_item_nr(i); 4062 u32 ioff; 4063 4064 ioff = btrfs_token_item_offset(right, item, &token); 4065 btrfs_set_token_item_offset(right, item, 4066 ioff + rt_data_off, &token); 4067 } 4068 4069 btrfs_set_header_nritems(l, mid); 4070 btrfs_item_key(right, &disk_key, 0); 4071 insert_ptr(trans, root, path, &disk_key, right->start, 4072 path->slots[1] + 1, 1); 4073 4074 btrfs_mark_buffer_dirty(right); 4075 btrfs_mark_buffer_dirty(l); 4076 BUG_ON(path->slots[0] != slot); 4077 4078 if (mid <= slot) { 4079 btrfs_tree_unlock(path->nodes[0]); 4080 free_extent_buffer(path->nodes[0]); 4081 path->nodes[0] = right; 4082 path->slots[0] -= mid; 4083 path->slots[1] += 1; 4084 } else { 4085 btrfs_tree_unlock(right); 4086 free_extent_buffer(right); 4087 } 4088 4089 
BUG_ON(path->slots[0] < 0); 4090 } 4091 4092 /* 4093 * double splits happen when we need to insert a big item in the middle 4094 * of a leaf. A double split can leave us with 3 mostly empty leaves: 4095 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] 4096 * A B C 4097 * 4098 * We avoid this by trying to push the items on either side of our target 4099 * into the adjacent leaves. If all goes well we can avoid the double split 4100 * completely. 4101 */ 4102 static noinline int push_for_double_split(struct btrfs_trans_handle *trans, 4103 struct btrfs_root *root, 4104 struct btrfs_path *path, 4105 int data_size) 4106 { 4107 int ret; 4108 int progress = 0; 4109 int slot; 4110 u32 nritems; 4111 int space_needed = data_size; 4112 4113 slot = path->slots[0]; 4114 if (slot < btrfs_header_nritems(path->nodes[0])) 4115 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]); 4116 4117 /* 4118 * try to push all the items after our slot into the 4119 * right leaf 4120 */ 4121 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot); 4122 if (ret < 0) 4123 return ret; 4124 4125 if (ret == 0) 4126 progress++; 4127 4128 nritems = btrfs_header_nritems(path->nodes[0]); 4129 /* 4130 * our goal is to get our slot at the start or end of a leaf. If 4131 * we've done so we're done 4132 */ 4133 if (path->slots[0] == 0 || path->slots[0] == nritems) 4134 return 0; 4135 4136 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) 4137 return 0; 4138 4139 /* try to push all the items before our slot into the next leaf */ 4140 slot = path->slots[0]; 4141 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); 4142 if (ret < 0) 4143 return ret; 4144 4145 if (ret == 0) 4146 progress++; 4147 4148 if (progress) 4149 return 0; 4150 return 1; 4151 } 4152 4153 /* 4154 * split the path's leaf in two, making sure there is at least data_size 4155 * available for the resulting leaf level of the path. 4156 * 4157 * returns 0 if all went well and < 0 on failure. 4158 */ 4159 static noinline int split_leaf(struct btrfs_trans_handle *trans, 4160 struct btrfs_root *root, 4161 struct btrfs_key *ins_key, 4162 struct btrfs_path *path, int data_size, 4163 int extend) 4164 { 4165 struct btrfs_disk_key disk_key; 4166 struct extent_buffer *l; 4167 u32 nritems; 4168 int mid; 4169 int slot; 4170 struct extent_buffer *right; 4171 int ret = 0; 4172 int wret; 4173 int split; 4174 int num_doubles = 0; 4175 int tried_avoid_double = 0; 4176 4177 l = path->nodes[0]; 4178 slot = path->slots[0]; 4179 if (extend && data_size + btrfs_item_size_nr(l, slot) + 4180 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root)) 4181 return -EOVERFLOW; 4182 4183 /* first try to make some room by pushing left and right */ 4184 if (data_size && path->nodes[1]) { 4185 int space_needed = data_size; 4186 4187 if (slot < btrfs_header_nritems(l)) 4188 space_needed -= btrfs_leaf_free_space(root, l); 4189 4190 wret = push_leaf_right(trans, root, path, space_needed, 4191 space_needed, 0, 0); 4192 if (wret < 0) 4193 return wret; 4194 if (wret) { 4195 wret = push_leaf_left(trans, root, path, space_needed, 4196 space_needed, 0, (u32)-1); 4197 if (wret < 0) 4198 return wret; 4199 } 4200 l = path->nodes[0]; 4201 4202 /* did the pushes work? 
*/ 4203 if (btrfs_leaf_free_space(root, l) >= data_size) 4204 return 0; 4205 } 4206 4207 if (!path->nodes[1]) { 4208 ret = insert_new_root(trans, root, path, 1); 4209 if (ret) 4210 return ret; 4211 } 4212 again: 4213 split = 1; 4214 l = path->nodes[0]; 4215 slot = path->slots[0]; 4216 nritems = btrfs_header_nritems(l); 4217 mid = (nritems + 1) / 2; 4218 4219 if (mid <= slot) { 4220 if (nritems == 1 || 4221 leaf_space_used(l, mid, nritems - mid) + data_size > 4222 BTRFS_LEAF_DATA_SIZE(root)) { 4223 if (slot >= nritems) { 4224 split = 0; 4225 } else { 4226 mid = slot; 4227 if (mid != nritems && 4228 leaf_space_used(l, mid, nritems - mid) + 4229 data_size > BTRFS_LEAF_DATA_SIZE(root)) { 4230 if (data_size && !tried_avoid_double) 4231 goto push_for_double; 4232 split = 2; 4233 } 4234 } 4235 } 4236 } else { 4237 if (leaf_space_used(l, 0, mid) + data_size > 4238 BTRFS_LEAF_DATA_SIZE(root)) { 4239 if (!extend && data_size && slot == 0) { 4240 split = 0; 4241 } else if ((extend || !data_size) && slot == 0) { 4242 mid = 1; 4243 } else { 4244 mid = slot; 4245 if (mid != nritems && 4246 leaf_space_used(l, mid, nritems - mid) + 4247 data_size > BTRFS_LEAF_DATA_SIZE(root)) { 4248 if (data_size && !tried_avoid_double) 4249 goto push_for_double; 4250 split = 2; 4251 } 4252 } 4253 } 4254 } 4255 4256 if (split == 0) 4257 btrfs_cpu_key_to_disk(&disk_key, ins_key); 4258 else 4259 btrfs_item_key(l, &disk_key, mid); 4260 4261 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 4262 &disk_key, 0, l->start, 0); 4263 if (IS_ERR(right)) 4264 return PTR_ERR(right); 4265 4266 root_add_used(root, root->nodesize); 4267 4268 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header)); 4269 btrfs_set_header_bytenr(right, right->start); 4270 btrfs_set_header_generation(right, trans->transid); 4271 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV); 4272 btrfs_set_header_owner(right, root->root_key.objectid); 4273 btrfs_set_header_level(right, 0); 4274 write_extent_buffer(right, root->fs_info->fsid, 4275 btrfs_header_fsid(), BTRFS_FSID_SIZE); 4276 4277 write_extent_buffer(right, root->fs_info->chunk_tree_uuid, 4278 btrfs_header_chunk_tree_uuid(right), 4279 BTRFS_UUID_SIZE); 4280 4281 if (split == 0) { 4282 if (mid <= slot) { 4283 btrfs_set_header_nritems(right, 0); 4284 insert_ptr(trans, root, path, &disk_key, right->start, 4285 path->slots[1] + 1, 1); 4286 btrfs_tree_unlock(path->nodes[0]); 4287 free_extent_buffer(path->nodes[0]); 4288 path->nodes[0] = right; 4289 path->slots[0] = 0; 4290 path->slots[1] += 1; 4291 } else { 4292 btrfs_set_header_nritems(right, 0); 4293 insert_ptr(trans, root, path, &disk_key, right->start, 4294 path->slots[1], 1); 4295 btrfs_tree_unlock(path->nodes[0]); 4296 free_extent_buffer(path->nodes[0]); 4297 path->nodes[0] = right; 4298 path->slots[0] = 0; 4299 if (path->slots[1] == 0) 4300 fixup_low_keys(root, path, &disk_key, 1); 4301 } 4302 btrfs_mark_buffer_dirty(right); 4303 return ret; 4304 } 4305 4306 copy_for_split(trans, root, path, l, right, slot, mid, nritems); 4307 4308 if (split == 2) { 4309 BUG_ON(num_doubles != 0); 4310 num_doubles++; 4311 goto again; 4312 } 4313 4314 return 0; 4315 4316 push_for_double: 4317 push_for_double_split(trans, root, path, data_size); 4318 tried_avoid_double = 1; 4319 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) 4320 return 0; 4321 goto again; 4322 } 4323 4324 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, 4325 struct btrfs_root *root, 4326 struct btrfs_path *path, int ins_len) 
4327 { 4328 struct btrfs_key key; 4329 struct extent_buffer *leaf; 4330 struct btrfs_file_extent_item *fi; 4331 u64 extent_len = 0; 4332 u32 item_size; 4333 int ret; 4334 4335 leaf = path->nodes[0]; 4336 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4337 4338 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY && 4339 key.type != BTRFS_EXTENT_CSUM_KEY); 4340 4341 if (btrfs_leaf_free_space(root, leaf) >= ins_len) 4342 return 0; 4343 4344 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 4345 if (key.type == BTRFS_EXTENT_DATA_KEY) { 4346 fi = btrfs_item_ptr(leaf, path->slots[0], 4347 struct btrfs_file_extent_item); 4348 extent_len = btrfs_file_extent_num_bytes(leaf, fi); 4349 } 4350 btrfs_release_path(path); 4351 4352 path->keep_locks = 1; 4353 path->search_for_split = 1; 4354 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 4355 path->search_for_split = 0; 4356 if (ret > 0) 4357 ret = -EAGAIN; 4358 if (ret < 0) 4359 goto err; 4360 4361 ret = -EAGAIN; 4362 leaf = path->nodes[0]; 4363 /* if our item isn't there, return now */ 4364 if (item_size != btrfs_item_size_nr(leaf, path->slots[0])) 4365 goto err; 4366 4367 /* the leaf has changed, it now has room. return now */ 4368 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len) 4369 goto err; 4370 4371 if (key.type == BTRFS_EXTENT_DATA_KEY) { 4372 fi = btrfs_item_ptr(leaf, path->slots[0], 4373 struct btrfs_file_extent_item); 4374 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi)) 4375 goto err; 4376 } 4377 4378 btrfs_set_path_blocking(path); 4379 ret = split_leaf(trans, root, &key, path, ins_len, 1); 4380 if (ret) 4381 goto err; 4382 4383 path->keep_locks = 0; 4384 btrfs_unlock_up_safe(path, 1); 4385 return 0; 4386 err: 4387 path->keep_locks = 0; 4388 return ret; 4389 } 4390 4391 static noinline int split_item(struct btrfs_trans_handle *trans, 4392 struct btrfs_root *root, 4393 struct btrfs_path *path, 4394 struct btrfs_key *new_key, 4395 unsigned long split_offset) 4396 { 4397 struct extent_buffer *leaf; 4398 struct btrfs_item *item; 4399 struct btrfs_item *new_item; 4400 int slot; 4401 char *buf; 4402 u32 nritems; 4403 u32 item_size; 4404 u32 orig_offset; 4405 struct btrfs_disk_key disk_key; 4406 4407 leaf = path->nodes[0]; 4408 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item)); 4409 4410 btrfs_set_path_blocking(path); 4411 4412 item = btrfs_item_nr(path->slots[0]); 4413 orig_offset = btrfs_item_offset(leaf, item); 4414 item_size = btrfs_item_size(leaf, item); 4415 4416 buf = kmalloc(item_size, GFP_NOFS); 4417 if (!buf) 4418 return -ENOMEM; 4419 4420 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, 4421 path->slots[0]), item_size); 4422 4423 slot = path->slots[0] + 1; 4424 nritems = btrfs_header_nritems(leaf); 4425 if (slot != nritems) { 4426 /* shift the items */ 4427 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1), 4428 btrfs_item_nr_offset(slot), 4429 (nritems - slot) * sizeof(struct btrfs_item)); 4430 } 4431 4432 btrfs_cpu_key_to_disk(&disk_key, new_key); 4433 btrfs_set_item_key(leaf, &disk_key, slot); 4434 4435 new_item = btrfs_item_nr(slot); 4436 4437 btrfs_set_item_offset(leaf, new_item, orig_offset); 4438 btrfs_set_item_size(leaf, new_item, item_size - split_offset); 4439 4440 btrfs_set_item_offset(leaf, item, 4441 orig_offset + item_size - split_offset); 4442 btrfs_set_item_size(leaf, item, split_offset); 4443 4444 btrfs_set_header_nritems(leaf, nritems + 1); 4445 4446 /* write the data for the start of the original item */ 4447 write_extent_buffer(leaf, buf, 4448 
btrfs_item_ptr_offset(leaf, path->slots[0]), 4449 split_offset); 4450 4451 /* write the data for the new item */ 4452 write_extent_buffer(leaf, buf + split_offset, 4453 btrfs_item_ptr_offset(leaf, slot), 4454 item_size - split_offset); 4455 btrfs_mark_buffer_dirty(leaf); 4456 4457 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0); 4458 kfree(buf); 4459 return 0; 4460 } 4461 4462 /* 4463 * This function splits a single item into two items, 4464 * giving 'new_key' to the new item and splitting the 4465 * old one at split_offset (from the start of the item). 4466 * 4467 * The path may be released by this operation. After 4468 * the split, the path is pointing to the old item. The 4469 * new item is going to be in the same node as the old one. 4470 * 4471 * Note, the item being split must be small enough to live alone on 4472 * a tree block with room for one extra struct btrfs_item 4473 * 4474 * This allows us to split the item in place, keeping a lock on the 4475 * leaf the entire time. 4476 */ 4477 int btrfs_split_item(struct btrfs_trans_handle *trans, 4478 struct btrfs_root *root, 4479 struct btrfs_path *path, 4480 struct btrfs_key *new_key, 4481 unsigned long split_offset) 4482 { 4483 int ret; 4484 ret = setup_leaf_for_split(trans, root, path, 4485 sizeof(struct btrfs_item)); 4486 if (ret) 4487 return ret; 4488 4489 ret = split_item(trans, root, path, new_key, split_offset); 4490 return ret; 4491 } 4492 4493 /* 4494 * This function duplicates an item, giving 'new_key' to the new item. 4495 * It guarantees both items live in the same tree leaf and the new item 4496 * is contiguous with the original item. 4497 * 4498 * This allows us to split a file extent in place, keeping a lock on the 4499 * leaf the entire time. 4500 */ 4501 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 4502 struct btrfs_root *root, 4503 struct btrfs_path *path, 4504 struct btrfs_key *new_key) 4505 { 4506 struct extent_buffer *leaf; 4507 int ret; 4508 u32 item_size; 4509 4510 leaf = path->nodes[0]; 4511 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 4512 ret = setup_leaf_for_split(trans, root, path, 4513 item_size + sizeof(struct btrfs_item)); 4514 if (ret) 4515 return ret; 4516 4517 path->slots[0]++; 4518 setup_items_for_insert(root, path, new_key, &item_size, 4519 item_size, item_size + 4520 sizeof(struct btrfs_item), 1); 4521 leaf = path->nodes[0]; 4522 memcpy_extent_buffer(leaf, 4523 btrfs_item_ptr_offset(leaf, path->slots[0]), 4524 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), 4525 item_size); 4526 return 0; 4527 } 4528 4529 /* 4530 * make the item pointed to by the path smaller. new_size indicates 4531 * how small to make it, and from_end tells us if we just chop bytes 4532 * off the end of the item or if we shift the item to chop bytes off 4533 * the front.
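 *
 * Illustrative sketch (not part of the original source): to chop a
 * trailing u64 off the item at path->slots[0], a caller would do
 * roughly:
 *
 *	struct extent_buffer *leaf = path->nodes[0];
 *	u32 size = btrfs_item_size_nr(leaf, path->slots[0]);
 *
 *	btrfs_truncate_item(root, path, size - sizeof(u64), 1);
 *
 * with from_end set to 1 so the bytes come off the end rather than
 * the front.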
4534 */ 4535 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path, 4536 u32 new_size, int from_end) 4537 { 4538 int slot; 4539 struct extent_buffer *leaf; 4540 struct btrfs_item *item; 4541 u32 nritems; 4542 unsigned int data_end; 4543 unsigned int old_data_start; 4544 unsigned int old_size; 4545 unsigned int size_diff; 4546 int i; 4547 struct btrfs_map_token token; 4548 4549 btrfs_init_map_token(&token); 4550 4551 leaf = path->nodes[0]; 4552 slot = path->slots[0]; 4553 4554 old_size = btrfs_item_size_nr(leaf, slot); 4555 if (old_size == new_size) 4556 return; 4557 4558 nritems = btrfs_header_nritems(leaf); 4559 data_end = leaf_data_end(root, leaf); 4560 4561 old_data_start = btrfs_item_offset_nr(leaf, slot); 4562 4563 size_diff = old_size - new_size; 4564 4565 BUG_ON(slot < 0); 4566 BUG_ON(slot >= nritems); 4567 4568 /* 4569 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4570 */ 4571 /* first correct the data pointers */ 4572 for (i = slot; i < nritems; i++) { 4573 u32 ioff; 4574 item = btrfs_item_nr(i); 4575 4576 ioff = btrfs_token_item_offset(leaf, item, &token); 4577 btrfs_set_token_item_offset(leaf, item, 4578 ioff + size_diff, &token); 4579 } 4580 4581 /* shift the data */ 4582 if (from_end) { 4583 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + 4584 data_end + size_diff, btrfs_leaf_data(leaf) + 4585 data_end, old_data_start + new_size - data_end); 4586 } else { 4587 struct btrfs_disk_key disk_key; 4588 u64 offset; 4589 4590 btrfs_item_key(leaf, &disk_key, slot); 4591 4592 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) { 4593 unsigned long ptr; 4594 struct btrfs_file_extent_item *fi; 4595 4596 fi = btrfs_item_ptr(leaf, slot, 4597 struct btrfs_file_extent_item); 4598 fi = (struct btrfs_file_extent_item *)( 4599 (unsigned long)fi - size_diff); 4600 4601 if (btrfs_file_extent_type(leaf, fi) == 4602 BTRFS_FILE_EXTENT_INLINE) { 4603 ptr = btrfs_item_ptr_offset(leaf, slot); 4604 memmove_extent_buffer(leaf, ptr, 4605 (unsigned long)fi, 4606 BTRFS_FILE_EXTENT_INLINE_DATA_START); 4607 } 4608 } 4609 4610 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + 4611 data_end + size_diff, btrfs_leaf_data(leaf) + 4612 data_end, old_data_start - data_end); 4613 4614 offset = btrfs_disk_key_offset(&disk_key); 4615 btrfs_set_disk_key_offset(&disk_key, offset + size_diff); 4616 btrfs_set_item_key(leaf, &disk_key, slot); 4617 if (slot == 0) 4618 fixup_low_keys(root, path, &disk_key, 1); 4619 } 4620 4621 item = btrfs_item_nr(slot); 4622 btrfs_set_item_size(leaf, item, new_size); 4623 btrfs_mark_buffer_dirty(leaf); 4624 4625 if (btrfs_leaf_free_space(root, leaf) < 0) { 4626 btrfs_print_leaf(root, leaf); 4627 BUG(); 4628 } 4629 } 4630 4631 /* 4632 * make the item pointed to by the path bigger, data_size is the added size. 
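 *
 * Rough usage sketch (illustrative only): grow the item by one u64 and
 * write into the newly added tail, assuming the caller has already made
 * sure the leaf has enough free space (e.g. by searching with ins_len
 * set); val here is a hypothetical u64 supplied by the caller:
 *
 *	struct extent_buffer *leaf = path->nodes[0];
 *	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
 *
 *	btrfs_extend_item(root, path, sizeof(u64));
 *	write_extent_buffer(leaf, &val,
 *			    btrfs_item_ptr_offset(leaf, path->slots[0]) +
 *			    old_size, sizeof(u64));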
4633 */ 4634 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path, 4635 u32 data_size) 4636 { 4637 int slot; 4638 struct extent_buffer *leaf; 4639 struct btrfs_item *item; 4640 u32 nritems; 4641 unsigned int data_end; 4642 unsigned int old_data; 4643 unsigned int old_size; 4644 int i; 4645 struct btrfs_map_token token; 4646 4647 btrfs_init_map_token(&token); 4648 4649 leaf = path->nodes[0]; 4650 4651 nritems = btrfs_header_nritems(leaf); 4652 data_end = leaf_data_end(root, leaf); 4653 4654 if (btrfs_leaf_free_space(root, leaf) < data_size) { 4655 btrfs_print_leaf(root, leaf); 4656 BUG(); 4657 } 4658 slot = path->slots[0]; 4659 old_data = btrfs_item_end_nr(leaf, slot); 4660 4661 BUG_ON(slot < 0); 4662 if (slot >= nritems) { 4663 btrfs_print_leaf(root, leaf); 4664 btrfs_crit(root->fs_info, "slot %d too large, nritems %d", 4665 slot, nritems); 4666 BUG_ON(1); 4667 } 4668 4669 /* 4670 * item0..itemN ... dataN.offset..dataN.size .. data0.size 4671 */ 4672 /* first correct the data pointers */ 4673 for (i = slot; i < nritems; i++) { 4674 u32 ioff; 4675 item = btrfs_item_nr(i); 4676 4677 ioff = btrfs_token_item_offset(leaf, item, &token); 4678 btrfs_set_token_item_offset(leaf, item, 4679 ioff - data_size, &token); 4680 } 4681 4682 /* shift the data */ 4683 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + 4684 data_end - data_size, btrfs_leaf_data(leaf) + 4685 data_end, old_data - data_end); 4686 4687 data_end = old_data; 4688 old_size = btrfs_item_size_nr(leaf, slot); 4689 item = btrfs_item_nr(slot); 4690 btrfs_set_item_size(leaf, item, old_size + data_size); 4691 btrfs_mark_buffer_dirty(leaf); 4692 4693 if (btrfs_leaf_free_space(root, leaf) < 0) { 4694 btrfs_print_leaf(root, leaf); 4695 BUG(); 4696 } 4697 } 4698 4699 /* 4700 * this is a helper for btrfs_insert_empty_items, the main goal here is 4701 * to save stack depth by doing the bulk of the work in a function 4702 * that doesn't call btrfs_search_slot 4703 */ 4704 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, 4705 struct btrfs_key *cpu_key, u32 *data_size, 4706 u32 total_data, u32 total_size, int nr) 4707 { 4708 struct btrfs_item *item; 4709 int i; 4710 u32 nritems; 4711 unsigned int data_end; 4712 struct btrfs_disk_key disk_key; 4713 struct extent_buffer *leaf; 4714 int slot; 4715 struct btrfs_map_token token; 4716 4717 if (path->slots[0] == 0) { 4718 btrfs_cpu_key_to_disk(&disk_key, cpu_key); 4719 fixup_low_keys(root, path, &disk_key, 1); 4720 } 4721 btrfs_unlock_up_safe(path, 1); 4722 4723 btrfs_init_map_token(&token); 4724 4725 leaf = path->nodes[0]; 4726 slot = path->slots[0]; 4727 4728 nritems = btrfs_header_nritems(leaf); 4729 data_end = leaf_data_end(root, leaf); 4730 4731 if (btrfs_leaf_free_space(root, leaf) < total_size) { 4732 btrfs_print_leaf(root, leaf); 4733 btrfs_crit(root->fs_info, "not enough freespace need %u have %d", 4734 total_size, btrfs_leaf_free_space(root, leaf)); 4735 BUG(); 4736 } 4737 4738 if (slot != nritems) { 4739 unsigned int old_data = btrfs_item_end_nr(leaf, slot); 4740 4741 if (old_data < data_end) { 4742 btrfs_print_leaf(root, leaf); 4743 btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d", 4744 slot, old_data, data_end); 4745 BUG_ON(1); 4746 } 4747 /* 4748 * item0..itemN ... dataN.offset..dataN.size .. 
data0.size 4749 */ 4750 /* first correct the data pointers */ 4751 for (i = slot; i < nritems; i++) { 4752 u32 ioff; 4753 4754 item = btrfs_item_nr( i); 4755 ioff = btrfs_token_item_offset(leaf, item, &token); 4756 btrfs_set_token_item_offset(leaf, item, 4757 ioff - total_data, &token); 4758 } 4759 /* shift the items */ 4760 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), 4761 btrfs_item_nr_offset(slot), 4762 (nritems - slot) * sizeof(struct btrfs_item)); 4763 4764 /* shift the data */ 4765 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + 4766 data_end - total_data, btrfs_leaf_data(leaf) + 4767 data_end, old_data - data_end); 4768 data_end = old_data; 4769 } 4770 4771 /* setup the item for the new data */ 4772 for (i = 0; i < nr; i++) { 4773 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); 4774 btrfs_set_item_key(leaf, &disk_key, slot + i); 4775 item = btrfs_item_nr(slot + i); 4776 btrfs_set_token_item_offset(leaf, item, 4777 data_end - data_size[i], &token); 4778 data_end -= data_size[i]; 4779 btrfs_set_token_item_size(leaf, item, data_size[i], &token); 4780 } 4781 4782 btrfs_set_header_nritems(leaf, nritems + nr); 4783 btrfs_mark_buffer_dirty(leaf); 4784 4785 if (btrfs_leaf_free_space(root, leaf) < 0) { 4786 btrfs_print_leaf(root, leaf); 4787 BUG(); 4788 } 4789 } 4790 4791 /* 4792 * Given a key and some data, insert items into the tree. 4793 * This does all the path init required, making room in the tree if needed. 4794 */ 4795 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 4796 struct btrfs_root *root, 4797 struct btrfs_path *path, 4798 struct btrfs_key *cpu_key, u32 *data_size, 4799 int nr) 4800 { 4801 int ret = 0; 4802 int slot; 4803 int i; 4804 u32 total_size = 0; 4805 u32 total_data = 0; 4806 4807 for (i = 0; i < nr; i++) 4808 total_data += data_size[i]; 4809 4810 total_size = total_data + (nr * sizeof(struct btrfs_item)); 4811 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); 4812 if (ret == 0) 4813 return -EEXIST; 4814 if (ret < 0) 4815 return ret; 4816 4817 slot = path->slots[0]; 4818 BUG_ON(slot < 0); 4819 4820 setup_items_for_insert(root, path, cpu_key, data_size, 4821 total_data, total_size, nr); 4822 return 0; 4823 } 4824 4825 /* 4826 * Given a key and some data, insert an item into the tree. 4827 * This does all the path init required, making room in the tree if needed. 4828 */ 4829 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root 4830 *root, struct btrfs_key *cpu_key, void *data, u32 4831 data_size) 4832 { 4833 int ret = 0; 4834 struct btrfs_path *path; 4835 struct extent_buffer *leaf; 4836 unsigned long ptr; 4837 4838 path = btrfs_alloc_path(); 4839 if (!path) 4840 return -ENOMEM; 4841 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); 4842 if (!ret) { 4843 leaf = path->nodes[0]; 4844 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 4845 write_extent_buffer(leaf, data, ptr, data_size); 4846 btrfs_mark_buffer_dirty(leaf); 4847 } 4848 btrfs_free_path(path); 4849 return ret; 4850 } 4851 4852 /* 4853 * delete the pointer from a given node. 4854 * 4855 * the tree should have been previously balanced so the deletion does not 4856 * empty a node. 
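 *
 * Conceptually (sketch only), removing slot s from a node holding N key
 * pointers is:
 *
 *	memmove(&ptrs[s], &ptrs[s + 1], (N - s - 1) * sizeof(ptrs[0]));
 *	N--;
 *
 * where ptrs and N stand in for the node's key pointer array and header
 * nritems; the code below does the shift through memmove_extent_buffer()
 * and records the change in the tree mod log.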
4857 */ 4858 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path, 4859 int level, int slot) 4860 { 4861 struct extent_buffer *parent = path->nodes[level]; 4862 u32 nritems; 4863 int ret; 4864 4865 nritems = btrfs_header_nritems(parent); 4866 if (slot != nritems - 1) { 4867 if (level) 4868 tree_mod_log_eb_move(root->fs_info, parent, slot, 4869 slot + 1, nritems - slot - 1); 4870 memmove_extent_buffer(parent, 4871 btrfs_node_key_ptr_offset(slot), 4872 btrfs_node_key_ptr_offset(slot + 1), 4873 sizeof(struct btrfs_key_ptr) * 4874 (nritems - slot - 1)); 4875 } else if (level) { 4876 ret = tree_mod_log_insert_key(root->fs_info, parent, slot, 4877 MOD_LOG_KEY_REMOVE, GFP_NOFS); 4878 BUG_ON(ret < 0); 4879 } 4880 4881 nritems--; 4882 btrfs_set_header_nritems(parent, nritems); 4883 if (nritems == 0 && parent == root->node) { 4884 BUG_ON(btrfs_header_level(root->node) != 1); 4885 /* just turn the root into a leaf and break */ 4886 btrfs_set_header_level(root->node, 0); 4887 } else if (slot == 0) { 4888 struct btrfs_disk_key disk_key; 4889 4890 btrfs_node_key(parent, &disk_key, 0); 4891 fixup_low_keys(root, path, &disk_key, level + 1); 4892 } 4893 btrfs_mark_buffer_dirty(parent); 4894 } 4895 4896 /* 4897 * a helper function to delete the leaf pointed to by path->slots[1] and 4898 * path->nodes[1]. 4899 * 4900 * This deletes the pointer in path->nodes[1] and frees the leaf 4901 * block extent. 4902 * 4903 * The path must have already been setup for deleting the leaf, including 4904 * all the proper balancing. path->nodes[1] must be locked. 4905 */ 4906 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans, 4907 struct btrfs_root *root, 4908 struct btrfs_path *path, 4909 struct extent_buffer *leaf) 4910 { 4911 WARN_ON(btrfs_header_generation(leaf) != trans->transid); 4912 del_ptr(root, path, 1, path->slots[1]); 4913 4914 /* 4915 * btrfs_free_extent is expensive, we want to make sure we 4916 * aren't holding any locks when we call it 4917 */ 4918 btrfs_unlock_up_safe(path, 0); 4919 4920 root_sub_used(root, leaf->len); 4921 4922 extent_buffer_get(leaf); 4923 btrfs_free_tree_block(trans, root, leaf, 0, 1); 4924 free_extent_buffer_stale(leaf); 4925 } 4926 /* 4927 * delete the item at the leaf level in path.
If that empties 4928 * the leaf, remove it from the tree 4929 */ 4930 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4931 struct btrfs_path *path, int slot, int nr) 4932 { 4933 struct extent_buffer *leaf; 4934 struct btrfs_item *item; 4935 int last_off; 4936 int dsize = 0; 4937 int ret = 0; 4938 int wret; 4939 int i; 4940 u32 nritems; 4941 struct btrfs_map_token token; 4942 4943 btrfs_init_map_token(&token); 4944 4945 leaf = path->nodes[0]; 4946 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1); 4947 4948 for (i = 0; i < nr; i++) 4949 dsize += btrfs_item_size_nr(leaf, slot + i); 4950 4951 nritems = btrfs_header_nritems(leaf); 4952 4953 if (slot + nr != nritems) { 4954 int data_end = leaf_data_end(root, leaf); 4955 4956 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + 4957 data_end + dsize, 4958 btrfs_leaf_data(leaf) + data_end, 4959 last_off - data_end); 4960 4961 for (i = slot + nr; i < nritems; i++) { 4962 u32 ioff; 4963 4964 item = btrfs_item_nr(i); 4965 ioff = btrfs_token_item_offset(leaf, item, &token); 4966 btrfs_set_token_item_offset(leaf, item, 4967 ioff + dsize, &token); 4968 } 4969 4970 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot), 4971 btrfs_item_nr_offset(slot + nr), 4972 sizeof(struct btrfs_item) * 4973 (nritems - slot - nr)); 4974 } 4975 btrfs_set_header_nritems(leaf, nritems - nr); 4976 nritems -= nr; 4977 4978 /* delete the leaf if we've emptied it */ 4979 if (nritems == 0) { 4980 if (leaf == root->node) { 4981 btrfs_set_header_level(leaf, 0); 4982 } else { 4983 btrfs_set_path_blocking(path); 4984 clean_tree_block(trans, root, leaf); 4985 btrfs_del_leaf(trans, root, path, leaf); 4986 } 4987 } else { 4988 int used = leaf_space_used(leaf, 0, nritems); 4989 if (slot == 0) { 4990 struct btrfs_disk_key disk_key; 4991 4992 btrfs_item_key(leaf, &disk_key, 0); 4993 fixup_low_keys(root, path, &disk_key, 1); 4994 } 4995 4996 /* delete the leaf if it is mostly empty */ 4997 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) { 4998 /* push_leaf_left fixes the path. 4999 * make sure the path still points to our leaf 5000 * for possible call to del_ptr below 5001 */ 5002 slot = path->slots[1]; 5003 extent_buffer_get(leaf); 5004 5005 btrfs_set_path_blocking(path); 5006 wret = push_leaf_left(trans, root, path, 1, 1, 5007 1, (u32)-1); 5008 if (wret < 0 && wret != -ENOSPC) 5009 ret = wret; 5010 5011 if (path->nodes[0] == leaf && 5012 btrfs_header_nritems(leaf)) { 5013 wret = push_leaf_right(trans, root, path, 1, 5014 1, 1, 0); 5015 if (wret < 0 && wret != -ENOSPC) 5016 ret = wret; 5017 } 5018 5019 if (btrfs_header_nritems(leaf) == 0) { 5020 path->slots[1] = slot; 5021 btrfs_del_leaf(trans, root, path, leaf); 5022 free_extent_buffer(leaf); 5023 ret = 0; 5024 } else { 5025 /* if we're still in the path, make sure 5026 * we're dirty. Otherwise, one of the 5027 * push_leaf functions must have already 5028 * dirtied this buffer 5029 */ 5030 if (path->nodes[0] == leaf) 5031 btrfs_mark_buffer_dirty(leaf); 5032 free_extent_buffer(leaf); 5033 } 5034 } else { 5035 btrfs_mark_buffer_dirty(leaf); 5036 } 5037 } 5038 return ret; 5039 } 5040 5041 /* 5042 * search the tree again to find a leaf with lesser keys 5043 * returns 0 if it found something or 1 if there are no lesser leaves. 5044 * returns < 0 on io errors. 5045 * 5046 * This may release the path, and so you may lose any locks held at the 5047 * time you call it. 
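 *
 * The search key is derived by decrementing the first key of the
 * current leaf: offset first, then type (with offset wrapping to
 * (u64)-1), then objectid. For example, a first key of (256, t, 0)
 * with t > 0 yields the search key (256, t - 1, (u64)-1).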
5048 */ 5049 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) 5050 { 5051 struct btrfs_key key; 5052 struct btrfs_disk_key found_key; 5053 int ret; 5054 5055 btrfs_item_key_to_cpu(path->nodes[0], &key, 0); 5056 5057 if (key.offset > 0) { 5058 key.offset--; 5059 } else if (key.type > 0) { 5060 key.type--; 5061 key.offset = (u64)-1; 5062 } else if (key.objectid > 0) { 5063 key.objectid--; 5064 key.type = (u8)-1; 5065 key.offset = (u64)-1; 5066 } else { 5067 return 1; 5068 } 5069 5070 btrfs_release_path(path); 5071 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5072 if (ret < 0) 5073 return ret; 5074 btrfs_item_key(path->nodes[0], &found_key, 0); 5075 ret = comp_keys(&found_key, &key); 5076 /* 5077 * We might have had an item with the previous key in the tree right 5078 * before we released our path. And after we released our path, that 5079 * item might have been pushed to the first slot (0) of the leaf we 5080 * were holding due to a tree balance. Alternatively, an item with the 5081 * previous key can exist as the only element of a leaf (big fat item). 5082 * Therefore account for these 2 cases, so that our callers (like 5083 * btrfs_previous_item) don't miss an existing item with a key matching 5084 * the previous key we computed above. 5085 */ 5086 if (ret <= 0) 5087 return 0; 5088 return 1; 5089 } 5090 5091 /* 5092 * A helper function to walk down the tree starting at min_key, and looking 5093 * for nodes or leaves that have a minimum transaction id. 5094 * This is used by the btree defrag code and tree logging. 5095 * 5096 * This does not cow, but it does stuff the starting key it finds back 5097 * into min_key, so you can call btrfs_search_slot with cow=1 on the 5098 * key and get a writable path. 5099 * 5100 * This does lock as it descends, and path->keep_locks should be set 5101 * to 1 by the caller. 5102 * 5103 * This honors path->lowest_level to prevent descent past a given level 5104 * of the tree. 5105 * 5106 * min_trans indicates the oldest transaction that you are interested 5107 * in walking through. Any nodes or leaves older than min_trans are 5108 * skipped over (without reading them). 5109 * 5110 * returns zero if something useful was found, < 0 on error and 1 if there 5111 * was nothing in the tree that matched the search criteria. 5112 */ 5113 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 5114 struct btrfs_path *path, 5115 u64 min_trans) 5116 { 5117 struct extent_buffer *cur; 5118 struct btrfs_key found_key; 5119 int slot; 5120 int sret; 5121 u32 nritems; 5122 int level; 5123 int ret = 1; 5124 int keep_locks = path->keep_locks; 5125 5126 path->keep_locks = 1; 5127 again: 5128 cur = btrfs_read_lock_root_node(root); 5129 level = btrfs_header_level(cur); 5130 WARN_ON(path->nodes[level]); 5131 path->nodes[level] = cur; 5132 path->locks[level] = BTRFS_READ_LOCK; 5133 5134 if (btrfs_header_generation(cur) < min_trans) { 5135 ret = 1; 5136 goto out; 5137 } 5138 while (1) { 5139 nritems = btrfs_header_nritems(cur); 5140 level = btrfs_header_level(cur); 5141 sret = bin_search(cur, min_key, level, &slot); 5142 5143 /* at the lowest level, we're done, setup the path and exit */ 5144 if (level == path->lowest_level) { 5145 if (slot >= nritems) 5146 goto find_next_key; 5147 ret = 0; 5148 path->slots[level] = slot; 5149 btrfs_item_key_to_cpu(cur, &found_key, slot); 5150 goto out; 5151 } 5152 if (sret && slot > 0) 5153 slot--; 5154 /* 5155 * check this node pointer against the min_trans parameters.
5156 * If it is too old, skip to the next one. 5157 */ 5158 while (slot < nritems) { 5159 u64 gen; 5160 5161 gen = btrfs_node_ptr_generation(cur, slot); 5162 if (gen < min_trans) { 5163 slot++; 5164 continue; 5165 } 5166 break; 5167 } 5168 find_next_key: 5169 /* 5170 * we didn't find a candidate key in this node, walk forward 5171 * and find another one 5172 */ 5173 if (slot >= nritems) { 5174 path->slots[level] = slot; 5175 btrfs_set_path_blocking(path); 5176 sret = btrfs_find_next_key(root, path, min_key, level, 5177 min_trans); 5178 if (sret == 0) { 5179 btrfs_release_path(path); 5180 goto again; 5181 } else { 5182 goto out; 5183 } 5184 } 5185 /* save our key for returning back */ 5186 btrfs_node_key_to_cpu(cur, &found_key, slot); 5187 path->slots[level] = slot; 5188 if (level == path->lowest_level) { 5189 ret = 0; 5190 goto out; 5191 } 5192 btrfs_set_path_blocking(path); 5193 cur = read_node_slot(root, cur, slot); 5194 BUG_ON(!cur); /* -ENOMEM */ 5195 5196 btrfs_tree_read_lock(cur); 5197 5198 path->locks[level - 1] = BTRFS_READ_LOCK; 5199 path->nodes[level - 1] = cur; 5200 unlock_up(path, level, 1, 0, NULL); 5201 btrfs_clear_path_blocking(path, NULL, 0); 5202 } 5203 out: 5204 path->keep_locks = keep_locks; 5205 if (ret == 0) { 5206 btrfs_unlock_up_safe(path, path->lowest_level + 1); 5207 btrfs_set_path_blocking(path); 5208 memcpy(min_key, &found_key, sizeof(found_key)); 5209 } 5210 return ret; 5211 } 5212 5213 static void tree_move_down(struct btrfs_root *root, 5214 struct btrfs_path *path, 5215 int *level, int root_level) 5216 { 5217 BUG_ON(*level == 0); 5218 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level], 5219 path->slots[*level]); 5220 path->slots[*level - 1] = 0; 5221 (*level)--; 5222 } 5223 5224 static int tree_move_next_or_upnext(struct btrfs_root *root, 5225 struct btrfs_path *path, 5226 int *level, int root_level) 5227 { 5228 int ret = 0; 5229 int nritems; 5230 nritems = btrfs_header_nritems(path->nodes[*level]); 5231 5232 path->slots[*level]++; 5233 5234 while (path->slots[*level] >= nritems) { 5235 if (*level == root_level) 5236 return -1; 5237 5238 /* move upnext */ 5239 path->slots[*level] = 0; 5240 free_extent_buffer(path->nodes[*level]); 5241 path->nodes[*level] = NULL; 5242 (*level)++; 5243 path->slots[*level]++; 5244 5245 nritems = btrfs_header_nritems(path->nodes[*level]); 5246 ret = 1; 5247 } 5248 return ret; 5249 } 5250 5251 /* 5252 * Returns 1 if it had to move up and next. 0 is returned if it moved only next 5253 * or down.
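 *
 * Typical use (sketch, mirroring btrfs_compare_trees() below): callers
 * loop on tree_advance() and treat a negative return as the tree being
 * fully consumed:
 *
 *	ret = tree_advance(root, path, &level, root_level,
 *			   allow_down, &key);
 *	if (ret < 0)
 *		end_reached = 1;
 *
 * allow_down is cleared once a shared block is detected, which makes
 * the walk skip the whole shared subtree.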
5254 */ 5255 static int tree_advance(struct btrfs_root *root, 5256 struct btrfs_path *path, 5257 int *level, int root_level, 5258 int allow_down, 5259 struct btrfs_key *key) 5260 { 5261 int ret; 5262 5263 if (*level == 0 || !allow_down) { 5264 ret = tree_move_next_or_upnext(root, path, level, root_level); 5265 } else { 5266 tree_move_down(root, path, level, root_level); 5267 ret = 0; 5268 } 5269 if (ret >= 0) { 5270 if (*level == 0) 5271 btrfs_item_key_to_cpu(path->nodes[*level], key, 5272 path->slots[*level]); 5273 else 5274 btrfs_node_key_to_cpu(path->nodes[*level], key, 5275 path->slots[*level]); 5276 } 5277 return ret; 5278 } 5279 5280 static int tree_compare_item(struct btrfs_root *left_root, 5281 struct btrfs_path *left_path, 5282 struct btrfs_path *right_path, 5283 char *tmp_buf) 5284 { 5285 int cmp; 5286 int len1, len2; 5287 unsigned long off1, off2; 5288 5289 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]); 5290 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]); 5291 if (len1 != len2) 5292 return 1; 5293 5294 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]); 5295 off2 = btrfs_item_ptr_offset(right_path->nodes[0], 5296 right_path->slots[0]); 5297 5298 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1); 5299 5300 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1); 5301 if (cmp) 5302 return 1; 5303 return 0; 5304 } 5305 5306 #define ADVANCE 1 5307 #define ADVANCE_ONLY_NEXT -1 5308 5309 /* 5310 * This function compares two trees and calls the provided callback for 5311 * every changed/new/deleted item it finds. 5312 * If shared tree blocks are encountered, whole subtrees are skipped, making 5313 * the compare pretty fast on snapshotted subvolumes. 5314 * 5315 * This currently works on commit roots only. As commit roots are read only, 5316 * we don't do any locking. The commit roots are protected with transactions. 5317 * Transactions are ended and rejoined when a commit is tried in between. 5318 * 5319 * This function checks for modifications done to the trees while comparing. 5320 * If it detects a change, it aborts immediately. 5321 */ 5322 int btrfs_compare_trees(struct btrfs_root *left_root, 5323 struct btrfs_root *right_root, 5324 btrfs_changed_cb_t changed_cb, void *ctx) 5325 { 5326 int ret; 5327 int cmp; 5328 struct btrfs_path *left_path = NULL; 5329 struct btrfs_path *right_path = NULL; 5330 struct btrfs_key left_key; 5331 struct btrfs_key right_key; 5332 char *tmp_buf = NULL; 5333 int left_root_level; 5334 int right_root_level; 5335 int left_level; 5336 int right_level; 5337 int left_end_reached; 5338 int right_end_reached; 5339 int advance_left; 5340 int advance_right; 5341 u64 left_blockptr; 5342 u64 right_blockptr; 5343 u64 left_gen; 5344 u64 right_gen; 5345 5346 left_path = btrfs_alloc_path(); 5347 if (!left_path) { 5348 ret = -ENOMEM; 5349 goto out; 5350 } 5351 right_path = btrfs_alloc_path(); 5352 if (!right_path) { 5353 ret = -ENOMEM; 5354 goto out; 5355 } 5356 5357 tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS); 5358 if (!tmp_buf) { 5359 ret = -ENOMEM; 5360 goto out; 5361 } 5362 5363 left_path->search_commit_root = 1; 5364 left_path->skip_locking = 1; 5365 right_path->search_commit_root = 1; 5366 right_path->skip_locking = 1; 5367 5368 /* 5369 * Strategy: Go to the first items of both trees. 
Then do 5370 * 5371 * If both trees are at level 0 5372 * Compare keys of current items 5373 * If left < right treat left item as new, advance left tree 5374 * and repeat 5375 * If left > right treat right item as deleted, advance right tree 5376 * and repeat 5377 * If left == right do deep compare of items, treat as changed if 5378 * needed, advance both trees and repeat 5379 * If both trees are at the same level but not at level 0 5380 * Compare keys of current nodes/leafs 5381 * If left < right advance left tree and repeat 5382 * If left > right advance right tree and repeat 5383 * If left == right compare blockptrs of the next nodes/leafs 5384 * If they match advance both trees but stay at the same level 5385 * and repeat 5386 * If they don't match advance both trees while allowing to go 5387 * deeper and repeat 5388 * If tree levels are different 5389 * Advance the tree that needs it and repeat 5390 * 5391 * Advancing a tree means: 5392 * If we are at level 0, try to go to the next slot. If that's not 5393 * possible, go one level up and repeat. Stop when we found a level 5394 * where we could go to the next slot. We may at this point be on a 5395 * node or a leaf. 5396 * 5397 * If we are not at level 0 and not on shared tree blocks, go one 5398 * level deeper. 5399 * 5400 * If we are not at level 0 and on shared tree blocks, go one slot to 5401 * the right if possible or go up and right. 5402 */ 5403 5404 down_read(&left_root->fs_info->commit_root_sem); 5405 left_level = btrfs_header_level(left_root->commit_root); 5406 left_root_level = left_level; 5407 left_path->nodes[left_level] = left_root->commit_root; 5408 extent_buffer_get(left_path->nodes[left_level]); 5409 5410 right_level = btrfs_header_level(right_root->commit_root); 5411 right_root_level = right_level; 5412 right_path->nodes[right_level] = right_root->commit_root; 5413 extent_buffer_get(right_path->nodes[right_level]); 5414 up_read(&left_root->fs_info->commit_root_sem); 5415 5416 if (left_level == 0) 5417 btrfs_item_key_to_cpu(left_path->nodes[left_level], 5418 &left_key, left_path->slots[left_level]); 5419 else 5420 btrfs_node_key_to_cpu(left_path->nodes[left_level], 5421 &left_key, left_path->slots[left_level]); 5422 if (right_level == 0) 5423 btrfs_item_key_to_cpu(right_path->nodes[right_level], 5424 &right_key, right_path->slots[right_level]); 5425 else 5426 btrfs_node_key_to_cpu(right_path->nodes[right_level], 5427 &right_key, right_path->slots[right_level]); 5428 5429 left_end_reached = right_end_reached = 0; 5430 advance_left = advance_right = 0; 5431 5432 while (1) { 5433 if (advance_left && !left_end_reached) { 5434 ret = tree_advance(left_root, left_path, &left_level, 5435 left_root_level, 5436 advance_left != ADVANCE_ONLY_NEXT, 5437 &left_key); 5438 if (ret < 0) 5439 left_end_reached = ADVANCE; 5440 advance_left = 0; 5441 } 5442 if (advance_right && !right_end_reached) { 5443 ret = tree_advance(right_root, right_path, &right_level, 5444 right_root_level, 5445 advance_right != ADVANCE_ONLY_NEXT, 5446 &right_key); 5447 if (ret < 0) 5448 right_end_reached = ADVANCE; 5449 advance_right = 0; 5450 } 5451 5452 if (left_end_reached && right_end_reached) { 5453 ret = 0; 5454 goto out; 5455 } else if (left_end_reached) { 5456 if (right_level == 0) { 5457 ret = changed_cb(left_root, right_root, 5458 left_path, right_path, 5459 &right_key, 5460 BTRFS_COMPARE_TREE_DELETED, 5461 ctx); 5462 if (ret < 0) 5463 goto out; 5464 } 5465 advance_right = ADVANCE; 5466 continue; 5467 } else if (right_end_reached) { 5468 if (left_level 
== 0) { 5469 ret = changed_cb(left_root, right_root, 5470 left_path, right_path, 5471 &left_key, 5472 BTRFS_COMPARE_TREE_NEW, 5473 ctx); 5474 if (ret < 0) 5475 goto out; 5476 } 5477 advance_left = ADVANCE; 5478 continue; 5479 } 5480 5481 if (left_level == 0 && right_level == 0) { 5482 cmp = btrfs_comp_cpu_keys(&left_key, &right_key); 5483 if (cmp < 0) { 5484 ret = changed_cb(left_root, right_root, 5485 left_path, right_path, 5486 &left_key, 5487 BTRFS_COMPARE_TREE_NEW, 5488 ctx); 5489 if (ret < 0) 5490 goto out; 5491 advance_left = ADVANCE; 5492 } else if (cmp > 0) { 5493 ret = changed_cb(left_root, right_root, 5494 left_path, right_path, 5495 &right_key, 5496 BTRFS_COMPARE_TREE_DELETED, 5497 ctx); 5498 if (ret < 0) 5499 goto out; 5500 advance_right = ADVANCE; 5501 } else { 5502 enum btrfs_compare_tree_result result; 5503 5504 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0])); 5505 ret = tree_compare_item(left_root, left_path, 5506 right_path, tmp_buf); 5507 if (ret) 5508 result = BTRFS_COMPARE_TREE_CHANGED; 5509 else 5510 result = BTRFS_COMPARE_TREE_SAME; 5511 ret = changed_cb(left_root, right_root, 5512 left_path, right_path, 5513 &left_key, result, ctx); 5514 if (ret < 0) 5515 goto out; 5516 advance_left = ADVANCE; 5517 advance_right = ADVANCE; 5518 } 5519 } else if (left_level == right_level) { 5520 cmp = btrfs_comp_cpu_keys(&left_key, &right_key); 5521 if (cmp < 0) { 5522 advance_left = ADVANCE; 5523 } else if (cmp > 0) { 5524 advance_right = ADVANCE; 5525 } else { 5526 left_blockptr = btrfs_node_blockptr( 5527 left_path->nodes[left_level], 5528 left_path->slots[left_level]); 5529 right_blockptr = btrfs_node_blockptr( 5530 right_path->nodes[right_level], 5531 right_path->slots[right_level]); 5532 left_gen = btrfs_node_ptr_generation( 5533 left_path->nodes[left_level], 5534 left_path->slots[left_level]); 5535 right_gen = btrfs_node_ptr_generation( 5536 right_path->nodes[right_level], 5537 right_path->slots[right_level]); 5538 if (left_blockptr == right_blockptr && 5539 left_gen == right_gen) { 5540 /* 5541 * As we're on a shared block, don't 5542 * allow to go deeper. 5543 */ 5544 advance_left = ADVANCE_ONLY_NEXT; 5545 advance_right = ADVANCE_ONLY_NEXT; 5546 } else { 5547 advance_left = ADVANCE; 5548 advance_right = ADVANCE; 5549 } 5550 } 5551 } else if (left_level < right_level) { 5552 advance_right = ADVANCE; 5553 } else { 5554 advance_left = ADVANCE; 5555 } 5556 } 5557 5558 out: 5559 btrfs_free_path(left_path); 5560 btrfs_free_path(right_path); 5561 kfree(tmp_buf); 5562 return ret; 5563 } 5564 5565 /* 5566 * this is similar to btrfs_next_leaf, but does not try to preserve 5567 * and fixup the path. It looks for and returns the next key in the 5568 * tree based on the current path and the min_trans parameters. 5569 * 5570 * 0 is returned if another key is found, < 0 if there are any errors 5571 * and 1 is returned if there are no higher keys in the tree 5572 * 5573 * path->keep_locks should be set to 1 on the search made before 5574 * calling this function. 
5575 */ 5576 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 5577 struct btrfs_key *key, int level, u64 min_trans) 5578 { 5579 int slot; 5580 struct extent_buffer *c; 5581 5582 WARN_ON(!path->keep_locks); 5583 while (level < BTRFS_MAX_LEVEL) { 5584 if (!path->nodes[level]) 5585 return 1; 5586 5587 slot = path->slots[level] + 1; 5588 c = path->nodes[level]; 5589 next: 5590 if (slot >= btrfs_header_nritems(c)) { 5591 int ret; 5592 int orig_lowest; 5593 struct btrfs_key cur_key; 5594 if (level + 1 >= BTRFS_MAX_LEVEL || 5595 !path->nodes[level + 1]) 5596 return 1; 5597 5598 if (path->locks[level + 1]) { 5599 level++; 5600 continue; 5601 } 5602 5603 slot = btrfs_header_nritems(c) - 1; 5604 if (level == 0) 5605 btrfs_item_key_to_cpu(c, &cur_key, slot); 5606 else 5607 btrfs_node_key_to_cpu(c, &cur_key, slot); 5608 5609 orig_lowest = path->lowest_level; 5610 btrfs_release_path(path); 5611 path->lowest_level = level; 5612 ret = btrfs_search_slot(NULL, root, &cur_key, path, 5613 0, 0); 5614 path->lowest_level = orig_lowest; 5615 if (ret < 0) 5616 return ret; 5617 5618 c = path->nodes[level]; 5619 slot = path->slots[level]; 5620 if (ret == 0) 5621 slot++; 5622 goto next; 5623 } 5624 5625 if (level == 0) 5626 btrfs_item_key_to_cpu(c, key, slot); 5627 else { 5628 u64 gen = btrfs_node_ptr_generation(c, slot); 5629 5630 if (gen < min_trans) { 5631 slot++; 5632 goto next; 5633 } 5634 btrfs_node_key_to_cpu(c, key, slot); 5635 } 5636 return 0; 5637 } 5638 return 1; 5639 } 5640 5641 /* 5642 * search the tree again to find a leaf with greater keys 5643 * returns 0 if it found something or 1 if there are no greater leaves. 5644 * returns < 0 on io errors. 5645 */ 5646 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) 5647 { 5648 return btrfs_next_old_leaf(root, path, 0); 5649 } 5650 5651 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 5652 u64 time_seq) 5653 { 5654 int slot; 5655 int level; 5656 struct extent_buffer *c; 5657 struct extent_buffer *next; 5658 struct btrfs_key key; 5659 u32 nritems; 5660 int ret; 5661 int old_spinning = path->leave_spinning; 5662 int next_rw_lock = 0; 5663 5664 nritems = btrfs_header_nritems(path->nodes[0]); 5665 if (nritems == 0) 5666 return 1; 5667 5668 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); 5669 again: 5670 level = 1; 5671 next = NULL; 5672 next_rw_lock = 0; 5673 btrfs_release_path(path); 5674 5675 path->keep_locks = 1; 5676 path->leave_spinning = 1; 5677 5678 if (time_seq) 5679 ret = btrfs_search_old_slot(root, &key, path, time_seq); 5680 else 5681 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5682 path->keep_locks = 0; 5683 5684 if (ret < 0) 5685 return ret; 5686 5687 nritems = btrfs_header_nritems(path->nodes[0]); 5688 /* 5689 * by releasing the path above we dropped all our locks. A balance 5690 * could have added more items next to the key that used to be 5691 * at the very end of the block. So, check again here and 5692 * advance the path if there are now more items available. 5693 */ 5694 if (nritems > 0 && path->slots[0] < nritems - 1) { 5695 if (ret == 0) 5696 path->slots[0]++; 5697 ret = 0; 5698 goto done; 5699 } 5700 /* 5701 * So the above check misses one case: 5702 * - after releasing the path above, someone has removed the item that 5703 * used to be at the very end of the block, and balance between leafs 5704 * gets another one with bigger key.offset to replace it. 5705 * 5706 * This one should be returned as well, or we can get leaf corruption 5707 * later(esp. 
in __btrfs_drop_extents()). 5708 * 5709 * And a bit more explanation about this check, 5710 * with ret > 0, the key isn't found, the path points to the slot 5711 * where it should be inserted, so the path->slots[0] item must be the 5712 * bigger one. 5713 */ 5714 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) { 5715 ret = 0; 5716 goto done; 5717 } 5718 5719 while (level < BTRFS_MAX_LEVEL) { 5720 if (!path->nodes[level]) { 5721 ret = 1; 5722 goto done; 5723 } 5724 5725 slot = path->slots[level] + 1; 5726 c = path->nodes[level]; 5727 if (slot >= btrfs_header_nritems(c)) { 5728 level++; 5729 if (level == BTRFS_MAX_LEVEL) { 5730 ret = 1; 5731 goto done; 5732 } 5733 continue; 5734 } 5735 5736 if (next) { 5737 btrfs_tree_unlock_rw(next, next_rw_lock); 5738 free_extent_buffer(next); 5739 } 5740 5741 next = c; 5742 next_rw_lock = path->locks[level]; 5743 ret = read_block_for_search(NULL, root, path, &next, level, 5744 slot, &key, 0); 5745 if (ret == -EAGAIN) 5746 goto again; 5747 5748 if (ret < 0) { 5749 btrfs_release_path(path); 5750 goto done; 5751 } 5752 5753 if (!path->skip_locking) { 5754 ret = btrfs_try_tree_read_lock(next); 5755 if (!ret && time_seq) { 5756 /* 5757 * If we don't get the lock, we may be racing 5758 * with push_leaf_left, holding that lock while 5759 * itself waiting for the leaf we've currently 5760 * locked. To solve this situation, we give up 5761 * on our lock and cycle. 5762 */ 5763 free_extent_buffer(next); 5764 btrfs_release_path(path); 5765 cond_resched(); 5766 goto again; 5767 } 5768 if (!ret) { 5769 btrfs_set_path_blocking(path); 5770 btrfs_tree_read_lock(next); 5771 btrfs_clear_path_blocking(path, next, 5772 BTRFS_READ_LOCK); 5773 } 5774 next_rw_lock = BTRFS_READ_LOCK; 5775 } 5776 break; 5777 } 5778 path->slots[level] = slot; 5779 while (1) { 5780 level--; 5781 c = path->nodes[level]; 5782 if (path->locks[level]) 5783 btrfs_tree_unlock_rw(c, path->locks[level]); 5784 5785 free_extent_buffer(c); 5786 path->nodes[level] = next; 5787 path->slots[level] = 0; 5788 if (!path->skip_locking) 5789 path->locks[level] = next_rw_lock; 5790 if (!level) 5791 break; 5792 5793 ret = read_block_for_search(NULL, root, path, &next, level, 5794 0, &key, 0); 5795 if (ret == -EAGAIN) 5796 goto again; 5797 5798 if (ret < 0) { 5799 btrfs_release_path(path); 5800 goto done; 5801 } 5802 5803 if (!path->skip_locking) { 5804 ret = btrfs_try_tree_read_lock(next); 5805 if (!ret) { 5806 btrfs_set_path_blocking(path); 5807 btrfs_tree_read_lock(next); 5808 btrfs_clear_path_blocking(path, next, 5809 BTRFS_READ_LOCK); 5810 } 5811 next_rw_lock = BTRFS_READ_LOCK; 5812 } 5813 } 5814 ret = 0; 5815 done: 5816 unlock_up(path, 0, 1, 0, NULL); 5817 path->leave_spinning = old_spinning; 5818 if (!old_spinning) 5819 btrfs_set_path_blocking(path); 5820 5821 return ret; 5822 } 5823 5824 /* 5825 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps 5826 * searching until it gets past min_objectid or finds an item of 'type' 5827 * 5828 * returns 0 if something is found, 1 if nothing was found and < 0 on error 5829 */ 5830 int btrfs_previous_item(struct btrfs_root *root, 5831 struct btrfs_path *path, u64 min_objectid, 5832 int type) 5833 { 5834 struct btrfs_key found_key; 5835 struct extent_buffer *leaf; 5836 u32 nritems; 5837 int ret; 5838 5839 while (1) { 5840 if (path->slots[0] == 0) { 5841 btrfs_set_path_blocking(path); 5842 ret = btrfs_prev_leaf(root, path); 5843 if (ret != 0) 5844 return ret; 5845 } else { 5846 path->slots[0]--; 5847 } 5848 leaf = path->nodes[0]; 5849 nritems = 
btrfs_header_nritems(leaf); 5850 if (nritems == 0) 5851 return 1; 5852 if (path->slots[0] == nritems) 5853 path->slots[0]--; 5854 5855 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5856 if (found_key.objectid < min_objectid) 5857 break; 5858 if (found_key.type == type) 5859 return 0; 5860 if (found_key.objectid == min_objectid && 5861 found_key.type < type) 5862 break; 5863 } 5864 return 1; 5865 } 5866 5867 /* 5868 * search in the extent tree to find a previous Metadata/Data extent item with 5869 * min objectid. 5870 * 5871 * returns 0 if something is found, 1 if nothing was found and < 0 on error 5872 */ 5873 int btrfs_previous_extent_item(struct btrfs_root *root, 5874 struct btrfs_path *path, u64 min_objectid) 5875 { 5876 struct btrfs_key found_key; 5877 struct extent_buffer *leaf; 5878 u32 nritems; 5879 int ret; 5880 5881 while (1) { 5882 if (path->slots[0] == 0) { 5883 btrfs_set_path_blocking(path); 5884 ret = btrfs_prev_leaf(root, path); 5885 if (ret != 0) 5886 return ret; 5887 } else { 5888 path->slots[0]--; 5889 } 5890 leaf = path->nodes[0]; 5891 nritems = btrfs_header_nritems(leaf); 5892 if (nritems == 0) 5893 return 1; 5894 if (path->slots[0] == nritems) 5895 path->slots[0]--; 5896 5897 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5898 if (found_key.objectid < min_objectid) 5899 break; 5900 if (found_key.type == BTRFS_EXTENT_ITEM_KEY || 5901 found_key.type == BTRFS_METADATA_ITEM_KEY) 5902 return 0; 5903 if (found_key.objectid == min_objectid && 5904 found_key.type < BTRFS_EXTENT_ITEM_KEY) 5905 break; 5906 } 5907 return 1; 5908 } 5909
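/*
 * Usage sketch for btrfs_previous_item() (illustrative only, not part
 * of the original file): walk backwards from the current path position
 * until an item of the wanted type is found for 'objectid':
 *
 *	ret = btrfs_previous_item(root, path, objectid,
 *				  BTRFS_ROOT_ITEM_KEY);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key,
 *				      path->slots[0]);
 */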