/*
 * Copyright (C) 2015 Facebook. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "free-space-tree.h"
#include "transaction.h"

/*
 * Forward declaration: lazily inserts the free space info for a block group
 * that was created in the current transaction (see needs_free_space users).
 */
static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
					struct btrfs_fs_info *fs_info,
					struct btrfs_block_group_cache *block_group,
					struct btrfs_path *path);

/*
 * Compute the free-space-extent-count thresholds at which @cache's free
 * space representation is converted between the extent format and the
 * bitmap format. Sets cache->bitmap_high_thresh (convert to bitmaps when
 * the extent count rises above it) and cache->bitmap_low_thresh (convert
 * back to extents when the count falls below it).
 */
void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
{
	u32 bitmap_range;
	size_t bitmap_size;
	u64 num_bitmaps, total_bitmap_size;

	/*
	 * We convert to bitmaps when the disk space required for using extents
	 * exceeds that required for using bitmaps.
	 */
	bitmap_range = cache->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
			      bitmap_range);
	bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
	total_bitmap_size = num_bitmaps * bitmap_size;
	cache->bitmap_high_thresh = div_u64(total_bitmap_size,
					    sizeof(struct btrfs_item));

	/*
	 * We allow for a small buffer between the high threshold and low
	 * threshold to avoid thrashing back and forth between the two formats.
	 */
	if (cache->bitmap_high_thresh > 100)
		cache->bitmap_low_thresh = cache->bitmap_high_thresh - 100;
	else
		cache->bitmap_low_thresh = 0;
}

/*
 * Insert a fresh FREE_SPACE_INFO item for @block_group with an extent count
 * of zero and no flags. Releases @path before returning.
 *
 * Returns 0 on success or a negative errno from the insertion.
 */
static int add_new_free_space_info(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = block_group->key.objectid;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->key.offset;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	info = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_free_space_info);
	btrfs_set_free_space_extent_count(leaf, info, 0);
	btrfs_set_free_space_flags(leaf, info, 0);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * Look up the FREE_SPACE_INFO item for @block_group and return a pointer to
 * it inside the leaf held by @path. On success the caller owns the path; the
 * returned pointer is only valid while the path still references the leaf.
 *
 * @cow: forwarded to btrfs_search_slot(); non-zero to CoW the path for a
 *       subsequent modification.
 *
 * Returns the item pointer, or an ERR_PTR. -ENOENT (after a WARN + ASSERT)
 * means the info item is unexpectedly missing, which indicates corruption.
 */
struct btrfs_free_space_info *
search_free_space_info(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info,
		       struct btrfs_block_group_cache *block_group,
		       struct btrfs_path *path, int cow)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	int ret;

	key.objectid = block_group->key.objectid;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->key.offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret != 0) {
		btrfs_warn(fs_info, "missing free space info for %llu",
			   block_group->key.objectid);
		ASSERT(0);
		return ERR_PTR(-ENOENT);
	}

	return btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_free_space_info);
}

/*
 * btrfs_search_slot() but we're looking for the greatest key less than the
 * passed key.
 */
static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key, struct btrfs_path *p,
				  int ins_len, int cow)
{
	int ret;

	ret = btrfs_search_slot(trans, root, key, p, ins_len, cow);
	if (ret < 0)
		return ret;

	/* Exact match is unexpected for the sentinel keys used by callers. */
	if (ret == 0) {
		ASSERT(0);
		return -EIO;
	}

	/* Slot 0 would mean there is no key less than the requested one. */
	if (p->slots[0] == 0) {
		ASSERT(0);
		return -EIO;
	}
	p->slots[0]--;

	return 0;
}

/*
 * Bytes needed for a bitmap covering @size bytes at @sectorsize granularity
 * (one bit per sector, rounded up to whole bytes).
 */
static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
{
	return DIV_ROUND_UP((u32)div_u64(size, sectorsize), BITS_PER_BYTE);
}

/*
 * Allocate a zeroed bitmap of @bitmap_size bytes; falls back to vmalloc for
 * larger sizes. Free with kvfree().
 */
static unsigned long *alloc_bitmap(u32 bitmap_size)
{
	void *mem;

	/*
	 * The allocation size varies, observed numbers were < 4K up to 16K.
	 * Using vmalloc unconditionally would be too heavy, we'll try
	 * contiguous allocations first.
	 */
	if (bitmap_size <= PAGE_SIZE)
		return kzalloc(bitmap_size, GFP_NOFS);

	mem = kzalloc(bitmap_size, GFP_NOFS | __GFP_NOWARN);
	if (mem)
		return mem;

	return __vmalloc(bitmap_size, GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO,
			 PAGE_KERNEL);
}

/*
 * Convert @block_group's free space from the extent format to the bitmap
 * format: accumulate every FREE_SPACE_EXTENT item into an in-memory bitmap
 * while deleting the items, set BTRFS_FREE_SPACE_USING_BITMAPS in the
 * FREE_SPACE_INFO item, then write the bitmap out as FREE_SPACE_BITMAP
 * items. Aborts the transaction on any failure.
 */
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	unsigned long *bitmap;
	char *bitmap_cursor;
	u64 start, end;
	u64 bitmap_range, i;
	u32 bitmap_size, flags, expected_extent_count;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     block_group->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	/*
	 * Walk the block group's items backwards from its end, deleting
	 * extent items leaf by leaf until we hit the FREE_SPACE_INFO item.
	 */
	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
				u64 first, last;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				first = div_u64(found_key.objectid - start,
						block_group->sectorsize);
				last = div_u64(found_key.objectid + found_key.offset - start,
					       block_group->sectorsize);
				bitmap_set(bitmap, first, last - first);

				extent_count++;
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/* Cross-check against the recorded extent count to catch corruption. */
	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	/* Write the in-memory bitmap out, one item per bitmap_range chunk. */
	bitmap_cursor = (char *)bitmap;
	bitmap_range = block_group->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	i = start;
	while (i < end) {
		unsigned long ptr;
		u64 extent_size;
		u32 data_size;

		extent_size = min(end - i, bitmap_range);
		data_size = free_space_bitmap_size(extent_size,
						   block_group->sectorsize);

		key.objectid = i;
		key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
		key.offset = extent_size;

		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      data_size);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, bitmap_cursor, ptr,
				    data_size);
		btrfs_mark_buffer_dirty(leaf);
		btrfs_release_path(path);

		i += extent_size;
		bitmap_cursor += data_size;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Convert @block_group's free space from the bitmap format back to the
 * extent format: read every FREE_SPACE_BITMAP item into an in-memory bitmap
 * while deleting the items, clear BTRFS_FREE_SPACE_USING_BITMAPS, then
 * insert one FREE_SPACE_EXTENT item per run of set bits. Aborts the
 * transaction on any failure.
 */
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	unsigned long *bitmap;
	u64 start, end;
	/* Initialize to silence GCC.
 */
	u64 extent_start = 0;
	u64 offset;
	u32 bitmap_size, flags, expected_extent_count;
	int prev_bit = 0, bit, bitnr;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     block_group->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	/*
	 * Walk the block group's items backwards from its end, copying each
	 * bitmap item into the in-memory bitmap and deleting it, until we hit
	 * the FREE_SPACE_INFO item.
	 */
	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				unsigned long ptr;
				char *bitmap_cursor;
				u32 bitmap_pos, data_size;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				bitmap_pos = div_u64(found_key.objectid - start,
						     block_group->sectorsize *
						     BITS_PER_BYTE);
				bitmap_cursor = ((char *)bitmap) + bitmap_pos;
				data_size = free_space_bitmap_size(found_key.offset,
								   block_group->sectorsize);

				ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
				read_extent_buffer(leaf, bitmap_cursor, ptr,
						   data_size);

				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/* Emit one extent item per contiguous run of set bits. */
	offset = start;
	bitnr = 0;
	while (offset < end) {
		bit = !!test_bit(bitnr, bitmap);
		if (prev_bit == 0 && bit == 1) {
			/* 0 -> 1 transition: a free extent starts here. */
			extent_start = offset;
		} else if (prev_bit == 1 && bit == 0) {
			/* 1 -> 0 transition: the extent ends just before here. */
			key.objectid = extent_start;
			key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
			key.offset = offset - extent_start;

			ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
			if (ret)
				goto out;
			btrfs_release_path(path);

			extent_count++;
		}
		prev_bit = bit;
		offset += block_group->sectorsize;
		bitnr++;
	}
	/* Close out a run extending to the end of the block group. */
	if (prev_bit == 1) {
		key.objectid = extent_start;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = end - extent_start;

		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		btrfs_release_path(path);

		extent_count++;
	}

	/* Cross-check against the recorded extent count to catch corruption. */
	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Apply @new_extents (a signed delta) to @block_group's recorded free space
 * extent count, then convert between the extent and bitmap formats if the
 * new count crossed the relevant threshold.
 */
static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
					  struct btrfs_fs_info *fs_info,
					  struct btrfs_block_group_cache *block_group,
					  struct btrfs_path *path,
					  int new_extents)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	u32 extent_count;
	int ret = 0;

	if (new_extents == 0)
		return 0;

	info =
	       search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	flags = btrfs_free_space_flags(path->nodes[0], info);
	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);

	extent_count += new_extents;
	btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);

	/* Convert formats if the count crossed a threshold in either direction. */
	if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
	    extent_count > block_group->bitmap_high_thresh) {
		ret = convert_free_space_to_bitmaps(trans, fs_info, block_group,
						    path);
	} else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
		   extent_count < block_group->bitmap_low_thresh) {
		ret = convert_free_space_to_extents(trans, fs_info, block_group,
						    path);
	}

out:
	return ret;
}

/*
 * Return the bitmap bit (0 or 1) corresponding to byte @offset within the
 * FREE_SPACE_BITMAP item that @path currently points at. @offset must fall
 * inside that item's range.
 */
int free_space_test_bit(struct btrfs_block_group_cache *block_group,
			struct btrfs_path *path, u64 offset)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 found_start, found_end;
	unsigned long ptr, i;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(offset >= found_start && offset < found_end);

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	i = div_u64(offset - found_start, block_group->sectorsize);
	return !!extent_buffer_test_bit(leaf, ptr, i);
}

/*
 * Set (@bit != 0) or clear the bits for [*start, *start + *size) within the
 * FREE_SPACE_BITMAP item that @path points at, clamped to that item's range.
 * On return *start and *size are advanced past the portion handled here, so
 * the caller can continue with the next bitmap item until *size hits 0.
 */
static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
				struct btrfs_path *path, u64 *start, u64 *size,
				int bit)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 end = *start + *size;
	u64 found_start, found_end;
	unsigned long ptr, first, last;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(*start >= found_start && *start < found_end);
	ASSERT(end > found_start);

	/* Only touch the part of the range covered by this bitmap item. */
	if (end > found_end)
		end = found_end;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	first = div_u64(*start - found_start, block_group->sectorsize);
	last = div_u64(end - found_start, block_group->sectorsize);
	if (bit)
		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
	else
		extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
	btrfs_mark_buffer_dirty(leaf);

	*size -= end - *start;
	*start = end;
}

/*
 * We can't use btrfs_next_item() in modify_free_space_bitmap() because
 * btrfs_next_leaf() doesn't get the path for writing. We can forgo the fancy
 * tree walking in btrfs_next_leaf() anyways because we know exactly what we're
 * looking for.
 */
static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct btrfs_path *p)
{
	struct btrfs_key key;

	/* Next item is in the same leaf: just bump the slot. */
	if (p->slots[0] + 1 < btrfs_header_nritems(p->nodes[0])) {
		p->slots[0]++;
		return 0;
	}

	/* End of leaf: re-search for the first key past the current bitmap. */
	btrfs_item_key_to_cpu(p->nodes[0], &key, p->slots[0]);
	btrfs_release_path(p);

	key.objectid += key.offset;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	return btrfs_search_prev_slot(trans, root, &key, p, 0, 1);
}

/*
 * If remove is 1, then we are removing free space, thus clearing bits in the
 * bitmap. If remove is 0, then we are adding free space, thus setting bits in
 * the bitmap.
 */
static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size, int remove)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	u64 end = start + size;
	u64 cur_start, cur_size;
	int prev_bit, next_bit;
	int new_extents;
	int ret;

	/*
	 * Read the bit for the block immediately before the extent of space if
	 * that block is within the block group.
	 */
	if (start > block_group->key.objectid) {
		u64 prev_block = start - block_group->sectorsize;

		key.objectid = prev_block;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		prev_bit = free_space_test_bit(block_group, path, prev_block);

		/* The previous block may have been in the previous bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (start >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}
	} else {
		key.objectid = start;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		/* -1: no neighbor, start is the block group boundary. */
		prev_bit = -1;
	}

	/*
	 * Iterate over all of the bitmaps overlapped by the extent of space,
	 * clearing/setting bits as required.
	 */
	cur_start = start;
	cur_size = size;
	while (1) {
		free_space_set_bits(block_group, path, &cur_start, &cur_size,
				    !remove);
		if (cur_size == 0)
			break;
		ret = free_space_next_bitmap(trans, root, path);
		if (ret)
			goto out;
	}

	/*
	 * Read the bit for the block immediately after the extent of space if
	 * that block is within the block group.
	 */
	if (end < block_group->key.objectid + block_group->key.offset) {
		/* The next block may be in the next bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (end >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}

		next_bit = free_space_test_bit(block_group, path, end);
	} else {
		/* -1: no neighbor, end is the block group boundary. */
		next_bit = -1;
	}

	/*
	 * Derive the net change in extent count from the neighboring bits:
	 * set neighbors mean a removal splits off leftovers and an addition
	 * merges with them.
	 */
	if (remove) {
		new_extents = -1;
		if (prev_bit == 1) {
			/* Leftover on the left. */
			new_extents++;
		}
		if (next_bit == 1) {
			/* Leftover on the right. */
			new_extents++;
		}
	} else {
		new_extents = 1;
		if (prev_bit == 1) {
			/* Merging with neighbor on the left. */
			new_extents--;
		}
		if (next_bit == 1) {
			/* Merging with neighbor on the right. */
			new_extents--;
		}
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);

out:
	return ret;
}

/*
 * Remove [start, start + size) from the free space tree when @block_group is
 * in the extent format. The range must be fully contained in one existing
 * FREE_SPACE_EXTENT item.
 */
static int remove_free_space_extent(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = -1;
	int ret;

	key.objectid = start;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(start >= found_start && end <= found_end);

	/*
	 * Okay, now that we've found the free space extent which contains the
	 * free space that we are removing, there are four cases:
	 *
	 * 1. We're using the whole extent: delete the key we found and
	 * decrement the free space extent count.
	 * 2. We are using part of the extent starting at the beginning: delete
	 * the key we found and insert a new key representing the leftover at
	 * the end. There is no net change in the number of extents.
	 * 3. We are using part of the extent ending at the end: delete the key
	 * we found and insert a new key representing the leftover at the
	 * beginning. There is no net change in the number of extents.
	 * 4. We are using part of the extent in the middle: delete the key we
	 * found and insert two new keys representing the leftovers on each
	 * side. Where we used to have one extent, we now have two, so increment
	 * the extent count. We may need to convert the block group to bitmaps
	 * as a result.
	 */

	/* Delete the existing key (cases 1-4). */
	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	/* Add a key for leftovers at the beginning (cases 3 and 4). */
	if (start > found_start) {
		key.objectid = found_start;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = start - found_start;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	/* Add a key for leftovers at the end (cases 2 and 4). */
	if (end < found_end) {
		key.objectid = end;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = found_end - end;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);

out:
	return ret;
}

/*
 * Remove free space from the tree for @block_group; the caller must hold
 * block_group->free_space_lock. Dispatches on the block group's current
 * free space format.
 */
int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path, u64 start, u64 size)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	int ret;

	/* Lazily create the info item for a block group new this transaction. */
	if (block_group->needs_free_space) {
		ret = __add_block_group_free_space(trans, fs_info, block_group,
						   path);
		if (ret)
			return ret;
	}

	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
	if (IS_ERR(info))
		return PTR_ERR(info);
	flags = btrfs_free_space_flags(path->nodes[0], info);
	btrfs_release_path(path);

	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
		return modify_free_space_bitmap(trans, fs_info, block_group,
						path, start, size, 1);
	} else {
		return remove_free_space_extent(trans, fs_info, block_group,
						path, start, size);
	}
}

/*
 * Public entry point: remove [start, start + size) from the free space tree.
 * No-op when the free space tree feature is not enabled. Aborts the
 * transaction on failure.
 */
int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				u64 start, u64 size)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_path *path;
	int ret;

	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group) {
		ASSERT(0);
		ret = -ENOENT;
		goto out;
	}

	mutex_lock(&block_group->free_space_lock);
	ret = __remove_from_free_space_tree(trans,
					    fs_info, block_group, path,
					    start, size);
	mutex_unlock(&block_group->free_space_lock);

	btrfs_put_block_group(block_group);
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Add [start, start + size) to the free space tree when @block_group is in
 * the extent format, merging with an adjacent free space extent on either
 * side where possible.
 */
static int add_free_space_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *block_group,
				 struct btrfs_path *path,
				 u64 start, u64 size)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key, new_key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = 1;
	int ret;

	/*
	 * We are adding a new extent of free space, but we need to merge
	 * extents. There are four cases here:
	 *
	 * 1. The new extent does not have any immediate neighbors to merge
	 * with: add the new key and increment the free space extent count. We
	 * may need to convert the block group to bitmaps as a result.
	 * 2. The new extent has an immediate neighbor before it: remove the
	 * previous key and insert a new key combining both of them. There is no
	 * net change in the number of extents.
	 * 3. The new extent has an immediate neighbor after it: remove the next
	 * key and insert a new key combining both of them. There is no net
	 * change in the number of extents.
	 * 4. The new extent has immediate neighbors on both sides: remove both
	 * of the keys and insert a new key combining all of them. Where we used
	 * to have two extents, we now have one, so decrement the extent count.
	 */

	new_key.objectid = start;
	new_key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
	new_key.offset = size;

	/* Search for a neighbor on the left. */
	if (start == block_group->key.objectid)
		goto right;
	key.objectid = start - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto right;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT(found_start < start && found_end <= start);

	/*
	 * Delete the neighbor on the left and absorb it into the new key (cases
	 * 2 and 4).
	 */
	if (found_end == start) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.objectid = found_start;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

right:
	/* Search for a neighbor on the right. */
	if (end == block_group->key.objectid + block_group->key.offset)
		goto insert;
	key.objectid = end;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto insert;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT((found_start < start && found_end <= start) ||
	       (found_start >= end && found_end > end));

	/*
	 * Delete the neighbor on the right and absorb it into the new key
	 * (cases 3 and 4).
	 */
	if (found_start == end) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

insert:
	/* Insert the new key (cases 1-4). */
	ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0);
	if (ret)
		goto out;

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);

out:
	return ret;
}

/*
 * Add free space to the tree for @block_group; the caller must hold
 * block_group->free_space_lock. Dispatches on the block group's current
 * free space format.
 */
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_path *path, u64 start, u64 size)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	int ret;

	/* Lazily create the info item for a block group new this transaction. */
	if (block_group->needs_free_space) {
		ret = __add_block_group_free_space(trans, fs_info, block_group,
						   path);
		if (ret)
			return ret;
	}

	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
	if (IS_ERR(info))
		return PTR_ERR(info);
	flags = btrfs_free_space_flags(path->nodes[0], info);
	btrfs_release_path(path);

	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
		return modify_free_space_bitmap(trans, fs_info, block_group,
						path, start, size, 0);
	} else {
		return add_free_space_extent(trans, fs_info, block_group, path,
					     start, size);
	}
}

/*
 * Public entry point: add [start, start + size) to the free space tree.
 * No-op when the free space tree feature is not enabled. Aborts the
 * transaction on failure.
 */
int add_to_free_space_tree(struct btrfs_trans_handle *trans,
			   struct btrfs_fs_info *fs_info,
			   u64 start, u64 size)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_path *path;
	int ret;

	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group) {
		ASSERT(0);
		ret = -ENOENT;
		goto out;
	}
1043 mutex_lock(&block_group->free_space_lock); 1044 ret = __add_to_free_space_tree(trans, fs_info, block_group, path, start, 1045 size); 1046 mutex_unlock(&block_group->free_space_lock); 1047 1048 btrfs_put_block_group(block_group); 1049 out: 1050 btrfs_free_path(path); 1051 if (ret) 1052 btrfs_abort_transaction(trans, ret); 1053 return ret; 1054 } 1055 1056 /* 1057 * Populate the free space tree by walking the extent tree. Operations on the 1058 * extent tree that happen as a result of writes to the free space tree will go 1059 * through the normal add/remove hooks. 1060 */ 1061 static int populate_free_space_tree(struct btrfs_trans_handle *trans, 1062 struct btrfs_fs_info *fs_info, 1063 struct btrfs_block_group_cache *block_group) 1064 { 1065 struct btrfs_root *extent_root = fs_info->extent_root; 1066 struct btrfs_path *path, *path2; 1067 struct btrfs_key key; 1068 u64 start, end; 1069 int ret; 1070 1071 path = btrfs_alloc_path(); 1072 if (!path) 1073 return -ENOMEM; 1074 path->reada = 1; 1075 1076 path2 = btrfs_alloc_path(); 1077 if (!path2) { 1078 btrfs_free_path(path); 1079 return -ENOMEM; 1080 } 1081 1082 ret = add_new_free_space_info(trans, fs_info, block_group, path2); 1083 if (ret) 1084 goto out; 1085 1086 mutex_lock(&block_group->free_space_lock); 1087 1088 /* 1089 * Iterate through all of the extent and metadata items in this block 1090 * group, adding the free space between them and the free space at the 1091 * end. Note that EXTENT_ITEM and METADATA_ITEM are less than 1092 * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's 1093 * contained in. 
1094 */ 1095 key.objectid = block_group->key.objectid; 1096 key.type = BTRFS_EXTENT_ITEM_KEY; 1097 key.offset = 0; 1098 1099 ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0); 1100 if (ret < 0) 1101 goto out_locked; 1102 ASSERT(ret == 0); 1103 1104 start = block_group->key.objectid; 1105 end = block_group->key.objectid + block_group->key.offset; 1106 while (1) { 1107 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1108 1109 if (key.type == BTRFS_EXTENT_ITEM_KEY || 1110 key.type == BTRFS_METADATA_ITEM_KEY) { 1111 if (key.objectid >= end) 1112 break; 1113 1114 if (start < key.objectid) { 1115 ret = __add_to_free_space_tree(trans, fs_info, 1116 block_group, 1117 path2, start, 1118 key.objectid - 1119 start); 1120 if (ret) 1121 goto out_locked; 1122 } 1123 start = key.objectid; 1124 if (key.type == BTRFS_METADATA_ITEM_KEY) 1125 start += fs_info->tree_root->nodesize; 1126 else 1127 start += key.offset; 1128 } else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { 1129 if (key.objectid != block_group->key.objectid) 1130 break; 1131 } 1132 1133 ret = btrfs_next_item(extent_root, path); 1134 if (ret < 0) 1135 goto out_locked; 1136 if (ret) 1137 break; 1138 } 1139 if (start < end) { 1140 ret = __add_to_free_space_tree(trans, fs_info, block_group, 1141 path2, start, end - start); 1142 if (ret) 1143 goto out_locked; 1144 } 1145 1146 ret = 0; 1147 out_locked: 1148 mutex_unlock(&block_group->free_space_lock); 1149 out: 1150 btrfs_free_path(path2); 1151 btrfs_free_path(path); 1152 return ret; 1153 } 1154 1155 int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info) 1156 { 1157 struct btrfs_trans_handle *trans; 1158 struct btrfs_root *tree_root = fs_info->tree_root; 1159 struct btrfs_root *free_space_root; 1160 struct btrfs_block_group_cache *block_group; 1161 struct rb_node *node; 1162 int ret; 1163 1164 trans = btrfs_start_transaction(tree_root, 0); 1165 if (IS_ERR(trans)) 1166 return PTR_ERR(trans); 1167 1168 
	/* Flag that the tree is still under construction. */
	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	free_space_root = btrfs_create_tree(trans, fs_info,
					    BTRFS_FREE_SPACE_TREE_OBJECTID);
	if (IS_ERR(free_space_root)) {
		ret = PTR_ERR(free_space_root);
		goto abort;
	}
	fs_info->free_space_root = free_space_root;

	/* Mirror the free space of every cached block group into the tree. */
	node = rb_first(&fs_info->block_group_cache_tree);
	while (node) {
		block_group = rb_entry(node, struct btrfs_block_group_cache,
				       cache_node);
		ret = populate_free_space_tree(trans, fs_info, block_group);
		if (ret)
			goto abort;
		node = rb_next(node);
	}

	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);

	ret = btrfs_commit_transaction(trans, tree_root);
	if (ret)
		return ret;

	return 0;

abort:
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	btrfs_abort_transaction(trans, ret);
	btrfs_end_transaction(trans, tree_root);
	return ret;
}

/*
 * Delete every item in the given root by repeatedly removing all items of the
 * leftmost leaf until the tree is empty.
 */
static int clear_free_space_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int nr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	/* Smallest possible key: each search lands in the leftmost leaf. */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;

		nr = btrfs_header_nritems(path->nodes[0]);
		if (!nr)
			break;

		/* Delete the whole leaf in one batch. */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Tear down the free space tree: clear the compat_ro bit, empty the tree, and
 * delete its root from the tree of roots.
 */
int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root =
fs_info->tree_root; 1248 struct btrfs_root *free_space_root = fs_info->free_space_root; 1249 int ret; 1250 1251 trans = btrfs_start_transaction(tree_root, 0); 1252 if (IS_ERR(trans)) 1253 return PTR_ERR(trans); 1254 1255 btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE); 1256 fs_info->free_space_root = NULL; 1257 1258 ret = clear_free_space_tree(trans, free_space_root); 1259 if (ret) 1260 goto abort; 1261 1262 ret = btrfs_del_root(trans, tree_root, &free_space_root->root_key); 1263 if (ret) 1264 goto abort; 1265 1266 list_del(&free_space_root->dirty_list); 1267 1268 btrfs_tree_lock(free_space_root->node); 1269 clean_tree_block(trans, tree_root->fs_info, free_space_root->node); 1270 btrfs_tree_unlock(free_space_root->node); 1271 btrfs_free_tree_block(trans, free_space_root, free_space_root->node, 1272 0, 1); 1273 1274 free_extent_buffer(free_space_root->node); 1275 free_extent_buffer(free_space_root->commit_root); 1276 kfree(free_space_root); 1277 1278 ret = btrfs_commit_transaction(trans, tree_root); 1279 if (ret) 1280 return ret; 1281 1282 return 0; 1283 1284 abort: 1285 btrfs_abort_transaction(trans, ret); 1286 btrfs_end_transaction(trans, tree_root); 1287 return ret; 1288 } 1289 1290 static int __add_block_group_free_space(struct btrfs_trans_handle *trans, 1291 struct btrfs_fs_info *fs_info, 1292 struct btrfs_block_group_cache *block_group, 1293 struct btrfs_path *path) 1294 { 1295 u64 start, end; 1296 int ret; 1297 1298 start = block_group->key.objectid; 1299 end = block_group->key.objectid + block_group->key.offset; 1300 1301 block_group->needs_free_space = 0; 1302 1303 ret = add_new_free_space_info(trans, fs_info, block_group, path); 1304 if (ret) 1305 return ret; 1306 1307 return __add_to_free_space_tree(trans, fs_info, block_group, path, 1308 block_group->key.objectid, 1309 block_group->key.offset); 1310 } 1311 1312 int add_block_group_free_space(struct btrfs_trans_handle *trans, 1313 struct btrfs_fs_info *fs_info, 1314 struct btrfs_block_group_cache 
			       *block_group)
{
	struct btrfs_path *path = NULL;
	int ret = 0;

	/* Nothing to do unless the free space tree feature is enabled. */
	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	mutex_lock(&block_group->free_space_lock);
	/* Already added to the free space tree. */
	if (!block_group->needs_free_space)
		goto out;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = __add_block_group_free_space(trans, fs_info, block_group, path);

out:
	btrfs_free_path(path);
	mutex_unlock(&block_group->free_space_lock);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Delete all free space tree items for a block group that is being removed.
 * Aborts the transaction on failure.
 */
int remove_block_group_free_space(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_path *path;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	u64 start, end;
	int done = 0, nr;
	int ret;

	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	if (block_group->needs_free_space) {
		/* We never added this block group to the free space tree.
		 */
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	/*
	 * Largest possible key inside the block group: each search lands on
	 * the group's last remaining item so we can walk backwards.
	 */
	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		/*
		 * Scan the leaf backwards, counting the items that belong to
		 * this block group so they can be deleted in a single batch.
		 * The FREE_SPACE_INFO item sorts before the group's extent and
		 * bitmap items, so reaching it means we are finished.
		 */
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				nr++;
				path->slots[0]--;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY ||
				   found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	ret = 0;
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}

/*
 * Cache a block group's free space from its FREE_SPACE_BITMAP items, which
 * follow the FREE_SPACE_INFO item that @path points at on entry.
 */
static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
				   struct btrfs_path *path,
				   u32 expected_extent_count)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int prev_bit = 0, bit;
	/* Initialize to silence GCC.
	 */
	u64 extent_start = 0;
	u64 end, offset;
	u64 total_found = 0;
	u32 extent_count = 0;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	root = fs_info->free_space_root;

	end = block_group->key.objectid + block_group->key.offset;

	while (1) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		/* The next block group's info item ends our range. */
		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
			break;

		ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
		ASSERT(key.objectid < end && key.objectid + key.offset <= end);

		caching_ctl->progress = key.objectid;

		/*
		 * Turn runs of set bits into free space extents: a 0 -> 1
		 * transition starts an extent, a 1 -> 0 transition ends one.
		 */
		offset = key.objectid;
		while (offset < key.objectid + key.offset) {
			bit = free_space_test_bit(block_group, path, offset);
			if (prev_bit == 0 && bit == 1) {
				extent_start = offset;
			} else if (prev_bit == 1 && bit == 0) {
				total_found += add_new_free_space(block_group,
								  fs_info,
								  extent_start,
								  offset);
				if (total_found > CACHING_CTL_WAKE_UP) {
					/* Let waiters make progress. */
					total_found = 0;
					wake_up(&caching_ctl->wait);
				}
				extent_count++;
			}
			prev_bit = bit;
			offset += block_group->sectorsize;
		}
	}
	/* Close an extent that runs to the end of the block group. */
	if (prev_bit == 1) {
		total_found += add_new_free_space(block_group, fs_info,
						  extent_start, end);
		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	caching_ctl->progress = (u64)-1;

	ret = 0;
out:
	return ret;
}

/*
 * Cache a block group's free space from its FREE_SPACE_EXTENT items, which
 * follow the FREE_SPACE_INFO item that @path points at on entry.
 */
static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
				   struct btrfs_path *path,
				   u32 expected_extent_count)
{
	struct
	btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	u64 end;
	u64 total_found = 0;
	u32 extent_count = 0;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	root = fs_info->free_space_root;

	end = block_group->key.objectid + block_group->key.offset;

	while (1) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		/* The next block group's info item ends our range. */
		if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
			break;

		ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
		ASSERT(key.objectid < end && key.objectid + key.offset <= end);

		caching_ctl->progress = key.objectid;

		/* Each item maps directly to one free space extent. */
		total_found += add_new_free_space(block_group, fs_info,
						  key.objectid,
						  key.objectid + key.offset);
		if (total_found > CACHING_CTL_WAKE_UP) {
			/* Let waiters make progress. */
			total_found = 0;
			wake_up(&caching_ctl->wait);
		}
		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info,
			  "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	caching_ctl->progress = (u64)-1;

	ret = 0;
out:
	return ret;
}

/*
 * Cache a block group's free space from the free space tree, dispatching on
 * whether the space is stored as extents or as bitmaps.
 */
int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_free_space_info *info;
	struct btrfs_path *path;
	u32 extent_count, flags;
	int ret;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Just like caching_thread() doesn't want to deadlock on the extent
	 * tree, we don't want to deadlock on the free space tree.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;	/* the tree is then read sequentially */

	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
	flags = btrfs_free_space_flags(path->nodes[0], info);

	/*
	 * We left path pointing to the free space info item, so now
	 * load_free_space_foo can just iterate through the free space tree from
	 * there.
	 */
	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS)
		ret = load_free_space_bitmaps(caching_ctl, path, extent_count);
	else
		ret = load_free_space_extents(caching_ctl, path, extent_count);

out:
	btrfs_free_path(path);
	return ret;
}