// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "messages.h"
#include "ctree.h"
#include "extent_map.h"
#include "compression.h"
#include "btrfs_inode.h"
#include "disk-io.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
					     sizeof(struct extent_map), 0, 0, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void __cold extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}

/*
 * Initialize the extent tree @tree. Should be called for each new inode or
 * other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->root = RB_ROOT;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}

/*
 * Allocate a new extent_map structure. The new structure is returned with a
 * reference count of one and needs to be freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;

	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/*
 * Drop the reference count on @em by one and free the structure if the
 * reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (refcount_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		kmem_cache_free(extent_map_cache, em);
	}
}

/* Do the math around the end of an extent, handling wrapping. */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

static void dec_evictable_extent_maps(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_is_testing(fs_info) && is_fstree(btrfs_root_id(inode->root)))
		percpu_counter_dec(&fs_info->evictable_extent_maps);
}

static int tree_insert(struct rb_root *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start)
			p = &(*p)->rb_left;
		else if (em->start >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color(&em->rb_node, root);
	return 0;
}

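/*
 * Illustration of tree_insert()'s overlap rules (with made-up ranges): the
 * tree keeps extent maps sorted by file offset and non-overlapping. With a
 * single extent map for [0, 4K) in the tree, inserting one for [4K, 8K)
 * succeeds, while inserting one for [2K, 6K) fails with -EEXIST already
 * during the descent. The two loops after the descent catch overlaps with
 * the would-be neighbors of the insertion point, e.g. inserting [6K, 10K)
 * into a tree holding [0, 4K) and [8K, 12K) fails because its end (10K)
 * reaches into the next entry (starting at 8K).
 */
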
/*
 * Search through the tree for an extent_map with a given offset. If it can't
 * be found, try to find some neighboring extents.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_or_next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	ASSERT(prev_or_next_ret);

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	orig_prev = prev;
	while (prev && offset >= extent_map_end(prev_entry)) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}

	/*
	 * The next extent map was found, return it, as in this case the caller
	 * does not care about the previous one.
	 */
	if (prev) {
		*prev_or_next_ret = prev;
		return NULL;
	}

	prev = orig_prev;
	prev_entry = rb_entry(prev, struct extent_map, rb_node);
	while (prev && offset < prev_entry->start) {
		prev = rb_prev(prev);
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
	}
	*prev_or_next_ret = prev;

	return NULL;
}

static inline u64 extent_map_block_len(const struct extent_map *em)
{
	if (extent_map_is_compressed(em))
		return em->disk_num_bytes;
	return em->len;
}

static inline u64 extent_map_block_end(const struct extent_map *em)
{
	const u64 block_start = extent_map_block_start(em);
	const u64 block_end = block_start + extent_map_block_len(em);

	if (block_end < block_start)
		return (u64)-1;

	return block_end;
}

static bool can_merge_extent_map(const struct extent_map *em)
{
	if (em->flags & EXTENT_FLAG_PINNED)
		return false;

	/* Don't merge compressed extents, we need to know their actual size. */
	if (extent_map_is_compressed(em))
		return false;

	if (em->flags & EXTENT_FLAG_LOGGING)
		return false;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&em->list))
		return false;

	return true;
}

/* Check to see if two extent_map structs are adjacent and safe to merge. */
static bool mergeable_maps(const struct extent_map *prev, const struct extent_map *next)
{
	if (extent_map_end(prev) != next->start)
		return false;

	/*
	 * The merged flag is not an on-disk flag, it just indicates we had the
	 * extent maps of 2 (or more) adjacent extents merged, so factor it out.
	 */
	if ((prev->flags & ~EXTENT_FLAG_MERGED) !=
	    (next->flags & ~EXTENT_FLAG_MERGED))
		return false;

	if (next->disk_bytenr < EXTENT_MAP_LAST_BYTE - 1)
		return extent_map_block_start(next) == extent_map_block_end(prev);

	/* HOLES and INLINE extents. */
	return next->disk_bytenr == prev->disk_bytenr;
}

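/*
 * Example of the adjacency rules above (illustrative values): a regular
 * extent map for file range [0, 4K) whose on-disk data ends at 1M + 4K and
 * one for [4K, 8K) whose on-disk data starts at 1M + 4K are mergeable, as
 * they are contiguous both in the file and on disk. Holes and inline extents
 * only need matching disk_bytenr values and file contiguity, since they have
 * no on-disk data extent.
 */
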
/*
 * Handle the on-disk data extent merge for @prev and @next.
 *
 * @prev:   left extent to merge
 * @next:   right extent to merge
 * @merged: the extent we will not discard after the merge; updated with new values
 *
 * After this, one of the two extents is the new merged extent and the other is
 * removed from the tree and likely freed. Note that @merged is one of @prev/@next
 * so there is const/non-const aliasing occurring here.
 *
 * Only touches disk_bytenr/disk_num_bytes/offset/ram_bytes.
 * For now only uncompressed regular extents can be merged.
 */
static void merge_ondisk_extents(const struct extent_map *prev, const struct extent_map *next,
				 struct extent_map *merged)
{
	u64 new_disk_bytenr;
	u64 new_disk_num_bytes;
	u64 new_offset;

	/* @prev and @next should not be compressed. */
	ASSERT(!extent_map_is_compressed(prev));
	ASSERT(!extent_map_is_compressed(next));

	/*
	 * There are two different cases where @prev and @next can be merged.
	 *
	 * 1) They are referring to the same data extent:
	 *
	 * |<----- data extent A ----->|
	 *    |<- prev ->|<- next ->|
	 *
	 * 2) They are referring to different data extents but still adjacent:
	 *
	 * |<-- data extent A -->|<-- data extent B -->|
	 *            |<- prev ->|<- next ->|
	 *
	 * The calculation here always merges the data extents first, then
	 * updates @offset using the new data extents.
	 *
	 * For case 1), the merged data extent would be the same.
	 * For case 2), we just merge the two data extents into one.
	 */
	new_disk_bytenr = min(prev->disk_bytenr, next->disk_bytenr);
	new_disk_num_bytes = max(prev->disk_bytenr + prev->disk_num_bytes,
				 next->disk_bytenr + next->disk_num_bytes) -
			     new_disk_bytenr;
	new_offset = prev->disk_bytenr + prev->offset - new_disk_bytenr;

	merged->disk_bytenr = new_disk_bytenr;
	merged->disk_num_bytes = new_disk_num_bytes;
	merged->ram_bytes = new_disk_num_bytes;
	merged->offset = new_offset;
}

static void dump_extent_map(struct btrfs_fs_info *fs_info, const char *prefix,
			    struct extent_map *em)
{
	if (!IS_ENABLED(CONFIG_BTRFS_DEBUG))
		return;
	btrfs_crit(fs_info,
"%s, start=%llu len=%llu disk_bytenr=%llu disk_num_bytes=%llu ram_bytes=%llu offset=%llu flags=0x%x",
		   prefix, em->start, em->len, em->disk_bytenr, em->disk_num_bytes,
		   em->ram_bytes, em->offset, em->flags);
	ASSERT(0);
}

/* Internal sanity checks for btrfs debug builds. */
static void validate_extent_map(struct btrfs_fs_info *fs_info, struct extent_map *em)
{
	if (!IS_ENABLED(CONFIG_BTRFS_DEBUG))
		return;
	if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
		if (em->disk_num_bytes == 0)
			dump_extent_map(fs_info, "zero disk_num_bytes", em);
		if (em->offset + em->len > em->ram_bytes)
			dump_extent_map(fs_info, "ram_bytes too small", em);
		if (em->offset + em->len > em->disk_num_bytes &&
		    !extent_map_is_compressed(em))
			dump_extent_map(fs_info, "disk_num_bytes too small", em);
		if (!extent_map_is_compressed(em) &&
		    em->ram_bytes != em->disk_num_bytes)
			dump_extent_map(fs_info,
		"ram_bytes mismatch with disk_num_bytes for non-compressed em",
					em);
	} else if (em->offset) {
		dump_extent_map(fs_info, "non-zero offset for hole/inline", em);
	}
}

static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *tree = &inode->extent_tree;
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	/*
	 * We can't modify an extent map that is in the tree and that is being
	 * used by another task, as it can cause that other task to see it in
	 * inconsistent state during the merging. We always have 1 reference for
	 * the tree and 1 for this task (which is unpinning the extent map or
	 * clearing the logging flag), so anything > 2 means it's being used by
	 * other tasks too.
	 */
	if (refcount_read(&em->refs) > 2)
		return;

	if (!can_merge_extent_map(em))
		return;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->generation = max(em->generation, merge->generation);

			if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
				merge_ondisk_extents(merge, em, em);
			em->flags |= EXTENT_FLAG_MERGED;

			validate_extent_map(fs_info, em);
			rb_erase(&merge->rb_node, &tree->root);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
			dec_evictable_extent_maps(inode);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
		em->len += merge->len;
		if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
			merge_ondisk_extents(em, merge, em);
		validate_extent_map(fs_info, em);
		rb_erase(&merge->rb_node, &tree->root);
		RB_CLEAR_NODE(&merge->rb_node);
		em->generation = max(em->generation, merge->generation);
		em->flags |= EXTENT_FLAG_MERGED;
		free_extent_map(merge);
		dec_evictable_extent_maps(inode);
	}
}

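/*
 * Worked example for the merging above (illustrative values): given two
 * mergeable extent maps prev = { start = 0, len = 4K, disk_bytenr = 1M,
 * disk_num_bytes = 4K, offset = 0 } and em = { start = 4K, len = 4K,
 * disk_bytenr = 1M + 4K, disk_num_bytes = 4K, offset = 0 }, the backward
 * merge leaves em covering file range [0, 8K) with disk_bytenr = 1M,
 * disk_num_bytes = ram_bytes = 8K and offset = 0 (case 2) of
 * merge_ondisk_extents()).
 */
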
/*
 * Unpin an extent from the cache.
 *
 * @inode: the inode from which we are unpinning an extent range
 * @start: logical offset in the file
 * @len:   length of the extent
 * @gen:   generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly. Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 *
 * Returns: 0 on success
 *	    -ENOENT when the extent is not found in the tree
 *	    -EUCLEAN if the found extent does not match the expected start
 */
int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret = 0;
	struct extent_map *em;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	if (WARN_ON(!em)) {
		btrfs_warn(fs_info,
"no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu",
			   btrfs_ino(inode), btrfs_root_id(inode->root),
			   start, start + len, gen);
		ret = -ENOENT;
		goto out;
	}

	if (WARN_ON(em->start != start)) {
		btrfs_warn(fs_info,
"found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu",
			   btrfs_ino(inode), btrfs_root_id(inode->root),
			   em->start, start, start + len, gen);
		ret = -EUCLEAN;
		goto out;
	}

	em->generation = gen;
	em->flags &= ~EXTENT_FLAG_PINNED;

	try_merge_map(inode, em);

out:
	write_unlock(&tree->lock);
	free_extent_map(em);
	return ret;
}

void clear_em_logging(struct btrfs_inode *inode, struct extent_map *em)
{
	lockdep_assert_held_write(&inode->extent_tree.lock);

	em->flags &= ~EXTENT_FLAG_LOGGING;
	if (extent_map_in_tree(em))
		try_merge_map(inode, em);
}

static inline void setup_extent_mapping(struct btrfs_inode *inode,
					struct extent_map *em,
					int modified)
{
	refcount_inc(&em->refs);

	ASSERT(list_empty(&em->list));

	if (modified)
		list_add(&em->list, &inode->extent_tree.modified_extents);
	else
		try_merge_map(inode, em);
}

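/*
 * Summary of the reference counting rules used below: alloc_extent_map()
 * returns an extent map with one reference for the caller,
 * setup_extent_mapping() takes an extra one for the tree, and the lookup
 * helpers take one for their caller. remove_extent_mapping() drops none, so
 * its callers drop the tree's reference with free_extent_map() afterwards.
 */
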
/*
 * Add a new extent map to an inode's extent map tree.
 *
 * @inode:    the target inode
 * @em:       map to insert
 * @modified: indicate whether the given @em should be added to the
 *	      modified list, which indicates the extent needs to be logged
 *
 * Insert @em into @inode's extent map tree or perform a simple
 * forward/backward merge with existing mappings. The extent_map struct passed
 * in will be inserted into the tree directly, with an additional reference
 * taken, or a reference dropped if the merge attempt was successful.
 */
static int add_extent_mapping(struct btrfs_inode *inode,
			      struct extent_map *em, int modified)
{
	struct extent_map_tree *tree = &inode->extent_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	lockdep_assert_held_write(&tree->lock);

	validate_extent_map(fs_info, em);
	ret = tree_insert(&tree->root, em);
	if (ret)
		return ret;

	setup_extent_mapping(inode, em, modified);

	if (!btrfs_is_testing(fs_info) && is_fstree(btrfs_root_id(root)))
		percpu_counter_inc(&fs_info->evictable_extent_maps);

	return 0;
}

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev_or_next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->root, start, &prev_or_next);
	if (!rb_node) {
		if (prev_or_next)
			rb_node = prev_or_next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	refcount_inc(&em->refs);
	return em;
}

/*
 * Lookup an extent_map that intersects the range [@start, @start + @len).
 *
 * @tree:  tree to lookup in
 * @start: byte offset to start the search
 * @len:   length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range. There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}

/*
 * Find a nearby extent map intersecting @start + @len (not an exact search).
 *
 * @tree:  tree to lookup in
 * @start: byte offset to start the search
 * @len:   length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

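/*
 * Typical lookup pattern (an illustrative sketch, not a helper defined here):
 * take the tree lock around the lookup and drop the returned reference once
 * done with the extent map:
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *	if (em) {
 *		// ... use em->start, em->len, extent_map_block_start(em) ...
 *		free_extent_map(em);
 *	}
 */
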
/*
 * Remove an extent_map from its inode's extent tree.
 *
 * @inode: the inode the extent map belongs to
 * @em:    extent map being removed
 *
 * Remove @em from the extent tree of @inode. No reference counts are dropped,
 * and no checks are done to see if the range is in use.
 */
void remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em)
{
	struct extent_map_tree *tree = &inode->extent_tree;

	lockdep_assert_held_write(&tree->lock);

	WARN_ON(em->flags & EXTENT_FLAG_PINNED);
	rb_erase(&em->rb_node, &tree->root);
	if (!(em->flags & EXTENT_FLAG_LOGGING))
		list_del_init(&em->list);
	RB_CLEAR_NODE(&em->rb_node);

	dec_evictable_extent_maps(inode);
}

static void replace_extent_mapping(struct btrfs_inode *inode,
				   struct extent_map *cur,
				   struct extent_map *new,
				   int modified)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *tree = &inode->extent_tree;

	lockdep_assert_held_write(&tree->lock);

	validate_extent_map(fs_info, new);

	WARN_ON(cur->flags & EXTENT_FLAG_PINNED);
	ASSERT(extent_map_in_tree(cur));
	if (!(cur->flags & EXTENT_FLAG_LOGGING))
		list_del_init(&cur->list);
	rb_replace_node(&cur->rb_node, &new->rb_node, &tree->root);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(inode, new, modified);
}

static struct extent_map *next_extent_map(const struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}

/*
 * Helper for btrfs_get_extent(). Given an existing extent in the tree, where
 * the existing extent is the nearest extent to map_start, and an extent that
 * you want to insert, deal with overlap and insert the best fitted new extent
 * into the tree.
 */
static noinline int merge_extent_mapping(struct btrfs_inode *inode,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	if (map_start < em->start || map_start >= extent_map_end(em))
		return -EINVAL;

	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
		em->offset += start_diff;
	return add_extent_mapping(inode, em, 0);
}

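/*
 * Example of the clamping done above (illustrative values): inserting an em
 * for [0, 16K) when the tree already has an extent map for [4K, 8K) and
 * map_start is 8K. @existing is [4K, 8K), so prev = existing and next is
 * whatever follows it (assume nothing does). The new em is then trimmed to
 * [extent_map_end(prev), extent_map_end(em)) = [8K, 16K), and for a regular
 * extent em->offset grows by the 8K that was cut off the front.
 */
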
/*
 * Add an extent mapping to an inode's extent map tree.
 *
 * @inode: target inode
 * @em_in: extent we are inserting
 * @start: start of the logical range btrfs_get_extent() is requesting
 * @len:   length of the logical range btrfs_get_extent() is requesting
 *
 * Note that @em_in's range may be different from [start, start + len),
 * but they must overlap.
 *
 * Insert @em_in into the inode's extent map tree. In case there is an
 * overlapping range, handle the -EEXIST by either:
 * a) Returning the existing extent in @em_in if @start is within the
 *    existing em.
 * b) Merging the existing extent with @em_in passed in.
 *
 * Return 0 on success, otherwise -EEXIST.
 */
int btrfs_add_extent_mapping(struct btrfs_inode *inode,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	/*
	 * Tree-checker should have rejected any inline extent with non-zero
	 * file offset. Here just do a sanity check.
	 */
	if (em->disk_bytenr == EXTENT_MAP_INLINE)
		ASSERT(em->start == 0);

	ret = add_extent_mapping(inode, em, 0);
	/*
	 * It is possible that someone inserted the extent into the tree while
	 * we had the lock dropped. It is also possible that an overlapping map
	 * exists in the tree.
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		existing = search_extent_mapping(&inode->extent_tree, start, len);

		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

		/*
		 * existing will always be non-NULL, since there must be an
		 * extent map causing the -EEXIST.
		 */
		if (start >= existing->start &&
		    start < extent_map_end(existing)) {
			free_extent_map(em);
			*em_in = existing;
			ret = 0;
		} else {
			u64 orig_start = em->start;
			u64 orig_len = em->len;

			/*
			 * The existing extent map is the one nearest to
			 * the [start, start + len) range which overlaps.
			 */
			ret = merge_extent_mapping(inode, existing, em, start);
			if (WARN_ON(ret)) {
				free_extent_map(em);
				*em_in = NULL;
				btrfs_warn(fs_info,
"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu",
					   existing->start, extent_map_end(existing),
					   orig_start, orig_start + orig_len, start);
			}
			free_extent_map(existing);
		}
	}

	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}

/*
 * Drop all extent maps from a tree in the fastest possible way, rescheduling
 * if needed. This avoids searching the tree, from the root down to the first
 * extent map, before each deletion.
 */
static void drop_all_extent_maps_fast(struct btrfs_inode *inode)
{
	struct extent_map_tree *tree = &inode->extent_tree;
	struct rb_node *node;

	write_lock(&tree->lock);
	node = rb_first(&tree->root);
	while (node) {
		struct extent_map *em;
		struct rb_node *next = rb_next(node);

		em = rb_entry(node, struct extent_map, rb_node);
		em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
		remove_extent_mapping(inode, em);
		free_extent_map(em);

		if (cond_resched_rwlock_write(&tree->lock))
			node = rb_first(&tree->root);
		else
			node = next;
	}
	write_unlock(&tree->lock);
}

/*
 * Drop all extent maps in a given range.
 *
 * @inode:       The target inode.
 * @start:       Start offset of the range.
 * @end:         End offset of the range (inclusive value).
 * @skip_pinned: Indicate if pinned extent maps should be ignored or not.
 *
 * This drops all the extent maps that intersect the given range [@start, @end].
 * Extent maps that partially overlap the range, extending before or beyond it,
 * are split.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
				 bool skip_pinned)
{
	struct extent_map *split;
	struct extent_map *split2;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	u64 len = end - start + 1;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		if (start == 0 && !skip_pinned) {
			drop_all_extent_maps_fast(inode);
			return;
		}
		len = (u64)-1;
	} else {
		/* Make end offset exclusive for use in the loop below. */
		end++;
	}

	/*
	 * It's ok if we fail to allocate the extent maps, see the comment near
	 * the bottom of the loop below. We only need two spare extent maps in
	 * the worst case, where the first extent map that intersects our range
	 * starts before the range and the last extent map that intersects our
	 * range ends after our range (and they might be the same extent map),
	 * because we need to split those two extent maps at the boundaries.
	 */
	split = alloc_extent_map();
	split2 = alloc_extent_map();

	write_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);

	while (em) {
		/* extent_map_end() returns an exclusive value (last byte + 1). */
		const u64 em_end = extent_map_end(em);
		struct extent_map *next_em = NULL;
		u64 gen;
		unsigned long flags;
		bool modified;

		if (em_end < end) {
			next_em = next_extent_map(em);
			if (next_em) {
				if (next_em->start < end)
					refcount_inc(&next_em->refs);
				else
					next_em = NULL;
			}
		}

		if (skip_pinned && (em->flags & EXTENT_FLAG_PINNED)) {
			start = em_end;
			goto next;
		}

		flags = em->flags;
		/*
		 * In case we split the extent map, we want to preserve the
		 * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want
		 * it on the new extent maps.
		 */
		em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
		modified = !list_empty(&em->list);

		/*
		 * The extent map does not cross our target range, so no need to
		 * split it, we can remove it directly.
		 */
		if (em->start >= start && em_end <= end)
			goto remove_em;

		gen = em->generation;

		if (em->start < start) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = em->start;
			split->len = start - em->start;

			if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
				split->disk_bytenr = em->disk_bytenr;
				split->disk_num_bytes = em->disk_num_bytes;
				split->offset = em->offset;
				split->ram_bytes = em->ram_bytes;
			} else {
				split->disk_bytenr = em->disk_bytenr;
				split->disk_num_bytes = 0;
				split->offset = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->flags = flags;
			replace_extent_mapping(inode, em, split, modified);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em_end > end) {
			if (!split) {
				split = split2;
				split2 = NULL;
				if (!split)
					goto remove_em;
			}
			split->start = end;
			split->len = em_end - end;
			split->disk_bytenr = em->disk_bytenr;
			split->flags = flags;
			split->generation = gen;

			if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) {
				split->disk_num_bytes = em->disk_num_bytes;
				split->offset = em->offset + end - em->start;
				split->ram_bytes = em->ram_bytes;
			} else {
				split->disk_num_bytes = 0;
				split->offset = 0;
				split->ram_bytes = split->len;
			}

			if (extent_map_in_tree(em)) {
				replace_extent_mapping(inode, em, split, modified);
			} else {
				int ret;

				ret = add_extent_mapping(inode, split, modified);
				/* Logic error, shouldn't happen. */
				ASSERT(ret == 0);
				if (WARN_ON(ret != 0) && modified)
					btrfs_set_inode_full_sync(inode);
			}
			free_extent_map(split);
			split = NULL;
		}
remove_em:
		if (extent_map_in_tree(em)) {
			/*
			 * If the extent map is still in the tree it means that
			 * either of the following is true:
			 *
			 * 1) It fits entirely in our range (doesn't end beyond
			 *    it or starts before it);
			 *
			 * 2) It starts before our range and/or ends after our
			 *    range, and we were not able to allocate the extent
			 *    maps for split operations, @split and @split2.
			 *
			 * If we are at case 2) then we just remove the entire
			 * extent map - this is fine since anyone who needs to
			 * access the subranges outside our range will just
			 * load it again from the subvolume tree's file extent
			 * item. However if the extent map was in the list of
			 * modified extents, then we must mark the inode for a
			 * full fsync, otherwise a fast fsync will miss this
			 * extent if it's new and needs to be logged.
			 */
			if ((em->start < start || em_end > end) && modified) {
				ASSERT(!split);
				btrfs_set_inode_full_sync(inode);
			}
			remove_extent_mapping(inode, em);
		}

		/*
		 * Once for the tree reference (we replaced or removed the
		 * extent map from the tree).
		 */
		free_extent_map(em);
next:
		/* Once for us (for our lookup reference). */
		free_extent_map(em);

		em = next_em;
	}

	write_unlock(&em_tree->lock);

	free_extent_map(split);
	free_extent_map(split2);
}

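/*
 * Example of the splitting above (illustrative values): dropping the range
 * [4K, 8K - 1] from an inode with a single extent map for [0, 12K) leaves two
 * extent maps behind, one for [0, 4K) and one for [8K, 12K). For a regular
 * extent the second one gets its offset into the data extent (em->offset)
 * bumped by 8K (end - em->start), while the underlying data extent itself is
 * untouched.
 */
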
/*
 * Replace a range in the inode's extent map tree with a new extent map.
 *
 * @inode:    The target inode.
 * @new_em:   The new extent map to add to the inode's extent map tree.
 * @modified: Indicate if the new extent map should be added to the list of
 *            modified extents (for fast fsync tracking).
 *
 * Drops all the extent maps in the inode's extent map tree that intersect the
 * range of the new extent map and adds the new extent map to the tree.
 * The caller should have locked an appropriate file range in the inode's io
 * tree before calling this function.
 */
int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
				   struct extent_map *new_em,
				   bool modified)
{
	const u64 end = new_em->start + new_em->len - 1;
	struct extent_map_tree *tree = &inode->extent_tree;
	int ret;

	ASSERT(!extent_map_in_tree(new_em));

	/*
	 * The caller has locked an appropriate file range in the inode's io
	 * tree, but getting -EEXIST when adding the new extent map can still
	 * happen in case there are extents that partially cover the range, and
	 * this is due to two tasks operating on different parts of the extent.
	 * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from
	 * btrfs_get_extent") for an example and details.
	 */
	do {
		btrfs_drop_extent_map_range(inode, new_em->start, end, false);
		write_lock(&tree->lock);
		ret = add_extent_mapping(inode, new_em, modified);
		write_unlock(&tree->lock);
	} while (ret == -EEXIST);

	return ret;
}

/*
 * Split off the first @pre bytes from the extent_map at [start, start + len),
 * and set the block start (disk_bytenr) of the first part to @new_logical.
 *
 * This function is used when an ordered_extent needs to be split.
 */
int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
		     u64 new_logical)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	struct extent_map *split_pre = NULL;
	struct extent_map *split_mid = NULL;
	int ret = 0;
	unsigned long flags;

	ASSERT(pre != 0);
	ASSERT(pre < len);

	split_pre = alloc_extent_map();
	if (!split_pre)
		return -ENOMEM;
	split_mid = alloc_extent_map();
	if (!split_mid) {
		ret = -ENOMEM;
		goto out_free_pre;
	}

	lock_extent(&inode->io_tree, start, start + len - 1, NULL);
	write_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (!em) {
		ret = -EIO;
		goto out_unlock;
	}

	ASSERT(em->len == len);
	ASSERT(!extent_map_is_compressed(em));
	ASSERT(em->disk_bytenr < EXTENT_MAP_LAST_BYTE);
	ASSERT(em->flags & EXTENT_FLAG_PINNED);
	ASSERT(!(em->flags & EXTENT_FLAG_LOGGING));
	ASSERT(!list_empty(&em->list));

	flags = em->flags;
	em->flags &= ~EXTENT_FLAG_PINNED;

	/* First, replace the em with a new extent_map starting from em->start. */
	split_pre->start = em->start;
	split_pre->len = pre;
	split_pre->disk_bytenr = new_logical;
	split_pre->disk_num_bytes = split_pre->len;
	split_pre->offset = 0;
	split_pre->ram_bytes = split_pre->len;
	split_pre->flags = flags;
	split_pre->generation = em->generation;

	replace_extent_mapping(inode, em, split_pre, 1);

	/*
	 * Now we only have an extent_map at:
	 *     [em->start, em->start + pre]
	 */

	/* Insert the middle extent_map. */
	split_mid->start = em->start + pre;
	split_mid->len = em->len - pre;
	split_mid->disk_bytenr = extent_map_block_start(em) + pre;
	split_mid->disk_num_bytes = split_mid->len;
	split_mid->offset = 0;
	split_mid->ram_bytes = split_mid->len;
	split_mid->flags = flags;
	split_mid->generation = em->generation;
	add_extent_mapping(inode, split_mid, 1);

	/* Once for us */
	free_extent_map(em);
	/* Once for the tree */
	free_extent_map(em);

out_unlock:
	write_unlock(&em_tree->lock);
	unlock_extent(&inode->io_tree, start, start + len - 1, NULL);
	free_extent_map(split_mid);
out_free_pre:
	free_extent_map(split_pre);
	return ret;
}

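/*
 * Context for one extent map shrinker pass: @nr_to_scan limits how many
 * extent maps we examine, @scanned counts how many we actually examined, and
 * @last_ino/@last_root record where the scan stopped so the next pass can
 * resume from there.
 */
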
struct btrfs_em_shrink_ctx {
	long nr_to_scan;
	long scanned;
	u64 last_ino;
	u64 last_root;
};

static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_ctx *ctx)
{
	const u64 cur_fs_gen = btrfs_get_fs_generation(inode->root->fs_info);
	struct extent_map_tree *tree = &inode->extent_tree;
	long nr_dropped = 0;
	struct rb_node *node;

	/*
	 * Take the mmap lock so that we serialize with the inode logging phase
	 * of fsync because we may need to set the full sync flag on the inode,
	 * in case we have to remove extent maps in the tree's list of modified
	 * extents. If we set the full sync flag in the inode while an fsync is
	 * in progress, we may risk missing new extents because before the flag
	 * is set, fsync decides to only wait for writeback to complete and then
	 * during inode logging it sees the flag set and uses the subvolume tree
	 * to find new extents, which may not be there yet because ordered
	 * extents haven't completed yet.
	 *
	 * We also do a try lock because otherwise we could deadlock. This is
	 * because the shrinker for this filesystem may be invoked while we are
	 * in a path that is holding the mmap lock in write mode. For example in
	 * a reflink operation while COWing an extent buffer, when allocating
	 * pages for a new extent buffer and under memory pressure, the shrinker
	 * may be invoked, and therefore we would deadlock by attempting to read
	 * lock the mmap lock while we are holding already a write lock on it.
	 */
	if (!down_read_trylock(&inode->i_mmap_lock))
		return 0;

	/*
	 * We want to be fast so if the lock is busy we don't want to spend time
	 * waiting for it - either some task is about to do IO for the inode or
	 * we may have another task shrinking extent maps, here in this code, so
	 * skip this inode.
	 */
	if (!write_trylock(&tree->lock)) {
		up_read(&inode->i_mmap_lock);
		return 0;
	}

	node = rb_first(&tree->root);
	while (node) {
		struct rb_node *next = rb_next(node);
		struct extent_map *em;

		em = rb_entry(node, struct extent_map, rb_node);
		ctx->scanned++;

		if (em->flags & EXTENT_FLAG_PINNED)
			goto next;

		/*
		 * If the extent map is in the list of modified extents (new)
		 * and its generation is the same as (or greater than) the
		 * current fs generation, it means it was not yet persisted so
		 * we have to set the full sync flag so that the next fsync
		 * will not miss it.
		 */
		if (!list_empty(&em->list) && em->generation >= cur_fs_gen)
			btrfs_set_inode_full_sync(inode);

		remove_extent_mapping(inode, em);
		trace_btrfs_extent_map_shrinker_remove_em(inode, em);
		/* Drop the reference for the tree. */
		free_extent_map(em);
		nr_dropped++;
next:
		if (ctx->scanned >= ctx->nr_to_scan)
			break;

		/*
		 * Stop if we need to reschedule or there's contention on the
		 * lock. This is to avoid slowing other tasks trying to take the
		 * lock.
		 */
		if (need_resched() || rwlock_needbreak(&tree->lock))
			break;
		node = next;
	}
	write_unlock(&tree->lock);
	up_read(&inode->i_mmap_lock);

	return nr_dropped;
}

static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx)
{
	struct btrfs_inode *inode;
	long nr_dropped = 0;
	u64 min_ino = ctx->last_ino + 1;

	inode = btrfs_find_first_inode(root, min_ino);
	while (inode) {
		nr_dropped += btrfs_scan_inode(inode, ctx);

		min_ino = btrfs_ino(inode) + 1;
		ctx->last_ino = btrfs_ino(inode);
		btrfs_add_delayed_iput(inode);

		if (ctx->scanned >= ctx->nr_to_scan)
			break;

		cond_resched();

		inode = btrfs_find_first_inode(root, min_ino);
	}

	if (inode) {
		/*
		 * There are still inodes in this root or we happened to process
		 * the last one and reached the scan limit. In either case set
		 * the current root to this one, so we'll resume from the next
		 * inode if there is one or we will find out this was the last
		 * one and move to the next root.
		 */
		ctx->last_root = btrfs_root_id(root);
	} else {
		/*
		 * No more inodes in this root, set extent_map_shrinker_last_ino
		 * to 0 so that when processing the next root we start from its
		 * first inode.
		 */
		ctx->last_ino = 0;
		ctx->last_root = btrfs_root_id(root) + 1;
	}

	return nr_dropped;
}

long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
{
	struct btrfs_em_shrink_ctx ctx;
	u64 start_root_id;
	u64 next_root_id;
	bool cycled = false;
	long nr_dropped = 0;

	ctx.scanned = 0;
	ctx.nr_to_scan = nr_to_scan;

	/*
	 * In case we have multiple tasks running this shrinker, make the next
	 * one start from the next inode in case it starts before we finish.
	 */
	spin_lock(&fs_info->extent_map_shrinker_lock);
	ctx.last_ino = fs_info->extent_map_shrinker_last_ino;
	fs_info->extent_map_shrinker_last_ino++;
	ctx.last_root = fs_info->extent_map_shrinker_last_root;
	spin_unlock(&fs_info->extent_map_shrinker_lock);

	start_root_id = ctx.last_root;
	next_root_id = ctx.last_root;

	if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) {
		s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);

		trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr_to_scan,
							   nr, ctx.last_root,
							   ctx.last_ino);
	}

	while (ctx.scanned < ctx.nr_to_scan) {
		struct btrfs_root *root;
		unsigned long count;

		cond_resched();

		spin_lock(&fs_info->fs_roots_radix_lock);
		count = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					       (void **)&root,
					       (unsigned long)next_root_id, 1);
		if (count == 0) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			if (start_root_id > 0 && !cycled) {
				next_root_id = 0;
				ctx.last_root = 0;
				ctx.last_ino = 0;
				cycled = true;
				continue;
			}
			break;
		}
		next_root_id = btrfs_root_id(root) + 1;
		root = btrfs_grab_root(root);
		spin_unlock(&fs_info->fs_roots_radix_lock);

		if (!root)
			continue;

		if (is_fstree(btrfs_root_id(root)))
			nr_dropped += btrfs_scan_root(root, &ctx);

		btrfs_put_root(root);
	}

	/*
	 * In case of multiple tasks running this extent map shrinking code this
	 * isn't perfect but it's simple and silences things like KCSAN. It's
	 * not possible to know which task made more progress because we can
	 * cycle back to the first root and first inode if it's not the first
	 * time the shrinker ran, see the above logic. Also a task that started
	 * later may finish earlier than another task and made less progress. So
	 * make this simple and update to the progress of the last task that
	 * finished, with the occasional possibility of having two consecutive
	 * runs of the shrinker process the same inodes.
	 */
	spin_lock(&fs_info->extent_map_shrinker_lock);
	fs_info->extent_map_shrinker_last_ino = ctx.last_ino;
	fs_info->extent_map_shrinker_last_root = ctx.last_root;
	spin_unlock(&fs_info->extent_map_shrinker_lock);

	if (trace_btrfs_extent_map_shrinker_scan_exit_enabled()) {
		s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);

		trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped,
							  nr, ctx.last_root,
							  ctx.last_ino);
	}

	return nr_dropped;
}