1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2008 Red Hat. All rights reserved. 4 */ 5 6 #include <linux/pagemap.h> 7 #include <linux/sched.h> 8 #include <linux/sched/signal.h> 9 #include <linux/slab.h> 10 #include <linux/math64.h> 11 #include <linux/ratelimit.h> 12 #include <linux/error-injection.h> 13 #include <linux/sched/mm.h> 14 #include <linux/string_choices.h> 15 #include "extent-tree.h" 16 #include "fs.h" 17 #include "messages.h" 18 #include "misc.h" 19 #include "free-space-cache.h" 20 #include "transaction.h" 21 #include "disk-io.h" 22 #include "extent_io.h" 23 #include "space-info.h" 24 #include "block-group.h" 25 #include "discard.h" 26 #include "subpage.h" 27 #include "inode-item.h" 28 #include "accessors.h" 29 #include "file-item.h" 30 #include "file.h" 31 #include "super.h" 32 33 #define BITS_PER_BITMAP (PAGE_SIZE * 8UL) 34 #define MAX_CACHE_BYTES_PER_GIG SZ_64K 35 #define FORCE_EXTENT_THRESHOLD SZ_1M 36 37 static struct kmem_cache *btrfs_free_space_cachep; 38 static struct kmem_cache *btrfs_free_space_bitmap_cachep; 39 40 struct btrfs_trim_range { 41 u64 start; 42 u64 bytes; 43 struct list_head list; 44 }; 45 46 static int link_free_space(struct btrfs_free_space_ctl *ctl, 47 struct btrfs_free_space *info); 48 static void unlink_free_space(struct btrfs_free_space_ctl *ctl, 49 struct btrfs_free_space *info, bool update_stat); 50 static int search_bitmap(struct btrfs_free_space_ctl *ctl, 51 struct btrfs_free_space *bitmap_info, u64 *offset, 52 u64 *bytes, bool for_alloc); 53 static void free_bitmap(struct btrfs_free_space_ctl *ctl, 54 struct btrfs_free_space *bitmap_info); 55 static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, 56 struct btrfs_free_space *info, u64 offset, 57 u64 bytes, bool update_stats); 58 59 static void btrfs_crc32c_final(u32 crc, u8 *result) 60 { 61 put_unaligned_le32(~crc, result); 62 } 63 64 static void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) 65 { 66 struct btrfs_free_space *info; 67 struct rb_node *node; 68 69 while ((node = rb_last(&ctl->free_space_offset)) != NULL) { 70 info = rb_entry(node, struct btrfs_free_space, offset_index); 71 if (!info->bitmap) { 72 unlink_free_space(ctl, info, true); 73 kmem_cache_free(btrfs_free_space_cachep, info); 74 } else { 75 free_bitmap(ctl, info); 76 } 77 78 cond_resched_lock(&ctl->tree_lock); 79 } 80 } 81 82 static struct inode *__lookup_free_space_inode(struct btrfs_root *root, 83 struct btrfs_path *path, 84 u64 offset) 85 { 86 struct btrfs_key key; 87 struct btrfs_key location; 88 struct btrfs_disk_key disk_key; 89 struct btrfs_free_space_header *header; 90 struct extent_buffer *leaf; 91 struct btrfs_inode *inode; 92 unsigned nofs_flag; 93 int ret; 94 95 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 96 key.type = 0; 97 key.offset = offset; 98 99 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 100 if (ret < 0) 101 return ERR_PTR(ret); 102 if (ret > 0) { 103 btrfs_release_path(path); 104 return ERR_PTR(-ENOENT); 105 } 106 107 leaf = path->nodes[0]; 108 header = btrfs_item_ptr(leaf, path->slots[0], 109 struct btrfs_free_space_header); 110 btrfs_free_space_key(leaf, header, &disk_key); 111 btrfs_disk_key_to_cpu(&location, &disk_key); 112 btrfs_release_path(path); 113 114 /* 115 * We are often under a trans handle at this point, so we need to make 116 * sure NOFS is set to keep us from deadlocking. 
117 */ 118 nofs_flag = memalloc_nofs_save(); 119 inode = btrfs_iget_path(location.objectid, root, path); 120 btrfs_release_path(path); 121 memalloc_nofs_restore(nofs_flag); 122 if (IS_ERR(inode)) 123 return ERR_CAST(inode); 124 125 mapping_set_gfp_mask(inode->vfs_inode.i_mapping, 126 mapping_gfp_constraint(inode->vfs_inode.i_mapping, 127 ~(__GFP_FS | __GFP_HIGHMEM))); 128 129 return &inode->vfs_inode; 130 } 131 132 struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group, 133 struct btrfs_path *path) 134 { 135 struct btrfs_fs_info *fs_info = block_group->fs_info; 136 struct inode *inode = NULL; 137 u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; 138 139 spin_lock(&block_group->lock); 140 if (block_group->inode) 141 inode = igrab(&block_group->inode->vfs_inode); 142 spin_unlock(&block_group->lock); 143 if (inode) 144 return inode; 145 146 inode = __lookup_free_space_inode(fs_info->tree_root, path, 147 block_group->start); 148 if (IS_ERR(inode)) 149 return inode; 150 151 spin_lock(&block_group->lock); 152 if (!((BTRFS_I(inode)->flags & flags) == flags)) { 153 btrfs_info(fs_info, "Old style space inode found, converting."); 154 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM | 155 BTRFS_INODE_NODATACOW; 156 block_group->disk_cache_state = BTRFS_DC_CLEAR; 157 } 158 159 if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) 160 block_group->inode = BTRFS_I(igrab(inode)); 161 spin_unlock(&block_group->lock); 162 163 return inode; 164 } 165 166 static int __create_free_space_inode(struct btrfs_root *root, 167 struct btrfs_trans_handle *trans, 168 struct btrfs_path *path, 169 u64 ino, u64 offset) 170 { 171 struct btrfs_key key; 172 struct btrfs_disk_key disk_key; 173 struct btrfs_free_space_header *header; 174 struct btrfs_inode_item *inode_item; 175 struct extent_buffer *leaf; 176 /* We inline CRCs for the free disk space cache */ 177 const u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC | 178 BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW; 179 int ret; 180 181 ret = btrfs_insert_empty_inode(trans, root, path, ino); 182 if (ret) 183 return ret; 184 185 leaf = path->nodes[0]; 186 inode_item = btrfs_item_ptr(leaf, path->slots[0], 187 struct btrfs_inode_item); 188 btrfs_item_key(leaf, &disk_key, path->slots[0]); 189 memzero_extent_buffer(leaf, (unsigned long)inode_item, 190 sizeof(*inode_item)); 191 btrfs_set_inode_generation(leaf, inode_item, trans->transid); 192 btrfs_set_inode_size(leaf, inode_item, 0); 193 btrfs_set_inode_nbytes(leaf, inode_item, 0); 194 btrfs_set_inode_uid(leaf, inode_item, 0); 195 btrfs_set_inode_gid(leaf, inode_item, 0); 196 btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600); 197 btrfs_set_inode_flags(leaf, inode_item, flags); 198 btrfs_set_inode_nlink(leaf, inode_item, 1); 199 btrfs_set_inode_transid(leaf, inode_item, trans->transid); 200 btrfs_set_inode_block_group(leaf, inode_item, offset); 201 btrfs_release_path(path); 202 203 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 204 key.type = 0; 205 key.offset = offset; 206 ret = btrfs_insert_empty_item(trans, root, path, &key, 207 sizeof(struct btrfs_free_space_header)); 208 if (ret < 0) { 209 btrfs_release_path(path); 210 return ret; 211 } 212 213 leaf = path->nodes[0]; 214 header = btrfs_item_ptr(leaf, path->slots[0], 215 struct btrfs_free_space_header); 216 memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header)); 217 btrfs_set_free_space_key(leaf, header, &disk_key); 218 btrfs_release_path(path); 219 220 return 0; 221 } 222 223 int 
create_free_space_inode(struct btrfs_trans_handle *trans, 224 struct btrfs_block_group *block_group, 225 struct btrfs_path *path) 226 { 227 int ret; 228 u64 ino; 229 230 ret = btrfs_get_free_objectid(trans->fs_info->tree_root, &ino); 231 if (ret < 0) 232 return ret; 233 234 return __create_free_space_inode(trans->fs_info->tree_root, trans, path, 235 ino, block_group->start); 236 } 237 238 /* 239 * inode is an optional sink: if it is NULL, btrfs_remove_free_space_inode 240 * handles lookup, otherwise it takes ownership and iputs the inode. 241 * Don't reuse an inode pointer after passing it into this function. 242 */ 243 int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans, 244 struct inode *inode, 245 struct btrfs_block_group *block_group) 246 { 247 BTRFS_PATH_AUTO_FREE(path); 248 struct btrfs_key key; 249 int ret = 0; 250 251 path = btrfs_alloc_path(); 252 if (!path) 253 return -ENOMEM; 254 255 if (!inode) 256 inode = lookup_free_space_inode(block_group, path); 257 if (IS_ERR(inode)) { 258 if (PTR_ERR(inode) != -ENOENT) 259 ret = PTR_ERR(inode); 260 return ret; 261 } 262 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 263 if (ret) { 264 btrfs_add_delayed_iput(BTRFS_I(inode)); 265 return ret; 266 } 267 clear_nlink(inode); 268 /* One for the block groups ref */ 269 spin_lock(&block_group->lock); 270 if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) { 271 block_group->inode = NULL; 272 spin_unlock(&block_group->lock); 273 iput(inode); 274 } else { 275 spin_unlock(&block_group->lock); 276 } 277 /* One for the lookup ref */ 278 btrfs_add_delayed_iput(BTRFS_I(inode)); 279 280 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 281 key.type = 0; 282 key.offset = block_group->start; 283 ret = btrfs_search_slot(trans, trans->fs_info->tree_root, &key, path, 284 -1, 1); 285 if (ret) { 286 if (ret > 0) 287 ret = 0; 288 return ret; 289 } 290 return btrfs_del_item(trans, trans->fs_info->tree_root, path); 291 } 292 293 int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans, 294 struct btrfs_block_group *block_group, 295 struct inode *vfs_inode) 296 { 297 struct btrfs_truncate_control control = { 298 .inode = BTRFS_I(vfs_inode), 299 .new_size = 0, 300 .ino = btrfs_ino(BTRFS_I(vfs_inode)), 301 .min_type = BTRFS_EXTENT_DATA_KEY, 302 .clear_extent_range = true, 303 }; 304 struct btrfs_inode *inode = BTRFS_I(vfs_inode); 305 struct btrfs_root *root = inode->root; 306 struct extent_state *cached_state = NULL; 307 int ret = 0; 308 bool locked = false; 309 310 if (block_group) { 311 struct btrfs_path *path = btrfs_alloc_path(); 312 313 if (!path) { 314 ret = -ENOMEM; 315 goto fail; 316 } 317 locked = true; 318 mutex_lock(&trans->transaction->cache_write_mutex); 319 if (!list_empty(&block_group->io_list)) { 320 list_del_init(&block_group->io_list); 321 322 btrfs_wait_cache_io(trans, block_group, path); 323 btrfs_put_block_group(block_group); 324 } 325 326 /* 327 * now that we've truncated the cache away, its no longer 328 * setup or written 329 */ 330 spin_lock(&block_group->lock); 331 block_group->disk_cache_state = BTRFS_DC_CLEAR; 332 spin_unlock(&block_group->lock); 333 btrfs_free_path(path); 334 } 335 336 btrfs_i_size_write(inode, 0); 337 truncate_pagecache(vfs_inode, 0); 338 339 lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); 340 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false); 341 342 /* 343 * We skip the throttling logic for free space cache inodes, so we don't 344 * need to check for -EAGAIN. 
345 */ 346 ret = btrfs_truncate_inode_items(trans, root, &control); 347 348 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); 349 btrfs_inode_safe_disk_i_size_write(inode, control.last_size); 350 351 unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); 352 if (ret) 353 goto fail; 354 355 ret = btrfs_update_inode(trans, inode); 356 357 fail: 358 if (locked) 359 mutex_unlock(&trans->transaction->cache_write_mutex); 360 if (ret) 361 btrfs_abort_transaction(trans, ret); 362 363 return ret; 364 } 365 366 static void readahead_cache(struct inode *inode) 367 { 368 struct file_ra_state ra; 369 unsigned long last_index; 370 371 file_ra_state_init(&ra, inode->i_mapping); 372 last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; 373 374 page_cache_sync_readahead(inode->i_mapping, &ra, NULL, 0, last_index); 375 } 376 377 static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode, 378 int write) 379 { 380 int num_pages; 381 382 num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 383 384 /* Make sure we can fit our crcs and generation into the first page */ 385 if (write && (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE) 386 return -ENOSPC; 387 388 memset(io_ctl, 0, sizeof(struct btrfs_io_ctl)); 389 390 io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS); 391 if (!io_ctl->pages) 392 return -ENOMEM; 393 394 io_ctl->num_pages = num_pages; 395 io_ctl->fs_info = inode_to_fs_info(inode); 396 io_ctl->inode = inode; 397 398 return 0; 399 } 400 ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO); 401 402 static void io_ctl_free(struct btrfs_io_ctl *io_ctl) 403 { 404 kfree(io_ctl->pages); 405 io_ctl->pages = NULL; 406 } 407 408 static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl) 409 { 410 if (io_ctl->cur) { 411 io_ctl->cur = NULL; 412 io_ctl->orig = NULL; 413 } 414 } 415 416 static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear) 417 { 418 ASSERT(io_ctl->index < io_ctl->num_pages); 419 io_ctl->page = io_ctl->pages[io_ctl->index++]; 420 io_ctl->cur = page_address(io_ctl->page); 421 io_ctl->orig = io_ctl->cur; 422 io_ctl->size = PAGE_SIZE; 423 if (clear) 424 clear_page(io_ctl->cur); 425 } 426 427 static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl) 428 { 429 int i; 430 431 io_ctl_unmap_page(io_ctl); 432 433 for (i = 0; i < io_ctl->num_pages; i++) { 434 if (io_ctl->pages[i]) { 435 btrfs_folio_clear_checked(io_ctl->fs_info, 436 page_folio(io_ctl->pages[i]), 437 page_offset(io_ctl->pages[i]), 438 PAGE_SIZE); 439 unlock_page(io_ctl->pages[i]); 440 put_page(io_ctl->pages[i]); 441 } 442 } 443 } 444 445 static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate) 446 { 447 struct folio *folio; 448 struct inode *inode = io_ctl->inode; 449 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 450 int i; 451 452 for (i = 0; i < io_ctl->num_pages; i++) { 453 int ret; 454 455 folio = __filemap_get_folio(inode->i_mapping, i, 456 FGP_LOCK | FGP_ACCESSED | FGP_CREAT, 457 mask); 458 if (IS_ERR(folio)) { 459 io_ctl_drop_pages(io_ctl); 460 return -ENOMEM; 461 } 462 463 ret = set_folio_extent_mapped(folio); 464 if (ret < 0) { 465 folio_unlock(folio); 466 folio_put(folio); 467 io_ctl_drop_pages(io_ctl); 468 return ret; 469 } 470 471 io_ctl->pages[i] = &folio->page; 472 if (uptodate && !folio_test_uptodate(folio)) { 473 btrfs_read_folio(NULL, folio); 474 folio_lock(folio); 475 if (folio->mapping != inode->i_mapping) { 476 btrfs_err(BTRFS_I(inode)->root->fs_info, 477 "free space cache page truncated"); 478 io_ctl_drop_pages(io_ctl); 479 return -EIO; 480 } 
481 if (!folio_test_uptodate(folio)) { 482 btrfs_err(BTRFS_I(inode)->root->fs_info, 483 "error reading free space cache"); 484 io_ctl_drop_pages(io_ctl); 485 return -EIO; 486 } 487 } 488 } 489 490 for (i = 0; i < io_ctl->num_pages; i++) 491 clear_page_dirty_for_io(io_ctl->pages[i]); 492 493 return 0; 494 } 495 496 static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation) 497 { 498 io_ctl_map_page(io_ctl, 1); 499 500 /* 501 * Skip the csum areas. If we don't check crcs then we just have a 502 * 64bit chunk at the front of the first page. 503 */ 504 io_ctl->cur += (sizeof(u32) * io_ctl->num_pages); 505 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); 506 507 put_unaligned_le64(generation, io_ctl->cur); 508 io_ctl->cur += sizeof(u64); 509 } 510 511 static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation) 512 { 513 u64 cache_gen; 514 515 /* 516 * Skip the crc area. If we don't check crcs then we just have a 64bit 517 * chunk at the front of the first page. 518 */ 519 io_ctl->cur += sizeof(u32) * io_ctl->num_pages; 520 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); 521 522 cache_gen = get_unaligned_le64(io_ctl->cur); 523 if (cache_gen != generation) { 524 btrfs_err_rl(io_ctl->fs_info, 525 "space cache generation (%llu) does not match inode (%llu)", 526 cache_gen, generation); 527 io_ctl_unmap_page(io_ctl); 528 return -EIO; 529 } 530 io_ctl->cur += sizeof(u64); 531 return 0; 532 } 533 534 static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index) 535 { 536 u32 *tmp; 537 u32 crc = ~(u32)0; 538 unsigned offset = 0; 539 540 if (index == 0) 541 offset = sizeof(u32) * io_ctl->num_pages; 542 543 crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); 544 btrfs_crc32c_final(crc, (u8 *)&crc); 545 io_ctl_unmap_page(io_ctl); 546 tmp = page_address(io_ctl->pages[0]); 547 tmp += index; 548 *tmp = crc; 549 } 550 551 static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index) 552 { 553 u32 *tmp, val; 554 u32 crc = ~(u32)0; 555 unsigned offset = 0; 556 557 if (index == 0) 558 offset = sizeof(u32) * io_ctl->num_pages; 559 560 tmp = page_address(io_ctl->pages[0]); 561 tmp += index; 562 val = *tmp; 563 564 io_ctl_map_page(io_ctl, 0); 565 crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); 566 btrfs_crc32c_final(crc, (u8 *)&crc); 567 if (val != crc) { 568 btrfs_err_rl(io_ctl->fs_info, 569 "csum mismatch on free space cache"); 570 io_ctl_unmap_page(io_ctl); 571 return -EIO; 572 } 573 574 return 0; 575 } 576 577 static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes, 578 void *bitmap) 579 { 580 struct btrfs_free_space_entry *entry; 581 582 if (!io_ctl->cur) 583 return -ENOSPC; 584 585 entry = io_ctl->cur; 586 put_unaligned_le64(offset, &entry->offset); 587 put_unaligned_le64(bytes, &entry->bytes); 588 entry->type = (bitmap) ? 
BTRFS_FREE_SPACE_BITMAP : 589 BTRFS_FREE_SPACE_EXTENT; 590 io_ctl->cur += sizeof(struct btrfs_free_space_entry); 591 io_ctl->size -= sizeof(struct btrfs_free_space_entry); 592 593 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) 594 return 0; 595 596 io_ctl_set_crc(io_ctl, io_ctl->index - 1); 597 598 /* No more pages to map */ 599 if (io_ctl->index >= io_ctl->num_pages) 600 return 0; 601 602 /* map the next page */ 603 io_ctl_map_page(io_ctl, 1); 604 return 0; 605 } 606 607 static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap) 608 { 609 if (!io_ctl->cur) 610 return -ENOSPC; 611 612 /* 613 * If we aren't at the start of the current page, unmap this one and 614 * map the next one if there is any left. 615 */ 616 if (io_ctl->cur != io_ctl->orig) { 617 io_ctl_set_crc(io_ctl, io_ctl->index - 1); 618 if (io_ctl->index >= io_ctl->num_pages) 619 return -ENOSPC; 620 io_ctl_map_page(io_ctl, 0); 621 } 622 623 copy_page(io_ctl->cur, bitmap); 624 io_ctl_set_crc(io_ctl, io_ctl->index - 1); 625 if (io_ctl->index < io_ctl->num_pages) 626 io_ctl_map_page(io_ctl, 0); 627 return 0; 628 } 629 630 static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl) 631 { 632 /* 633 * If we're not on the boundary we know we've modified the page and we 634 * need to crc the page. 635 */ 636 if (io_ctl->cur != io_ctl->orig) 637 io_ctl_set_crc(io_ctl, io_ctl->index - 1); 638 else 639 io_ctl_unmap_page(io_ctl); 640 641 while (io_ctl->index < io_ctl->num_pages) { 642 io_ctl_map_page(io_ctl, 1); 643 io_ctl_set_crc(io_ctl, io_ctl->index - 1); 644 } 645 } 646 647 static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl, 648 struct btrfs_free_space *entry, u8 *type) 649 { 650 struct btrfs_free_space_entry *e; 651 int ret; 652 653 if (!io_ctl->cur) { 654 ret = io_ctl_check_crc(io_ctl, io_ctl->index); 655 if (ret) 656 return ret; 657 } 658 659 e = io_ctl->cur; 660 entry->offset = get_unaligned_le64(&e->offset); 661 entry->bytes = get_unaligned_le64(&e->bytes); 662 *type = e->type; 663 io_ctl->cur += sizeof(struct btrfs_free_space_entry); 664 io_ctl->size -= sizeof(struct btrfs_free_space_entry); 665 666 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) 667 return 0; 668 669 io_ctl_unmap_page(io_ctl); 670 671 return 0; 672 } 673 674 static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl, 675 struct btrfs_free_space *entry) 676 { 677 int ret; 678 679 ret = io_ctl_check_crc(io_ctl, io_ctl->index); 680 if (ret) 681 return ret; 682 683 copy_page(entry->bitmap, io_ctl->cur); 684 io_ctl_unmap_page(io_ctl); 685 686 return 0; 687 } 688 689 static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) 690 { 691 struct btrfs_block_group *block_group = ctl->block_group; 692 u64 max_bytes; 693 u64 bitmap_bytes; 694 u64 extent_bytes; 695 u64 size = block_group->length; 696 u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; 697 u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); 698 699 max_bitmaps = max_t(u64, max_bitmaps, 1); 700 701 if (ctl->total_bitmaps > max_bitmaps) 702 btrfs_err(block_group->fs_info, 703 "invalid free space control: bg start=%llu len=%llu total_bitmaps=%u unit=%u max_bitmaps=%llu bytes_per_bg=%llu", 704 block_group->start, block_group->length, 705 ctl->total_bitmaps, ctl->unit, max_bitmaps, 706 bytes_per_bg); 707 ASSERT(ctl->total_bitmaps <= max_bitmaps); 708 709 /* 710 * We are trying to keep the total amount of memory used per 1GiB of 711 * space to be MAX_CACHE_BYTES_PER_GIG. 
However, with a reclamation 712 * mechanism of pulling extents >= FORCE_EXTENT_THRESHOLD out of 713 * bitmaps, we may end up using more memory than this. 714 */ 715 if (size < SZ_1G) 716 max_bytes = MAX_CACHE_BYTES_PER_GIG; 717 else 718 max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G); 719 720 bitmap_bytes = ctl->total_bitmaps * ctl->unit; 721 722 /* 723 * we want the extent entry threshold to always be at most 1/2 the max 724 * bytes we can have, or whatever is less than that. 725 */ 726 extent_bytes = max_bytes - bitmap_bytes; 727 extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1); 728 729 ctl->extents_thresh = 730 div_u64(extent_bytes, sizeof(struct btrfs_free_space)); 731 } 732 733 static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, 734 struct btrfs_free_space_ctl *ctl, 735 struct btrfs_path *path, u64 offset) 736 { 737 struct btrfs_fs_info *fs_info = root->fs_info; 738 struct btrfs_free_space_header *header; 739 struct extent_buffer *leaf; 740 struct btrfs_io_ctl io_ctl; 741 struct btrfs_key key; 742 struct btrfs_free_space *e, *n; 743 LIST_HEAD(bitmaps); 744 u64 num_entries; 745 u64 num_bitmaps; 746 u64 generation; 747 u8 type; 748 int ret = 0; 749 750 /* Nothing in the space cache, goodbye */ 751 if (!i_size_read(inode)) 752 return 0; 753 754 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 755 key.type = 0; 756 key.offset = offset; 757 758 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 759 if (ret < 0) 760 return 0; 761 else if (ret > 0) { 762 btrfs_release_path(path); 763 return 0; 764 } 765 766 ret = -1; 767 768 leaf = path->nodes[0]; 769 header = btrfs_item_ptr(leaf, path->slots[0], 770 struct btrfs_free_space_header); 771 num_entries = btrfs_free_space_entries(leaf, header); 772 num_bitmaps = btrfs_free_space_bitmaps(leaf, header); 773 generation = btrfs_free_space_generation(leaf, header); 774 btrfs_release_path(path); 775 776 if (!BTRFS_I(inode)->generation) { 777 btrfs_info(fs_info, 778 "the free space cache file (%llu) is invalid, skip it", 779 offset); 780 return 0; 781 } 782 783 if (BTRFS_I(inode)->generation != generation) { 784 btrfs_err(fs_info, 785 "free space inode generation (%llu) did not match free space cache generation (%llu)", 786 BTRFS_I(inode)->generation, generation); 787 return 0; 788 } 789 790 if (!num_entries) 791 return 0; 792 793 ret = io_ctl_init(&io_ctl, inode, 0); 794 if (ret) 795 return ret; 796 797 readahead_cache(inode); 798 799 ret = io_ctl_prepare_pages(&io_ctl, true); 800 if (ret) 801 goto out; 802 803 ret = io_ctl_check_crc(&io_ctl, 0); 804 if (ret) 805 goto free_cache; 806 807 ret = io_ctl_check_generation(&io_ctl, generation); 808 if (ret) 809 goto free_cache; 810 811 while (num_entries) { 812 e = kmem_cache_zalloc(btrfs_free_space_cachep, 813 GFP_NOFS); 814 if (!e) { 815 ret = -ENOMEM; 816 goto free_cache; 817 } 818 819 ret = io_ctl_read_entry(&io_ctl, e, &type); 820 if (ret) { 821 kmem_cache_free(btrfs_free_space_cachep, e); 822 goto free_cache; 823 } 824 825 if (!e->bytes) { 826 ret = -1; 827 kmem_cache_free(btrfs_free_space_cachep, e); 828 goto free_cache; 829 } 830 831 if (type == BTRFS_FREE_SPACE_EXTENT) { 832 spin_lock(&ctl->tree_lock); 833 ret = link_free_space(ctl, e); 834 spin_unlock(&ctl->tree_lock); 835 if (ret) { 836 btrfs_err(fs_info, 837 "Duplicate entries in free space cache, dumping"); 838 kmem_cache_free(btrfs_free_space_cachep, e); 839 goto free_cache; 840 } 841 } else { 842 ASSERT(num_bitmaps); 843 num_bitmaps--; 844 e->bitmap = kmem_cache_zalloc( 845 
btrfs_free_space_bitmap_cachep, GFP_NOFS); 846 if (!e->bitmap) { 847 ret = -ENOMEM; 848 kmem_cache_free( 849 btrfs_free_space_cachep, e); 850 goto free_cache; 851 } 852 spin_lock(&ctl->tree_lock); 853 ret = link_free_space(ctl, e); 854 if (ret) { 855 spin_unlock(&ctl->tree_lock); 856 btrfs_err(fs_info, 857 "Duplicate entries in free space cache, dumping"); 858 kmem_cache_free(btrfs_free_space_bitmap_cachep, e->bitmap); 859 kmem_cache_free(btrfs_free_space_cachep, e); 860 goto free_cache; 861 } 862 ctl->total_bitmaps++; 863 recalculate_thresholds(ctl); 864 spin_unlock(&ctl->tree_lock); 865 list_add_tail(&e->list, &bitmaps); 866 } 867 868 num_entries--; 869 } 870 871 io_ctl_unmap_page(&io_ctl); 872 873 /* 874 * We add the bitmaps at the end of the entries in order that 875 * the bitmap entries are added to the cache. 876 */ 877 list_for_each_entry_safe(e, n, &bitmaps, list) { 878 list_del_init(&e->list); 879 ret = io_ctl_read_bitmap(&io_ctl, e); 880 if (ret) 881 goto free_cache; 882 } 883 884 io_ctl_drop_pages(&io_ctl); 885 ret = 1; 886 out: 887 io_ctl_free(&io_ctl); 888 return ret; 889 free_cache: 890 io_ctl_drop_pages(&io_ctl); 891 892 spin_lock(&ctl->tree_lock); 893 __btrfs_remove_free_space_cache(ctl); 894 spin_unlock(&ctl->tree_lock); 895 goto out; 896 } 897 898 static int copy_free_space_cache(struct btrfs_block_group *block_group, 899 struct btrfs_free_space_ctl *ctl) 900 { 901 struct btrfs_free_space *info; 902 struct rb_node *n; 903 int ret = 0; 904 905 while (!ret && (n = rb_first(&ctl->free_space_offset)) != NULL) { 906 info = rb_entry(n, struct btrfs_free_space, offset_index); 907 if (!info->bitmap) { 908 const u64 offset = info->offset; 909 const u64 bytes = info->bytes; 910 911 unlink_free_space(ctl, info, true); 912 spin_unlock(&ctl->tree_lock); 913 kmem_cache_free(btrfs_free_space_cachep, info); 914 ret = btrfs_add_free_space(block_group, offset, bytes); 915 spin_lock(&ctl->tree_lock); 916 } else { 917 u64 offset = info->offset; 918 u64 bytes = ctl->unit; 919 920 ret = search_bitmap(ctl, info, &offset, &bytes, false); 921 if (ret == 0) { 922 bitmap_clear_bits(ctl, info, offset, bytes, true); 923 spin_unlock(&ctl->tree_lock); 924 ret = btrfs_add_free_space(block_group, offset, 925 bytes); 926 spin_lock(&ctl->tree_lock); 927 } else { 928 free_bitmap(ctl, info); 929 ret = 0; 930 } 931 } 932 cond_resched_lock(&ctl->tree_lock); 933 } 934 return ret; 935 } 936 937 static struct lock_class_key btrfs_free_space_inode_key; 938 939 int load_free_space_cache(struct btrfs_block_group *block_group) 940 { 941 struct btrfs_fs_info *fs_info = block_group->fs_info; 942 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 943 struct btrfs_free_space_ctl tmp_ctl = {}; 944 struct inode *inode; 945 struct btrfs_path *path; 946 int ret = 0; 947 bool matched; 948 u64 used = block_group->used; 949 950 /* 951 * Because we could potentially discard our loaded free space, we want 952 * to load everything into a temporary structure first, and then if it's 953 * valid copy it all into the actual free space ctl. 954 */ 955 btrfs_init_free_space_ctl(block_group, &tmp_ctl); 956 957 /* 958 * If this block group has been marked to be cleared for one reason or 959 * another then we can't trust the on disk cache, so just return. 
960 */ 961 spin_lock(&block_group->lock); 962 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { 963 spin_unlock(&block_group->lock); 964 return 0; 965 } 966 spin_unlock(&block_group->lock); 967 968 path = btrfs_alloc_path(); 969 if (!path) 970 return 0; 971 path->search_commit_root = 1; 972 path->skip_locking = 1; 973 974 /* 975 * We must pass a path with search_commit_root set to btrfs_iget in 976 * order to avoid a deadlock when allocating extents for the tree root. 977 * 978 * When we are COWing an extent buffer from the tree root, when looking 979 * for a free extent, at extent-tree.c:find_free_extent(), we can find 980 * block group without its free space cache loaded. When we find one 981 * we must load its space cache which requires reading its free space 982 * cache's inode item from the root tree. If this inode item is located 983 * in the same leaf that we started COWing before, then we end up in 984 * deadlock on the extent buffer (trying to read lock it when we 985 * previously write locked it). 986 * 987 * It's safe to read the inode item using the commit root because 988 * block groups, once loaded, stay in memory forever (until they are 989 * removed) as well as their space caches once loaded. New block groups 990 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so 991 * we will never try to read their inode item while the fs is mounted. 992 */ 993 inode = lookup_free_space_inode(block_group, path); 994 if (IS_ERR(inode)) { 995 btrfs_free_path(path); 996 return 0; 997 } 998 999 /* We may have converted the inode and made the cache invalid. */ 1000 spin_lock(&block_group->lock); 1001 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { 1002 spin_unlock(&block_group->lock); 1003 btrfs_free_path(path); 1004 goto out; 1005 } 1006 spin_unlock(&block_group->lock); 1007 1008 /* 1009 * Reinitialize the class of struct inode's mapping->invalidate_lock for 1010 * free space inodes to prevent false positives related to locks for normal 1011 * inodes. 1012 */ 1013 lockdep_set_class(&(&inode->i_data)->invalidate_lock, 1014 &btrfs_free_space_inode_key); 1015 1016 ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl, 1017 path, block_group->start); 1018 btrfs_free_path(path); 1019 if (ret <= 0) 1020 goto out; 1021 1022 matched = (tmp_ctl.free_space == (block_group->length - used - 1023 block_group->bytes_super)); 1024 1025 if (matched) { 1026 spin_lock(&tmp_ctl.tree_lock); 1027 ret = copy_free_space_cache(block_group, &tmp_ctl); 1028 spin_unlock(&tmp_ctl.tree_lock); 1029 /* 1030 * ret == 1 means we successfully loaded the free space cache, 1031 * so we need to re-set it here. 1032 */ 1033 if (ret == 0) 1034 ret = 1; 1035 } else { 1036 /* 1037 * We need to call the _locked variant so we don't try to update 1038 * the discard counters. 
1039 */ 1040 spin_lock(&tmp_ctl.tree_lock); 1041 __btrfs_remove_free_space_cache(&tmp_ctl); 1042 spin_unlock(&tmp_ctl.tree_lock); 1043 btrfs_warn(fs_info, 1044 "block group %llu has wrong amount of free space", 1045 block_group->start); 1046 ret = -1; 1047 } 1048 out: 1049 if (ret < 0) { 1050 /* This cache is bogus, make sure it gets cleared */ 1051 spin_lock(&block_group->lock); 1052 block_group->disk_cache_state = BTRFS_DC_CLEAR; 1053 spin_unlock(&block_group->lock); 1054 ret = 0; 1055 1056 btrfs_warn(fs_info, 1057 "failed to load free space cache for block group %llu, rebuilding it now", 1058 block_group->start); 1059 } 1060 1061 spin_lock(&ctl->tree_lock); 1062 btrfs_discard_update_discardable(block_group); 1063 spin_unlock(&ctl->tree_lock); 1064 iput(inode); 1065 return ret; 1066 } 1067 1068 static noinline_for_stack 1069 int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl, 1070 struct btrfs_free_space_ctl *ctl, 1071 struct btrfs_block_group *block_group, 1072 int *entries, int *bitmaps, 1073 struct list_head *bitmap_list) 1074 { 1075 int ret; 1076 struct btrfs_free_cluster *cluster = NULL; 1077 struct btrfs_free_cluster *cluster_locked = NULL; 1078 struct rb_node *node = rb_first(&ctl->free_space_offset); 1079 struct btrfs_trim_range *trim_entry; 1080 1081 /* Get the cluster for this block_group if it exists */ 1082 if (block_group && !list_empty(&block_group->cluster_list)) { 1083 cluster = list_entry(block_group->cluster_list.next, 1084 struct btrfs_free_cluster, 1085 block_group_list); 1086 } 1087 1088 if (!node && cluster) { 1089 cluster_locked = cluster; 1090 spin_lock(&cluster_locked->lock); 1091 node = rb_first(&cluster->root); 1092 cluster = NULL; 1093 } 1094 1095 /* Write out the extent entries */ 1096 while (node) { 1097 struct btrfs_free_space *e; 1098 1099 e = rb_entry(node, struct btrfs_free_space, offset_index); 1100 *entries += 1; 1101 1102 ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes, 1103 e->bitmap); 1104 if (ret) 1105 goto fail; 1106 1107 if (e->bitmap) { 1108 list_add_tail(&e->list, bitmap_list); 1109 *bitmaps += 1; 1110 } 1111 node = rb_next(node); 1112 if (!node && cluster) { 1113 node = rb_first(&cluster->root); 1114 cluster_locked = cluster; 1115 spin_lock(&cluster_locked->lock); 1116 cluster = NULL; 1117 } 1118 } 1119 if (cluster_locked) { 1120 spin_unlock(&cluster_locked->lock); 1121 cluster_locked = NULL; 1122 } 1123 1124 /* 1125 * Make sure we don't miss any range that was removed from our rbtree 1126 * because trimming is running. Otherwise after a umount+mount (or crash 1127 * after committing the transaction) we would leak free space and get 1128 * an inconsistent free space cache report from fsck. 
1129 */ 1130 list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) { 1131 ret = io_ctl_add_entry(io_ctl, trim_entry->start, 1132 trim_entry->bytes, NULL); 1133 if (ret) 1134 goto fail; 1135 *entries += 1; 1136 } 1137 1138 return 0; 1139 fail: 1140 if (cluster_locked) 1141 spin_unlock(&cluster_locked->lock); 1142 return -ENOSPC; 1143 } 1144 1145 static noinline_for_stack int 1146 update_cache_item(struct btrfs_trans_handle *trans, 1147 struct btrfs_root *root, 1148 struct inode *inode, 1149 struct btrfs_path *path, u64 offset, 1150 int entries, int bitmaps) 1151 { 1152 struct btrfs_key key; 1153 struct btrfs_free_space_header *header; 1154 struct extent_buffer *leaf; 1155 int ret; 1156 1157 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 1158 key.type = 0; 1159 key.offset = offset; 1160 1161 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 1162 if (ret < 0) { 1163 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, 1164 EXTENT_DELALLOC, NULL); 1165 goto fail; 1166 } 1167 leaf = path->nodes[0]; 1168 if (ret > 0) { 1169 struct btrfs_key found_key; 1170 ASSERT(path->slots[0]); 1171 path->slots[0]--; 1172 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1173 if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || 1174 found_key.offset != offset) { 1175 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, 1176 inode->i_size - 1, EXTENT_DELALLOC, 1177 NULL); 1178 btrfs_release_path(path); 1179 goto fail; 1180 } 1181 } 1182 1183 BTRFS_I(inode)->generation = trans->transid; 1184 header = btrfs_item_ptr(leaf, path->slots[0], 1185 struct btrfs_free_space_header); 1186 btrfs_set_free_space_entries(leaf, header, entries); 1187 btrfs_set_free_space_bitmaps(leaf, header, bitmaps); 1188 btrfs_set_free_space_generation(leaf, header, trans->transid); 1189 btrfs_release_path(path); 1190 1191 return 0; 1192 1193 fail: 1194 return -1; 1195 } 1196 1197 static noinline_for_stack int write_pinned_extent_entries( 1198 struct btrfs_trans_handle *trans, 1199 struct btrfs_block_group *block_group, 1200 struct btrfs_io_ctl *io_ctl, 1201 int *entries) 1202 { 1203 u64 start, extent_start, extent_end, len; 1204 struct extent_io_tree *unpin = NULL; 1205 int ret; 1206 1207 if (!block_group) 1208 return 0; 1209 1210 /* 1211 * We want to add any pinned extents to our free space cache 1212 * so we don't leak the space 1213 * 1214 * We shouldn't have switched the pinned extents yet so this is the 1215 * right one 1216 */ 1217 unpin = &trans->transaction->pinned_extents; 1218 1219 start = block_group->start; 1220 1221 while (start < block_group->start + block_group->length) { 1222 if (!find_first_extent_bit(unpin, start, 1223 &extent_start, &extent_end, 1224 EXTENT_DIRTY, NULL)) 1225 return 0; 1226 1227 /* This pinned extent is out of our range */ 1228 if (extent_start >= block_group->start + block_group->length) 1229 return 0; 1230 1231 extent_start = max(extent_start, start); 1232 extent_end = min(block_group->start + block_group->length, 1233 extent_end + 1); 1234 len = extent_end - extent_start; 1235 1236 *entries += 1; 1237 ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL); 1238 if (ret) 1239 return -ENOSPC; 1240 1241 start = extent_end; 1242 } 1243 1244 return 0; 1245 } 1246 1247 static noinline_for_stack int 1248 write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list) 1249 { 1250 struct btrfs_free_space *entry, *next; 1251 int ret; 1252 1253 /* Write out the bitmaps */ 1254 list_for_each_entry_safe(entry, next, bitmap_list, list) { 1255 ret = io_ctl_add_bitmap(io_ctl, 
entry->bitmap); 1256 if (ret) 1257 return -ENOSPC; 1258 list_del_init(&entry->list); 1259 } 1260 1261 return 0; 1262 } 1263 1264 static int flush_dirty_cache(struct inode *inode) 1265 { 1266 int ret; 1267 1268 ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1); 1269 if (ret) 1270 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, 1271 EXTENT_DELALLOC, NULL); 1272 1273 return ret; 1274 } 1275 1276 static void noinline_for_stack 1277 cleanup_bitmap_list(struct list_head *bitmap_list) 1278 { 1279 struct btrfs_free_space *entry, *next; 1280 1281 list_for_each_entry_safe(entry, next, bitmap_list, list) 1282 list_del_init(&entry->list); 1283 } 1284 1285 static void noinline_for_stack 1286 cleanup_write_cache_enospc(struct inode *inode, 1287 struct btrfs_io_ctl *io_ctl, 1288 struct extent_state **cached_state) 1289 { 1290 io_ctl_drop_pages(io_ctl); 1291 unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, 1292 cached_state); 1293 } 1294 1295 static int __btrfs_wait_cache_io(struct btrfs_root *root, 1296 struct btrfs_trans_handle *trans, 1297 struct btrfs_block_group *block_group, 1298 struct btrfs_io_ctl *io_ctl, 1299 struct btrfs_path *path, u64 offset) 1300 { 1301 int ret; 1302 struct inode *inode = io_ctl->inode; 1303 1304 if (!inode) 1305 return 0; 1306 1307 /* Flush the dirty pages in the cache file. */ 1308 ret = flush_dirty_cache(inode); 1309 if (ret) 1310 goto out; 1311 1312 /* Update the cache item to tell everyone this cache file is valid. */ 1313 ret = update_cache_item(trans, root, inode, path, offset, 1314 io_ctl->entries, io_ctl->bitmaps); 1315 out: 1316 if (ret) { 1317 invalidate_inode_pages2(inode->i_mapping); 1318 BTRFS_I(inode)->generation = 0; 1319 if (block_group) 1320 btrfs_debug(root->fs_info, 1321 "failed to write free space cache for block group %llu error %d", 1322 block_group->start, ret); 1323 } 1324 btrfs_update_inode(trans, BTRFS_I(inode)); 1325 1326 if (block_group) { 1327 /* the dirty list is protected by the dirty_bgs_lock */ 1328 spin_lock(&trans->transaction->dirty_bgs_lock); 1329 1330 /* the disk_cache_state is protected by the block group lock */ 1331 spin_lock(&block_group->lock); 1332 1333 /* 1334 * only mark this as written if we didn't get put back on 1335 * the dirty list while waiting for IO. Otherwise our 1336 * cache state won't be right, and we won't get written again 1337 */ 1338 if (!ret && list_empty(&block_group->dirty_list)) 1339 block_group->disk_cache_state = BTRFS_DC_WRITTEN; 1340 else if (ret) 1341 block_group->disk_cache_state = BTRFS_DC_ERROR; 1342 1343 spin_unlock(&block_group->lock); 1344 spin_unlock(&trans->transaction->dirty_bgs_lock); 1345 io_ctl->inode = NULL; 1346 iput(inode); 1347 } 1348 1349 return ret; 1350 1351 } 1352 1353 int btrfs_wait_cache_io(struct btrfs_trans_handle *trans, 1354 struct btrfs_block_group *block_group, 1355 struct btrfs_path *path) 1356 { 1357 return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans, 1358 block_group, &block_group->io_ctl, 1359 path, block_group->start); 1360 } 1361 1362 /* 1363 * Write out cached info to an inode. 1364 * 1365 * @inode: freespace inode we are writing out 1366 * @ctl: free space cache we are going to write out 1367 * @block_group: block_group for this cache if it belongs to a block_group 1368 * @io_ctl: holds context for the io 1369 * @trans: the trans handle 1370 * 1371 * This function writes out a free space cache struct to disk for quick recovery 1372 * on mount. 
This will return 0 if it was successful in writing the cache out, 1373 * or an errno if it was not. 1374 */ 1375 static int __btrfs_write_out_cache(struct inode *inode, 1376 struct btrfs_free_space_ctl *ctl, 1377 struct btrfs_block_group *block_group, 1378 struct btrfs_io_ctl *io_ctl, 1379 struct btrfs_trans_handle *trans) 1380 { 1381 struct extent_state *cached_state = NULL; 1382 LIST_HEAD(bitmap_list); 1383 int entries = 0; 1384 int bitmaps = 0; 1385 int ret; 1386 int must_iput = 0; 1387 int i_size; 1388 1389 if (!i_size_read(inode)) 1390 return -EIO; 1391 1392 WARN_ON(io_ctl->pages); 1393 ret = io_ctl_init(io_ctl, inode, 1); 1394 if (ret) 1395 return ret; 1396 1397 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) { 1398 down_write(&block_group->data_rwsem); 1399 spin_lock(&block_group->lock); 1400 if (block_group->delalloc_bytes) { 1401 block_group->disk_cache_state = BTRFS_DC_WRITTEN; 1402 spin_unlock(&block_group->lock); 1403 up_write(&block_group->data_rwsem); 1404 BTRFS_I(inode)->generation = 0; 1405 ret = 0; 1406 must_iput = 1; 1407 goto out; 1408 } 1409 spin_unlock(&block_group->lock); 1410 } 1411 1412 /* Lock all pages first so we can lock the extent safely. */ 1413 ret = io_ctl_prepare_pages(io_ctl, false); 1414 if (ret) 1415 goto out_unlock; 1416 1417 lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, 1418 &cached_state); 1419 1420 io_ctl_set_generation(io_ctl, trans->transid); 1421 1422 mutex_lock(&ctl->cache_writeout_mutex); 1423 /* Write out the extent entries in the free space cache */ 1424 spin_lock(&ctl->tree_lock); 1425 ret = write_cache_extent_entries(io_ctl, ctl, 1426 block_group, &entries, &bitmaps, 1427 &bitmap_list); 1428 if (ret) 1429 goto out_nospc_locked; 1430 1431 /* 1432 * Some spaces that are freed in the current transaction are pinned, 1433 * they will be added into free space cache after the transaction is 1434 * committed, we shouldn't lose them. 1435 * 1436 * If this changes while we are working we'll get added back to 1437 * the dirty list and redo it. No locking needed 1438 */ 1439 ret = write_pinned_extent_entries(trans, block_group, io_ctl, &entries); 1440 if (ret) 1441 goto out_nospc_locked; 1442 1443 /* 1444 * At last, we write out all the bitmaps and keep cache_writeout_mutex 1445 * locked while doing it because a concurrent trim can be manipulating 1446 * or freeing the bitmap. 1447 */ 1448 ret = write_bitmap_entries(io_ctl, &bitmap_list); 1449 spin_unlock(&ctl->tree_lock); 1450 mutex_unlock(&ctl->cache_writeout_mutex); 1451 if (ret) 1452 goto out_nospc; 1453 1454 /* Zero out the rest of the pages just to make sure */ 1455 io_ctl_zero_remaining_pages(io_ctl); 1456 1457 /* Everything is written out, now we dirty the pages in the file. 
*/ 1458 i_size = i_size_read(inode); 1459 for (int i = 0; i < round_up(i_size, PAGE_SIZE) / PAGE_SIZE; i++) { 1460 u64 dirty_start = i * PAGE_SIZE; 1461 u64 dirty_len = min_t(u64, dirty_start + PAGE_SIZE, i_size) - dirty_start; 1462 1463 ret = btrfs_dirty_folio(BTRFS_I(inode), page_folio(io_ctl->pages[i]), 1464 dirty_start, dirty_len, &cached_state, false); 1465 if (ret < 0) 1466 goto out_nospc; 1467 } 1468 1469 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) 1470 up_write(&block_group->data_rwsem); 1471 /* 1472 * Release the pages and unlock the extent, we will flush 1473 * them out later 1474 */ 1475 io_ctl_drop_pages(io_ctl); 1476 io_ctl_free(io_ctl); 1477 1478 unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, 1479 &cached_state); 1480 1481 /* 1482 * at this point the pages are under IO and we're happy, 1483 * The caller is responsible for waiting on them and updating 1484 * the cache and the inode 1485 */ 1486 io_ctl->entries = entries; 1487 io_ctl->bitmaps = bitmaps; 1488 1489 ret = btrfs_fdatawrite_range(BTRFS_I(inode), 0, (u64)-1); 1490 if (ret) 1491 goto out; 1492 1493 return 0; 1494 1495 out_nospc_locked: 1496 cleanup_bitmap_list(&bitmap_list); 1497 spin_unlock(&ctl->tree_lock); 1498 mutex_unlock(&ctl->cache_writeout_mutex); 1499 1500 out_nospc: 1501 cleanup_write_cache_enospc(inode, io_ctl, &cached_state); 1502 1503 out_unlock: 1504 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) 1505 up_write(&block_group->data_rwsem); 1506 1507 out: 1508 io_ctl->inode = NULL; 1509 io_ctl_free(io_ctl); 1510 if (ret) { 1511 invalidate_inode_pages2(inode->i_mapping); 1512 BTRFS_I(inode)->generation = 0; 1513 } 1514 btrfs_update_inode(trans, BTRFS_I(inode)); 1515 if (must_iput) 1516 iput(inode); 1517 return ret; 1518 } 1519 1520 int btrfs_write_out_cache(struct btrfs_trans_handle *trans, 1521 struct btrfs_block_group *block_group, 1522 struct btrfs_path *path) 1523 { 1524 struct btrfs_fs_info *fs_info = trans->fs_info; 1525 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 1526 struct inode *inode; 1527 int ret = 0; 1528 1529 spin_lock(&block_group->lock); 1530 if (block_group->disk_cache_state < BTRFS_DC_SETUP) { 1531 spin_unlock(&block_group->lock); 1532 return 0; 1533 } 1534 spin_unlock(&block_group->lock); 1535 1536 inode = lookup_free_space_inode(block_group, path); 1537 if (IS_ERR(inode)) 1538 return 0; 1539 1540 ret = __btrfs_write_out_cache(inode, ctl, block_group, 1541 &block_group->io_ctl, trans); 1542 if (ret) { 1543 btrfs_debug(fs_info, 1544 "failed to write free space cache for block group %llu error %d", 1545 block_group->start, ret); 1546 spin_lock(&block_group->lock); 1547 block_group->disk_cache_state = BTRFS_DC_ERROR; 1548 spin_unlock(&block_group->lock); 1549 1550 block_group->io_ctl.inode = NULL; 1551 iput(inode); 1552 } 1553 1554 /* 1555 * if ret == 0 the caller is expected to call btrfs_wait_cache_io 1556 * to wait for IO and put the inode 1557 */ 1558 1559 return ret; 1560 } 1561 1562 static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit, 1563 u64 offset) 1564 { 1565 ASSERT(offset >= bitmap_start); 1566 offset -= bitmap_start; 1567 return (unsigned long)(div_u64(offset, unit)); 1568 } 1569 1570 static inline unsigned long bytes_to_bits(u64 bytes, u32 unit) 1571 { 1572 return (unsigned long)(div_u64(bytes, unit)); 1573 } 1574 1575 static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl, 1576 u64 offset) 1577 { 1578 u64 bitmap_start; 1579 u64 bytes_per_bitmap; 1580 1581 
bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; 1582 bitmap_start = offset - ctl->start; 1583 bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); 1584 bitmap_start *= bytes_per_bitmap; 1585 bitmap_start += ctl->start; 1586 1587 return bitmap_start; 1588 } 1589 1590 static int tree_insert_offset(struct btrfs_free_space_ctl *ctl, 1591 struct btrfs_free_cluster *cluster, 1592 struct btrfs_free_space *new_entry) 1593 { 1594 struct rb_root *root; 1595 struct rb_node **p; 1596 struct rb_node *parent = NULL; 1597 1598 lockdep_assert_held(&ctl->tree_lock); 1599 1600 if (cluster) { 1601 lockdep_assert_held(&cluster->lock); 1602 root = &cluster->root; 1603 } else { 1604 root = &ctl->free_space_offset; 1605 } 1606 1607 p = &root->rb_node; 1608 1609 while (*p) { 1610 struct btrfs_free_space *info; 1611 1612 parent = *p; 1613 info = rb_entry(parent, struct btrfs_free_space, offset_index); 1614 1615 if (new_entry->offset < info->offset) { 1616 p = &(*p)->rb_left; 1617 } else if (new_entry->offset > info->offset) { 1618 p = &(*p)->rb_right; 1619 } else { 1620 /* 1621 * we could have a bitmap entry and an extent entry 1622 * share the same offset. If this is the case, we want 1623 * the extent entry to always be found first if we do a 1624 * linear search through the tree, since we want to have 1625 * the quickest allocation time, and allocating from an 1626 * extent is faster than allocating from a bitmap. So 1627 * if we're inserting a bitmap and we find an entry at 1628 * this offset, we want to go right, or after this entry 1629 * logically. If we are inserting an extent and we've 1630 * found a bitmap, we want to go left, or before 1631 * logically. 1632 */ 1633 if (new_entry->bitmap) { 1634 if (info->bitmap) { 1635 WARN_ON_ONCE(1); 1636 return -EEXIST; 1637 } 1638 p = &(*p)->rb_right; 1639 } else { 1640 if (!info->bitmap) { 1641 WARN_ON_ONCE(1); 1642 return -EEXIST; 1643 } 1644 p = &(*p)->rb_left; 1645 } 1646 } 1647 } 1648 1649 rb_link_node(&new_entry->offset_index, parent, p); 1650 rb_insert_color(&new_entry->offset_index, root); 1651 1652 return 0; 1653 } 1654 1655 /* 1656 * This is a little subtle. We *only* have ->max_extent_size set if we actually 1657 * searched through the bitmap and figured out the largest ->max_extent_size, 1658 * otherwise it's 0. In the case that it's 0 we don't want to tell the 1659 * allocator the wrong thing, we want to use the actual real max_extent_size 1660 * we've found already if it's larger, or we want to use ->bytes. 1661 * 1662 * This matters because find_free_space() will skip entries who's ->bytes is 1663 * less than the required bytes. So if we didn't search down this bitmap, we 1664 * may pick some previous entry that has a smaller ->max_extent_size than we 1665 * have. For example, assume we have two entries, one that has 1666 * ->max_extent_size set to 4K and ->bytes set to 1M. A second entry hasn't set 1667 * ->max_extent_size yet, has ->bytes set to 8K and it's contiguous. We will 1668 * call into find_free_space(), and return with max_extent_size == 4K, because 1669 * that first bitmap entry had ->max_extent_size set, but the second one did 1670 * not. If instead we returned 8K we'd come in searching for 8K, and find the 1671 * 8K contiguous range. 1672 * 1673 * Consider the other case, we have 2 8K chunks in that second entry and still 1674 * don't have ->max_extent_size set. 
We'll return 16K, and the next time the 1675 * allocator comes in it'll fully search our second bitmap, and this time it'll 1676 * get an uptodate value of 8K as the maximum chunk size. Then we'll get the 1677 * right allocation the next loop through. 1678 */ 1679 static inline u64 get_max_extent_size(const struct btrfs_free_space *entry) 1680 { 1681 if (entry->bitmap && entry->max_extent_size) 1682 return entry->max_extent_size; 1683 return entry->bytes; 1684 } 1685 1686 /* 1687 * We want the largest entry to be leftmost, so this is inverted from what you'd 1688 * normally expect. 1689 */ 1690 static bool entry_less(struct rb_node *node, const struct rb_node *parent) 1691 { 1692 const struct btrfs_free_space *entry, *exist; 1693 1694 entry = rb_entry(node, struct btrfs_free_space, bytes_index); 1695 exist = rb_entry(parent, struct btrfs_free_space, bytes_index); 1696 return get_max_extent_size(exist) < get_max_extent_size(entry); 1697 } 1698 1699 /* 1700 * searches the tree for the given offset. 1701 * 1702 * fuzzy - If this is set, then we are trying to make an allocation, and we just 1703 * want a section that has at least bytes size and comes at or after the given 1704 * offset. 1705 */ 1706 static struct btrfs_free_space * 1707 tree_search_offset(struct btrfs_free_space_ctl *ctl, 1708 u64 offset, int bitmap_only, int fuzzy) 1709 { 1710 struct rb_node *n = ctl->free_space_offset.rb_node; 1711 struct btrfs_free_space *entry = NULL, *prev = NULL; 1712 1713 lockdep_assert_held(&ctl->tree_lock); 1714 1715 /* find entry that is closest to the 'offset' */ 1716 while (n) { 1717 entry = rb_entry(n, struct btrfs_free_space, offset_index); 1718 prev = entry; 1719 1720 if (offset < entry->offset) 1721 n = n->rb_left; 1722 else if (offset > entry->offset) 1723 n = n->rb_right; 1724 else 1725 break; 1726 1727 entry = NULL; 1728 } 1729 1730 if (bitmap_only) { 1731 if (!entry) 1732 return NULL; 1733 if (entry->bitmap) 1734 return entry; 1735 1736 /* 1737 * bitmap entry and extent entry may share same offset, 1738 * in that case, bitmap entry comes after extent entry. 
1739 */ 1740 n = rb_next(n); 1741 if (!n) 1742 return NULL; 1743 entry = rb_entry(n, struct btrfs_free_space, offset_index); 1744 if (entry->offset != offset) 1745 return NULL; 1746 1747 WARN_ON(!entry->bitmap); 1748 return entry; 1749 } else if (entry) { 1750 if (entry->bitmap) { 1751 /* 1752 * if previous extent entry covers the offset, 1753 * we should return it instead of the bitmap entry 1754 */ 1755 n = rb_prev(&entry->offset_index); 1756 if (n) { 1757 prev = rb_entry(n, struct btrfs_free_space, 1758 offset_index); 1759 if (!prev->bitmap && 1760 prev->offset + prev->bytes > offset) 1761 entry = prev; 1762 } 1763 } 1764 return entry; 1765 } 1766 1767 if (!prev) 1768 return NULL; 1769 1770 /* find last entry before the 'offset' */ 1771 entry = prev; 1772 if (entry->offset > offset) { 1773 n = rb_prev(&entry->offset_index); 1774 if (n) { 1775 entry = rb_entry(n, struct btrfs_free_space, 1776 offset_index); 1777 ASSERT(entry->offset <= offset); 1778 } else { 1779 if (fuzzy) 1780 return entry; 1781 else 1782 return NULL; 1783 } 1784 } 1785 1786 if (entry->bitmap) { 1787 n = rb_prev(&entry->offset_index); 1788 if (n) { 1789 prev = rb_entry(n, struct btrfs_free_space, 1790 offset_index); 1791 if (!prev->bitmap && 1792 prev->offset + prev->bytes > offset) 1793 return prev; 1794 } 1795 if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) 1796 return entry; 1797 } else if (entry->offset + entry->bytes > offset) 1798 return entry; 1799 1800 if (!fuzzy) 1801 return NULL; 1802 1803 while (1) { 1804 n = rb_next(&entry->offset_index); 1805 if (!n) 1806 return NULL; 1807 entry = rb_entry(n, struct btrfs_free_space, offset_index); 1808 if (entry->bitmap) { 1809 if (entry->offset + BITS_PER_BITMAP * 1810 ctl->unit > offset) 1811 break; 1812 } else { 1813 if (entry->offset + entry->bytes > offset) 1814 break; 1815 } 1816 } 1817 return entry; 1818 } 1819 1820 static inline void unlink_free_space(struct btrfs_free_space_ctl *ctl, 1821 struct btrfs_free_space *info, 1822 bool update_stat) 1823 { 1824 lockdep_assert_held(&ctl->tree_lock); 1825 1826 rb_erase(&info->offset_index, &ctl->free_space_offset); 1827 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes); 1828 ctl->free_extents--; 1829 1830 if (!info->bitmap && !btrfs_free_space_trimmed(info)) { 1831 ctl->discardable_extents[BTRFS_STAT_CURR]--; 1832 ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes; 1833 } 1834 1835 if (update_stat) 1836 ctl->free_space -= info->bytes; 1837 } 1838 1839 static int link_free_space(struct btrfs_free_space_ctl *ctl, 1840 struct btrfs_free_space *info) 1841 { 1842 int ret = 0; 1843 1844 lockdep_assert_held(&ctl->tree_lock); 1845 1846 ASSERT(info->bytes || info->bitmap); 1847 ret = tree_insert_offset(ctl, NULL, info); 1848 if (ret) 1849 return ret; 1850 1851 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less); 1852 1853 if (!info->bitmap && !btrfs_free_space_trimmed(info)) { 1854 ctl->discardable_extents[BTRFS_STAT_CURR]++; 1855 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; 1856 } 1857 1858 ctl->free_space += info->bytes; 1859 ctl->free_extents++; 1860 return ret; 1861 } 1862 1863 static void relink_bitmap_entry(struct btrfs_free_space_ctl *ctl, 1864 struct btrfs_free_space *info) 1865 { 1866 ASSERT(info->bitmap); 1867 1868 /* 1869 * If our entry is empty it's because we're on a cluster and we don't 1870 * want to re-link it into our ctl bytes index. 
1871 */ 1872 if (RB_EMPTY_NODE(&info->bytes_index)) 1873 return; 1874 1875 lockdep_assert_held(&ctl->tree_lock); 1876 1877 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes); 1878 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less); 1879 } 1880 1881 static inline void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, 1882 struct btrfs_free_space *info, 1883 u64 offset, u64 bytes, bool update_stat) 1884 { 1885 unsigned long start, count, end; 1886 int extent_delta = -1; 1887 1888 start = offset_to_bit(info->offset, ctl->unit, offset); 1889 count = bytes_to_bits(bytes, ctl->unit); 1890 end = start + count; 1891 ASSERT(end <= BITS_PER_BITMAP); 1892 1893 bitmap_clear(info->bitmap, start, count); 1894 1895 info->bytes -= bytes; 1896 if (info->max_extent_size > ctl->unit) 1897 info->max_extent_size = 0; 1898 1899 relink_bitmap_entry(ctl, info); 1900 1901 if (start && test_bit(start - 1, info->bitmap)) 1902 extent_delta++; 1903 1904 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) 1905 extent_delta++; 1906 1907 info->bitmap_extents += extent_delta; 1908 if (!btrfs_free_space_trimmed(info)) { 1909 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; 1910 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; 1911 } 1912 1913 if (update_stat) 1914 ctl->free_space -= bytes; 1915 } 1916 1917 static void btrfs_bitmap_set_bits(struct btrfs_free_space_ctl *ctl, 1918 struct btrfs_free_space *info, u64 offset, 1919 u64 bytes) 1920 { 1921 unsigned long start, count, end; 1922 int extent_delta = 1; 1923 1924 start = offset_to_bit(info->offset, ctl->unit, offset); 1925 count = bytes_to_bits(bytes, ctl->unit); 1926 end = start + count; 1927 ASSERT(end <= BITS_PER_BITMAP); 1928 1929 bitmap_set(info->bitmap, start, count); 1930 1931 /* 1932 * We set some bytes, we have no idea what the max extent size is 1933 * anymore. 1934 */ 1935 info->max_extent_size = 0; 1936 info->bytes += bytes; 1937 ctl->free_space += bytes; 1938 1939 relink_bitmap_entry(ctl, info); 1940 1941 if (start && test_bit(start - 1, info->bitmap)) 1942 extent_delta--; 1943 1944 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) 1945 extent_delta--; 1946 1947 info->bitmap_extents += extent_delta; 1948 if (!btrfs_free_space_trimmed(info)) { 1949 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; 1950 ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes; 1951 } 1952 } 1953 1954 /* 1955 * If we can not find suitable extent, we will use bytes to record 1956 * the size of the max extent. 1957 */ 1958 static int search_bitmap(struct btrfs_free_space_ctl *ctl, 1959 struct btrfs_free_space *bitmap_info, u64 *offset, 1960 u64 *bytes, bool for_alloc) 1961 { 1962 unsigned long found_bits = 0; 1963 unsigned long max_bits = 0; 1964 unsigned long bits, i; 1965 unsigned long next_zero; 1966 unsigned long extent_bits; 1967 1968 /* 1969 * Skip searching the bitmap if we don't have a contiguous section that 1970 * is large enough for this allocation. 
1971 */ 1972 if (for_alloc && 1973 bitmap_info->max_extent_size && 1974 bitmap_info->max_extent_size < *bytes) { 1975 *bytes = bitmap_info->max_extent_size; 1976 return -1; 1977 } 1978 1979 i = offset_to_bit(bitmap_info->offset, ctl->unit, 1980 max_t(u64, *offset, bitmap_info->offset)); 1981 bits = bytes_to_bits(*bytes, ctl->unit); 1982 1983 for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) { 1984 if (for_alloc && bits == 1) { 1985 found_bits = 1; 1986 break; 1987 } 1988 next_zero = find_next_zero_bit(bitmap_info->bitmap, 1989 BITS_PER_BITMAP, i); 1990 extent_bits = next_zero - i; 1991 if (extent_bits >= bits) { 1992 found_bits = extent_bits; 1993 break; 1994 } else if (extent_bits > max_bits) { 1995 max_bits = extent_bits; 1996 } 1997 i = next_zero; 1998 } 1999 2000 if (found_bits) { 2001 *offset = (u64)(i * ctl->unit) + bitmap_info->offset; 2002 *bytes = (u64)(found_bits) * ctl->unit; 2003 return 0; 2004 } 2005 2006 *bytes = (u64)(max_bits) * ctl->unit; 2007 bitmap_info->max_extent_size = *bytes; 2008 relink_bitmap_entry(ctl, bitmap_info); 2009 return -1; 2010 } 2011 2012 /* Cache the size of the max extent in bytes */ 2013 static struct btrfs_free_space * 2014 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes, 2015 unsigned long align, u64 *max_extent_size, bool use_bytes_index) 2016 { 2017 struct btrfs_free_space *entry; 2018 struct rb_node *node; 2019 u64 tmp; 2020 u64 align_off; 2021 int ret; 2022 2023 if (!ctl->free_space_offset.rb_node) 2024 goto out; 2025 again: 2026 if (use_bytes_index) { 2027 node = rb_first_cached(&ctl->free_space_bytes); 2028 } else { 2029 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 2030 0, 1); 2031 if (!entry) 2032 goto out; 2033 node = &entry->offset_index; 2034 } 2035 2036 for (; node; node = rb_next(node)) { 2037 if (use_bytes_index) 2038 entry = rb_entry(node, struct btrfs_free_space, 2039 bytes_index); 2040 else 2041 entry = rb_entry(node, struct btrfs_free_space, 2042 offset_index); 2043 2044 /* 2045 * If we are using the bytes index then all subsequent entries 2046 * in this tree are going to be < bytes, so simply set the max 2047 * extent size and exit the loop. 2048 * 2049 * If we're using the offset index then we need to keep going 2050 * through the rest of the tree. 2051 */ 2052 if (entry->bytes < *bytes) { 2053 *max_extent_size = max(get_max_extent_size(entry), 2054 *max_extent_size); 2055 if (use_bytes_index) 2056 break; 2057 continue; 2058 } 2059 2060 /* make sure the space returned is big enough 2061 * to match our requested alignment 2062 */ 2063 if (*bytes >= align) { 2064 tmp = entry->offset - ctl->start + align - 1; 2065 tmp = div64_u64(tmp, align); 2066 tmp = tmp * align + ctl->start; 2067 align_off = tmp - entry->offset; 2068 } else { 2069 align_off = 0; 2070 tmp = entry->offset; 2071 } 2072 2073 /* 2074 * We don't break here if we're using the bytes index because we 2075 * may have another entry that has the correct alignment that is 2076 * the right size, so we don't want to miss that possibility. 2077 * At worst this adds another loop through the logic, but if we 2078 * broke here we could prematurely ENOSPC. 
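* For example, another entry of the same size later in the bytes index whose
* offset already happens to be aligned could still satisfy the allocation
* even though this one cannot.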
2079 */ 2080 if (entry->bytes < *bytes + align_off) { 2081 *max_extent_size = max(get_max_extent_size(entry), 2082 *max_extent_size); 2083 continue; 2084 } 2085 2086 if (entry->bitmap) { 2087 struct rb_node *old_next = rb_next(node); 2088 u64 size = *bytes; 2089 2090 ret = search_bitmap(ctl, entry, &tmp, &size, true); 2091 if (!ret) { 2092 *offset = tmp; 2093 *bytes = size; 2094 return entry; 2095 } else { 2096 *max_extent_size = 2097 max(get_max_extent_size(entry), 2098 *max_extent_size); 2099 } 2100 2101 /* 2102 * The bitmap may have gotten re-arranged in the space 2103 * index here because the max_extent_size may have been 2104 * updated. Start from the beginning again if this 2105 * happened. 2106 */ 2107 if (use_bytes_index && old_next != rb_next(node)) 2108 goto again; 2109 continue; 2110 } 2111 2112 *offset = tmp; 2113 *bytes = entry->bytes - align_off; 2114 return entry; 2115 } 2116 out: 2117 return NULL; 2118 } 2119 2120 static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, 2121 struct btrfs_free_space *info, u64 offset) 2122 { 2123 info->offset = offset_to_bitmap(ctl, offset); 2124 info->bytes = 0; 2125 info->bitmap_extents = 0; 2126 INIT_LIST_HEAD(&info->list); 2127 link_free_space(ctl, info); 2128 ctl->total_bitmaps++; 2129 recalculate_thresholds(ctl); 2130 } 2131 2132 static void free_bitmap(struct btrfs_free_space_ctl *ctl, 2133 struct btrfs_free_space *bitmap_info) 2134 { 2135 /* 2136 * Normally when this is called, the bitmap is completely empty. However, 2137 * if we are blowing up the free space cache for one reason or another 2138 * via __btrfs_remove_free_space_cache(), then it may not be empty and 2139 * we may leave stats on the table. 2140 */ 2141 if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) { 2142 ctl->discardable_extents[BTRFS_STAT_CURR] -= 2143 bitmap_info->bitmap_extents; 2144 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes; 2145 2146 } 2147 unlink_free_space(ctl, bitmap_info, true); 2148 kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap); 2149 kmem_cache_free(btrfs_free_space_cachep, bitmap_info); 2150 ctl->total_bitmaps--; 2151 recalculate_thresholds(ctl); 2152 } 2153 2154 static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl, 2155 struct btrfs_free_space *bitmap_info, 2156 u64 *offset, u64 *bytes) 2157 { 2158 u64 end; 2159 u64 search_start, search_bytes; 2160 int ret; 2161 2162 again: 2163 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; 2164 2165 /* 2166 * We need to search for bits in this bitmap. We could only cover some 2167 * of the extent in this bitmap thanks to how we add space, so we need 2168 * to search for as much of it as we can and clear that amount, and then 2169 * go searching for the next bit.
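* For example, a removal may start in this bitmap but run past its end into
* the next bitmap or into an extent entry, so each pass clears the piece
* tracked here and advances *offset and *bytes before looking at the next
* entry.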
2170 */ 2171 search_start = *offset; 2172 search_bytes = ctl->unit; 2173 search_bytes = min(search_bytes, end - search_start + 1); 2174 ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes, 2175 false); 2176 if (ret < 0 || search_start != *offset) 2177 return -EINVAL; 2178 2179 /* We may have found more bits than what we need */ 2180 search_bytes = min(search_bytes, *bytes); 2181 2182 /* Cannot clear past the end of the bitmap */ 2183 search_bytes = min(search_bytes, end - search_start + 1); 2184 2185 bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes, true); 2186 *offset += search_bytes; 2187 *bytes -= search_bytes; 2188 2189 if (*bytes) { 2190 struct rb_node *next = rb_next(&bitmap_info->offset_index); 2191 if (!bitmap_info->bytes) 2192 free_bitmap(ctl, bitmap_info); 2193 2194 /* 2195 * no entry after this bitmap, but we still have bytes to 2196 * remove, so something has gone wrong. 2197 */ 2198 if (!next) 2199 return -EINVAL; 2200 2201 bitmap_info = rb_entry(next, struct btrfs_free_space, 2202 offset_index); 2203 2204 /* 2205 * if the next entry isn't a bitmap we need to return to let the 2206 * extent stuff do its work. 2207 */ 2208 if (!bitmap_info->bitmap) 2209 return -EAGAIN; 2210 2211 /* 2212 * Ok the next item is a bitmap, but it may not actually hold 2213 * the information for the rest of this free space stuff, so 2214 * look for it, and if we don't find it return so we can try 2215 * everything over again. 2216 */ 2217 search_start = *offset; 2218 search_bytes = ctl->unit; 2219 ret = search_bitmap(ctl, bitmap_info, &search_start, 2220 &search_bytes, false); 2221 if (ret < 0 || search_start != *offset) 2222 return -EAGAIN; 2223 2224 goto again; 2225 } else if (!bitmap_info->bytes) 2226 free_bitmap(ctl, bitmap_info); 2227 2228 return 0; 2229 } 2230 2231 static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, 2232 struct btrfs_free_space *info, u64 offset, 2233 u64 bytes, enum btrfs_trim_state trim_state) 2234 { 2235 u64 bytes_to_set = 0; 2236 u64 end; 2237 2238 /* 2239 * This is a tradeoff to make bitmap trim state minimal. We mark the 2240 * whole bitmap untrimmed if at any point we add untrimmed regions. 2241 */ 2242 if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) { 2243 if (btrfs_free_space_trimmed(info)) { 2244 ctl->discardable_extents[BTRFS_STAT_CURR] += 2245 info->bitmap_extents; 2246 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; 2247 } 2248 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; 2249 } 2250 2251 end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); 2252 2253 bytes_to_set = min(end - offset, bytes); 2254 2255 btrfs_bitmap_set_bits(ctl, info, offset, bytes_to_set); 2256 2257 return bytes_to_set; 2258 2259 } 2260 2261 static bool use_bitmap(struct btrfs_free_space_ctl *ctl, 2262 struct btrfs_free_space *info) 2263 { 2264 struct btrfs_block_group *block_group = ctl->block_group; 2265 struct btrfs_fs_info *fs_info = block_group->fs_info; 2266 bool forced = false; 2267 2268 #ifdef CONFIG_BTRFS_DEBUG 2269 if (btrfs_should_fragment_free_space(block_group)) 2270 forced = true; 2271 #endif 2272 2273 /* This is a way to reclaim large regions from the bitmaps. 
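* Extents of at least FORCE_EXTENT_THRESHOLD are always kept as extent
* entries so that large free regions do not end up buried inside a bitmap.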
*/ 2274 if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD) 2275 return false; 2276 2277 /* 2278 * If we are below the extents threshold then we can add this as an 2279 * extent, and don't have to deal with the bitmap 2280 */ 2281 if (!forced && ctl->free_extents < ctl->extents_thresh) { 2282 /* 2283 * If this block group has some small extents we don't want to 2284 * use up all of our free slots in the cache with them, we want 2285 * to reserve them for larger extents. However, if we have plenty 2286 * of cache left then go ahead and add them, no sense in adding 2287 * the overhead of a bitmap if we don't have to. 2288 */ 2289 if (info->bytes <= fs_info->sectorsize * 8) { 2290 if (ctl->free_extents * 3 <= ctl->extents_thresh) 2291 return false; 2292 } else { 2293 return false; 2294 } 2295 } 2296 2297 /* 2298 * The original block groups from mkfs can be really small, like 8 2299 * megabytes, so don't bother with a bitmap for those entries. However 2300 * some block groups can be smaller than what a bitmap would cover but 2301 * are still large enough that they could overflow the 32k memory limit, 2302 * so still allow those block groups to have a bitmap 2303 * entry. 2304 */ 2305 if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length) 2306 return false; 2307 2308 return true; 2309 } 2310 2311 static const struct btrfs_free_space_op free_space_op = { 2312 .use_bitmap = use_bitmap, 2313 }; 2314 2315 static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, 2316 struct btrfs_free_space *info) 2317 { 2318 struct btrfs_free_space *bitmap_info; 2319 struct btrfs_block_group *block_group = NULL; 2320 int added = 0; 2321 u64 bytes, offset, bytes_added; 2322 enum btrfs_trim_state trim_state; 2323 int ret; 2324 2325 bytes = info->bytes; 2326 offset = info->offset; 2327 trim_state = info->trim_state; 2328 2329 if (!ctl->op->use_bitmap(ctl, info)) 2330 return 0; 2331 2332 if (ctl->op == &free_space_op) 2333 block_group = ctl->block_group; 2334 again: 2335 /* 2336 * Since we link bitmaps right into the cluster we need to see if we 2337 * have a cluster here, and if so and it has our bitmap we need to add 2338 * the free space to that bitmap.
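* (A cluster that was set up from a bitmap, see btrfs_bitmap_cluster(), has
* that bitmap as the first entry of its private rbtree, which is why checking
* rb_first() below is sufficient.)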
2339 */ 2340 if (block_group && !list_empty(&block_group->cluster_list)) { 2341 struct btrfs_free_cluster *cluster; 2342 struct rb_node *node; 2343 struct btrfs_free_space *entry; 2344 2345 cluster = list_entry(block_group->cluster_list.next, 2346 struct btrfs_free_cluster, 2347 block_group_list); 2348 spin_lock(&cluster->lock); 2349 node = rb_first(&cluster->root); 2350 if (!node) { 2351 spin_unlock(&cluster->lock); 2352 goto no_cluster_bitmap; 2353 } 2354 2355 entry = rb_entry(node, struct btrfs_free_space, offset_index); 2356 if (!entry->bitmap) { 2357 spin_unlock(&cluster->lock); 2358 goto no_cluster_bitmap; 2359 } 2360 2361 if (entry->offset == offset_to_bitmap(ctl, offset)) { 2362 bytes_added = add_bytes_to_bitmap(ctl, entry, offset, 2363 bytes, trim_state); 2364 bytes -= bytes_added; 2365 offset += bytes_added; 2366 } 2367 spin_unlock(&cluster->lock); 2368 if (!bytes) { 2369 ret = 1; 2370 goto out; 2371 } 2372 } 2373 2374 no_cluster_bitmap: 2375 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 2376 1, 0); 2377 if (!bitmap_info) { 2378 ASSERT(added == 0); 2379 goto new_bitmap; 2380 } 2381 2382 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes, 2383 trim_state); 2384 bytes -= bytes_added; 2385 offset += bytes_added; 2386 added = 0; 2387 2388 if (!bytes) { 2389 ret = 1; 2390 goto out; 2391 } else 2392 goto again; 2393 2394 new_bitmap: 2395 if (info && info->bitmap) { 2396 add_new_bitmap(ctl, info, offset); 2397 added = 1; 2398 info = NULL; 2399 goto again; 2400 } else { 2401 spin_unlock(&ctl->tree_lock); 2402 2403 /* no pre-allocated info, allocate a new one */ 2404 if (!info) { 2405 info = kmem_cache_zalloc(btrfs_free_space_cachep, 2406 GFP_NOFS); 2407 if (!info) { 2408 spin_lock(&ctl->tree_lock); 2409 ret = -ENOMEM; 2410 goto out; 2411 } 2412 } 2413 2414 /* allocate the bitmap */ 2415 info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, 2416 GFP_NOFS); 2417 info->trim_state = BTRFS_TRIM_STATE_TRIMMED; 2418 spin_lock(&ctl->tree_lock); 2419 if (!info->bitmap) { 2420 ret = -ENOMEM; 2421 goto out; 2422 } 2423 goto again; 2424 } 2425 2426 out: 2427 if (info) { 2428 if (info->bitmap) 2429 kmem_cache_free(btrfs_free_space_bitmap_cachep, 2430 info->bitmap); 2431 kmem_cache_free(btrfs_free_space_cachep, info); 2432 } 2433 2434 return ret; 2435 } 2436 2437 /* 2438 * Free space merging rules: 2439 * 1) Merge trimmed areas together 2440 * 2) Let untrimmed areas coalesce with trimmed areas 2441 * 3) Always pull neighboring regions from bitmaps 2442 * 2443 * The above rules are for when we merge free space based on btrfs_trim_state. 2444 * Rules 2 and 3 are subtle because they are suboptimal, but are done for the 2445 * same reason: to promote larger extent regions which makes life easier for 2446 * find_free_extent(). Rule 2 enables coalescing based on the common path 2447 * being returning free space from btrfs_finish_extent_commit(). So when free 2448 * space is trimmed, it will prevent aggregating trimmed new region and 2449 * untrimmed regions in the rb_tree. Rule 3 is purely to obtain larger extents 2450 * and provide find_free_extent() with the largest extents possible hoping for 2451 * the reuse path. 
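* For example, under rule 2 an untrimmed extent freed next to an already
* trimmed neighbour merges into a single entry that is marked untrimmed,
* trading some discard accounting for a larger contiguous extent.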
2452 */ 2453 static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, 2454 struct btrfs_free_space *info, bool update_stat) 2455 { 2456 struct btrfs_free_space *left_info = NULL; 2457 struct btrfs_free_space *right_info; 2458 bool merged = false; 2459 u64 offset = info->offset; 2460 u64 bytes = info->bytes; 2461 const bool is_trimmed = btrfs_free_space_trimmed(info); 2462 struct rb_node *right_prev = NULL; 2463 2464 /* 2465 * first we want to see if there is free space adjacent to the range we 2466 * are adding, if there is remove that struct and add a new one to 2467 * cover the entire range 2468 */ 2469 right_info = tree_search_offset(ctl, offset + bytes, 0, 0); 2470 if (right_info) 2471 right_prev = rb_prev(&right_info->offset_index); 2472 2473 if (right_prev) 2474 left_info = rb_entry(right_prev, struct btrfs_free_space, offset_index); 2475 else if (!right_info) 2476 left_info = tree_search_offset(ctl, offset - 1, 0, 0); 2477 2478 /* See try_merge_free_space() comment. */ 2479 if (right_info && !right_info->bitmap && 2480 (!is_trimmed || btrfs_free_space_trimmed(right_info))) { 2481 unlink_free_space(ctl, right_info, update_stat); 2482 info->bytes += right_info->bytes; 2483 kmem_cache_free(btrfs_free_space_cachep, right_info); 2484 merged = true; 2485 } 2486 2487 /* See try_merge_free_space() comment. */ 2488 if (left_info && !left_info->bitmap && 2489 left_info->offset + left_info->bytes == offset && 2490 (!is_trimmed || btrfs_free_space_trimmed(left_info))) { 2491 unlink_free_space(ctl, left_info, update_stat); 2492 info->offset = left_info->offset; 2493 info->bytes += left_info->bytes; 2494 kmem_cache_free(btrfs_free_space_cachep, left_info); 2495 merged = true; 2496 } 2497 2498 return merged; 2499 } 2500 2501 static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl, 2502 struct btrfs_free_space *info, 2503 bool update_stat) 2504 { 2505 struct btrfs_free_space *bitmap; 2506 unsigned long i; 2507 unsigned long j; 2508 const u64 end = info->offset + info->bytes; 2509 const u64 bitmap_offset = offset_to_bitmap(ctl, end); 2510 u64 bytes; 2511 2512 bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0); 2513 if (!bitmap) 2514 return false; 2515 2516 i = offset_to_bit(bitmap->offset, ctl->unit, end); 2517 j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i); 2518 if (j == i) 2519 return false; 2520 bytes = (j - i) * ctl->unit; 2521 info->bytes += bytes; 2522 2523 /* See try_merge_free_space() comment. */ 2524 if (!btrfs_free_space_trimmed(bitmap)) 2525 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; 2526 2527 bitmap_clear_bits(ctl, bitmap, end, bytes, update_stat); 2528 2529 if (!bitmap->bytes) 2530 free_bitmap(ctl, bitmap); 2531 2532 return true; 2533 } 2534 2535 static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl, 2536 struct btrfs_free_space *info, 2537 bool update_stat) 2538 { 2539 struct btrfs_free_space *bitmap; 2540 u64 bitmap_offset; 2541 unsigned long i; 2542 unsigned long j; 2543 unsigned long prev_j; 2544 u64 bytes; 2545 2546 bitmap_offset = offset_to_bitmap(ctl, info->offset); 2547 /* If we're on a boundary, try the previous logical bitmap. 
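* i.e. if info->offset is exactly where a bitmap would start, any bits in
* front of it can only live in the bitmap that ends at that boundary.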
*/ 2548 if (bitmap_offset == info->offset) { 2549 if (info->offset == 0) 2550 return false; 2551 bitmap_offset = offset_to_bitmap(ctl, info->offset - 1); 2552 } 2553 2554 bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0); 2555 if (!bitmap) 2556 return false; 2557 2558 i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1; 2559 j = 0; 2560 prev_j = (unsigned long)-1; 2561 for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) { 2562 if (j > i) 2563 break; 2564 prev_j = j; 2565 } 2566 if (prev_j == i) 2567 return false; 2568 2569 if (prev_j == (unsigned long)-1) 2570 bytes = (i + 1) * ctl->unit; 2571 else 2572 bytes = (i - prev_j) * ctl->unit; 2573 2574 info->offset -= bytes; 2575 info->bytes += bytes; 2576 2577 /* See try_merge_free_space() comment. */ 2578 if (!btrfs_free_space_trimmed(bitmap)) 2579 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; 2580 2581 bitmap_clear_bits(ctl, bitmap, info->offset, bytes, update_stat); 2582 2583 if (!bitmap->bytes) 2584 free_bitmap(ctl, bitmap); 2585 2586 return true; 2587 } 2588 2589 /* 2590 * We prefer always to allocate from extent entries, both for clustered and 2591 * non-clustered allocation requests. So when attempting to add a new extent 2592 * entry, try to see if there's adjacent free space in bitmap entries, and if 2593 * there is, migrate that space from the bitmaps to the extent. 2594 * Like this we get better chances of satisfying space allocation requests 2595 * because we attempt to satisfy them based on a single cache entry, and never 2596 * on 2 or more entries - even if the entries represent a contiguous free space 2597 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry 2598 * ends). 2599 */ 2600 static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl, 2601 struct btrfs_free_space *info, 2602 bool update_stat) 2603 { 2604 /* 2605 * Only work with disconnected entries, as we can change their offset, 2606 * and must be extent entries. 
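* (Callers pass either a freshly allocated entry or one just unlinked from a
* cluster, so the entry is not yet present in the ctl rbtrees.)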
2607 */ 2608 ASSERT(!info->bitmap); 2609 ASSERT(RB_EMPTY_NODE(&info->offset_index)); 2610 2611 if (ctl->total_bitmaps > 0) { 2612 bool stole_end; 2613 bool stole_front = false; 2614 2615 stole_end = steal_from_bitmap_to_end(ctl, info, update_stat); 2616 if (ctl->total_bitmaps > 0) 2617 stole_front = steal_from_bitmap_to_front(ctl, info, 2618 update_stat); 2619 2620 if (stole_end || stole_front) 2621 try_merge_free_space(ctl, info, update_stat); 2622 } 2623 } 2624 2625 static int __btrfs_add_free_space(struct btrfs_block_group *block_group, 2626 u64 offset, u64 bytes, 2627 enum btrfs_trim_state trim_state) 2628 { 2629 struct btrfs_fs_info *fs_info = block_group->fs_info; 2630 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2631 struct btrfs_free_space *info; 2632 int ret = 0; 2633 u64 filter_bytes = bytes; 2634 2635 ASSERT(!btrfs_is_zoned(fs_info)); 2636 2637 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); 2638 if (!info) 2639 return -ENOMEM; 2640 2641 info->offset = offset; 2642 info->bytes = bytes; 2643 info->trim_state = trim_state; 2644 RB_CLEAR_NODE(&info->offset_index); 2645 RB_CLEAR_NODE(&info->bytes_index); 2646 2647 spin_lock(&ctl->tree_lock); 2648 2649 if (try_merge_free_space(ctl, info, true)) 2650 goto link; 2651 2652 /* 2653 * There was no extent directly to the left or right of this new 2654 * extent then we know we're going to have to allocate a new extent, so 2655 * before we do that see if we need to drop this into a bitmap 2656 */ 2657 ret = insert_into_bitmap(ctl, info); 2658 if (ret < 0) { 2659 goto out; 2660 } else if (ret) { 2661 ret = 0; 2662 goto out; 2663 } 2664 link: 2665 /* 2666 * Only steal free space from adjacent bitmaps if we're sure we're not 2667 * going to add the new free space to existing bitmap entries - because 2668 * that would mean unnecessary work that would be reverted. Therefore 2669 * attempt to steal space from bitmaps if we're adding an extent entry. 
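* We only get here when the space is going in as an extent entry: either it
* merged with an existing extent above or insert_into_bitmap() declined to
* take it.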
2670 */ 2671 steal_from_bitmap(ctl, info, true); 2672 2673 filter_bytes = max(filter_bytes, info->bytes); 2674 2675 ret = link_free_space(ctl, info); 2676 if (ret) 2677 kmem_cache_free(btrfs_free_space_cachep, info); 2678 out: 2679 btrfs_discard_update_discardable(block_group); 2680 spin_unlock(&ctl->tree_lock); 2681 2682 if (ret) { 2683 btrfs_crit(fs_info, "unable to add free space :%d", ret); 2684 ASSERT(ret != -EEXIST); 2685 } 2686 2687 if (trim_state != BTRFS_TRIM_STATE_TRIMMED) { 2688 btrfs_discard_check_filter(block_group, filter_bytes); 2689 btrfs_discard_queue_work(&fs_info->discard_ctl, block_group); 2690 } 2691 2692 return ret; 2693 } 2694 2695 static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group, 2696 u64 bytenr, u64 size, bool used) 2697 { 2698 struct btrfs_space_info *sinfo = block_group->space_info; 2699 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2700 u64 offset = bytenr - block_group->start; 2701 u64 to_free, to_unusable; 2702 int bg_reclaim_threshold = 0; 2703 bool initial; 2704 u64 reclaimable_unusable; 2705 2706 spin_lock(&block_group->lock); 2707 2708 initial = ((size == block_group->length) && (block_group->alloc_offset == 0)); 2709 WARN_ON(!initial && offset + size > block_group->zone_capacity); 2710 if (!initial) 2711 bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold); 2712 2713 if (!used) 2714 to_free = size; 2715 else if (initial) 2716 to_free = block_group->zone_capacity; 2717 else if (offset >= block_group->alloc_offset) 2718 to_free = size; 2719 else if (offset + size <= block_group->alloc_offset) 2720 to_free = 0; 2721 else 2722 to_free = offset + size - block_group->alloc_offset; 2723 to_unusable = size - to_free; 2724 2725 spin_lock(&ctl->tree_lock); 2726 ctl->free_space += to_free; 2727 spin_unlock(&ctl->tree_lock); 2728 /* 2729 * If the block group is read-only, we should account freed space into 2730 * bytes_readonly. 2731 */ 2732 if (!block_group->ro) { 2733 block_group->zone_unusable += to_unusable; 2734 WARN_ON(block_group->zone_unusable > block_group->length); 2735 } 2736 if (!used) { 2737 block_group->alloc_offset -= size; 2738 } 2739 2740 reclaimable_unusable = block_group->zone_unusable - 2741 (block_group->length - block_group->zone_capacity); 2742 /* All the region is now unusable. 
Mark it as unused and reclaim */ 2743 if (block_group->zone_unusable == block_group->length) { 2744 btrfs_mark_bg_unused(block_group); 2745 } else if (bg_reclaim_threshold && 2746 reclaimable_unusable >= 2747 mult_perc(block_group->zone_capacity, bg_reclaim_threshold)) { 2748 btrfs_mark_bg_to_reclaim(block_group); 2749 } 2750 2751 spin_unlock(&block_group->lock); 2752 2753 return 0; 2754 } 2755 2756 int btrfs_add_free_space(struct btrfs_block_group *block_group, 2757 u64 bytenr, u64 size) 2758 { 2759 enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED; 2760 2761 if (btrfs_is_zoned(block_group->fs_info)) 2762 return __btrfs_add_free_space_zoned(block_group, bytenr, size, 2763 true); 2764 2765 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC)) 2766 trim_state = BTRFS_TRIM_STATE_TRIMMED; 2767 2768 return __btrfs_add_free_space(block_group, bytenr, size, trim_state); 2769 } 2770 2771 int btrfs_add_free_space_unused(struct btrfs_block_group *block_group, 2772 u64 bytenr, u64 size) 2773 { 2774 if (btrfs_is_zoned(block_group->fs_info)) 2775 return __btrfs_add_free_space_zoned(block_group, bytenr, size, 2776 false); 2777 2778 return btrfs_add_free_space(block_group, bytenr, size); 2779 } 2780 2781 /* 2782 * This is a subtle distinction because when adding free space back in general, 2783 * we want it to be added as untrimmed for async. But in the case where we add 2784 * it on loading of a block group, we want to consider it trimmed. 2785 */ 2786 int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group, 2787 u64 bytenr, u64 size) 2788 { 2789 enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED; 2790 2791 if (btrfs_is_zoned(block_group->fs_info)) 2792 return __btrfs_add_free_space_zoned(block_group, bytenr, size, 2793 true); 2794 2795 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) || 2796 btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) 2797 trim_state = BTRFS_TRIM_STATE_TRIMMED; 2798 2799 return __btrfs_add_free_space(block_group, bytenr, size, trim_state); 2800 } 2801 2802 int btrfs_remove_free_space(struct btrfs_block_group *block_group, 2803 u64 offset, u64 bytes) 2804 { 2805 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2806 struct btrfs_free_space *info; 2807 int ret; 2808 bool re_search = false; 2809 2810 if (btrfs_is_zoned(block_group->fs_info)) { 2811 /* 2812 * This can happen with conventional zones when replaying log. 2813 * Since the allocation info of tree-log nodes are not recorded 2814 * to the extent-tree, calculate_alloc_pointer() failed to 2815 * advance the allocation pointer after last allocated tree log 2816 * node blocks. 2817 * 2818 * This function is called from 2819 * btrfs_pin_extent_for_log_replay() when replaying the log. 2820 * Advance the pointer not to overwrite the tree-log nodes. 
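* (Zoned block groups track usage with alloc_offset only, so there are no
* rbtree entries to update here.)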
2821 */ 2822 if (block_group->start + block_group->alloc_offset < 2823 offset + bytes) { 2824 block_group->alloc_offset = 2825 offset + bytes - block_group->start; 2826 } 2827 return 0; 2828 } 2829 2830 spin_lock(&ctl->tree_lock); 2831 2832 again: 2833 ret = 0; 2834 if (!bytes) 2835 goto out_lock; 2836 2837 info = tree_search_offset(ctl, offset, 0, 0); 2838 if (!info) { 2839 /* 2840 * oops didn't find an extent that matched the space we wanted 2841 * to remove, look for a bitmap instead 2842 */ 2843 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 2844 1, 0); 2845 if (!info) { 2846 /* 2847 * If we found a partial bit of our free space in a 2848 * bitmap but then couldn't find the other part this may 2849 * be a problem, so WARN about it. 2850 */ 2851 WARN_ON(re_search); 2852 goto out_lock; 2853 } 2854 } 2855 2856 re_search = false; 2857 if (!info->bitmap) { 2858 unlink_free_space(ctl, info, true); 2859 if (offset == info->offset) { 2860 u64 to_free = min(bytes, info->bytes); 2861 2862 info->bytes -= to_free; 2863 info->offset += to_free; 2864 if (info->bytes) { 2865 ret = link_free_space(ctl, info); 2866 WARN_ON(ret); 2867 } else { 2868 kmem_cache_free(btrfs_free_space_cachep, info); 2869 } 2870 2871 offset += to_free; 2872 bytes -= to_free; 2873 goto again; 2874 } else { 2875 u64 old_end = info->bytes + info->offset; 2876 2877 info->bytes = offset - info->offset; 2878 ret = link_free_space(ctl, info); 2879 WARN_ON(ret); 2880 if (ret) 2881 goto out_lock; 2882 2883 /* Not enough bytes in this entry to satisfy us */ 2884 if (old_end < offset + bytes) { 2885 bytes -= old_end - offset; 2886 offset = old_end; 2887 goto again; 2888 } else if (old_end == offset + bytes) { 2889 /* all done */ 2890 goto out_lock; 2891 } 2892 spin_unlock(&ctl->tree_lock); 2893 2894 ret = __btrfs_add_free_space(block_group, 2895 offset + bytes, 2896 old_end - (offset + bytes), 2897 info->trim_state); 2898 WARN_ON(ret); 2899 goto out; 2900 } 2901 } 2902 2903 ret = remove_from_bitmap(ctl, info, &offset, &bytes); 2904 if (ret == -EAGAIN) { 2905 re_search = true; 2906 goto again; 2907 } 2908 out_lock: 2909 btrfs_discard_update_discardable(block_group); 2910 spin_unlock(&ctl->tree_lock); 2911 out: 2912 return ret; 2913 } 2914 2915 void btrfs_dump_free_space(struct btrfs_block_group *block_group, 2916 u64 bytes) 2917 { 2918 struct btrfs_fs_info *fs_info = block_group->fs_info; 2919 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2920 struct btrfs_free_space *info; 2921 struct rb_node *n; 2922 int count = 0; 2923 2924 /* 2925 * Zoned btrfs does not use free space tree and cluster. Just print 2926 * out the free space after the allocation offset. 
2927 */ 2928 if (btrfs_is_zoned(fs_info)) { 2929 btrfs_info(fs_info, "free space %llu active %d", 2930 block_group->zone_capacity - block_group->alloc_offset, 2931 test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, 2932 &block_group->runtime_flags)); 2933 return; 2934 } 2935 2936 spin_lock(&ctl->tree_lock); 2937 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { 2938 info = rb_entry(n, struct btrfs_free_space, offset_index); 2939 if (info->bytes >= bytes && !block_group->ro) 2940 count++; 2941 btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s", 2942 info->offset, info->bytes, str_yes_no(info->bitmap)); 2943 } 2944 spin_unlock(&ctl->tree_lock); 2945 btrfs_info(fs_info, "block group has cluster?: %s", 2946 str_no_yes(list_empty(&block_group->cluster_list))); 2947 btrfs_info(fs_info, 2948 "%d free space entries at or bigger than %llu bytes", 2949 count, bytes); 2950 } 2951 2952 void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group, 2953 struct btrfs_free_space_ctl *ctl) 2954 { 2955 struct btrfs_fs_info *fs_info = block_group->fs_info; 2956 2957 spin_lock_init(&ctl->tree_lock); 2958 ctl->unit = fs_info->sectorsize; 2959 ctl->start = block_group->start; 2960 ctl->block_group = block_group; 2961 ctl->op = &free_space_op; 2962 ctl->free_space_bytes = RB_ROOT_CACHED; 2963 INIT_LIST_HEAD(&ctl->trimming_ranges); 2964 mutex_init(&ctl->cache_writeout_mutex); 2965 2966 /* 2967 * we only want to have 32k of ram per block group for keeping 2968 * track of free space, and if we pass 1/2 of that we want to 2969 * start converting things over to using bitmaps 2970 */ 2971 ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space); 2972 } 2973 2974 /* 2975 * for a given cluster, put all of its extents back into the free 2976 * space cache. If the block group passed doesn't match the block group 2977 * pointed to by the cluster, someone else raced in and freed the 2978 * cluster already. 
In that case, we just return without changing anything 2979 */ 2980 static void __btrfs_return_cluster_to_free_space( 2981 struct btrfs_block_group *block_group, 2982 struct btrfs_free_cluster *cluster) 2983 { 2984 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2985 struct rb_node *node; 2986 2987 lockdep_assert_held(&ctl->tree_lock); 2988 2989 spin_lock(&cluster->lock); 2990 if (cluster->block_group != block_group) { 2991 spin_unlock(&cluster->lock); 2992 return; 2993 } 2994 2995 cluster->block_group = NULL; 2996 cluster->window_start = 0; 2997 list_del_init(&cluster->block_group_list); 2998 2999 node = rb_first(&cluster->root); 3000 while (node) { 3001 struct btrfs_free_space *entry; 3002 3003 entry = rb_entry(node, struct btrfs_free_space, offset_index); 3004 node = rb_next(&entry->offset_index); 3005 rb_erase(&entry->offset_index, &cluster->root); 3006 RB_CLEAR_NODE(&entry->offset_index); 3007 3008 if (!entry->bitmap) { 3009 /* Merging treats extents as if they were new */ 3010 if (!btrfs_free_space_trimmed(entry)) { 3011 ctl->discardable_extents[BTRFS_STAT_CURR]--; 3012 ctl->discardable_bytes[BTRFS_STAT_CURR] -= 3013 entry->bytes; 3014 } 3015 3016 try_merge_free_space(ctl, entry, false); 3017 steal_from_bitmap(ctl, entry, false); 3018 3019 /* As we insert directly, update these statistics */ 3020 if (!btrfs_free_space_trimmed(entry)) { 3021 ctl->discardable_extents[BTRFS_STAT_CURR]++; 3022 ctl->discardable_bytes[BTRFS_STAT_CURR] += 3023 entry->bytes; 3024 } 3025 } 3026 tree_insert_offset(ctl, NULL, entry); 3027 rb_add_cached(&entry->bytes_index, &ctl->free_space_bytes, 3028 entry_less); 3029 } 3030 cluster->root = RB_ROOT; 3031 spin_unlock(&cluster->lock); 3032 btrfs_put_block_group(block_group); 3033 } 3034 3035 void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group) 3036 { 3037 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3038 struct btrfs_free_cluster *cluster; 3039 struct list_head *head; 3040 3041 spin_lock(&ctl->tree_lock); 3042 while ((head = block_group->cluster_list.next) != 3043 &block_group->cluster_list) { 3044 cluster = list_entry(head, struct btrfs_free_cluster, 3045 block_group_list); 3046 3047 WARN_ON(cluster->block_group != block_group); 3048 __btrfs_return_cluster_to_free_space(block_group, cluster); 3049 3050 cond_resched_lock(&ctl->tree_lock); 3051 } 3052 __btrfs_remove_free_space_cache(ctl); 3053 btrfs_discard_update_discardable(block_group); 3054 spin_unlock(&ctl->tree_lock); 3055 3056 } 3057 3058 /* 3059 * Walk @block_group's free space rb_tree to determine if everything is trimmed. 
3060 */ 3061 bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group) 3062 { 3063 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3064 struct btrfs_free_space *info; 3065 struct rb_node *node; 3066 bool ret = true; 3067 3068 spin_lock(&ctl->tree_lock); 3069 node = rb_first(&ctl->free_space_offset); 3070 3071 while (node) { 3072 info = rb_entry(node, struct btrfs_free_space, offset_index); 3073 3074 if (!btrfs_free_space_trimmed(info)) { 3075 ret = false; 3076 break; 3077 } 3078 3079 node = rb_next(node); 3080 } 3081 3082 spin_unlock(&ctl->tree_lock); 3083 return ret; 3084 } 3085 3086 u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group, 3087 u64 offset, u64 bytes, u64 empty_size, 3088 u64 *max_extent_size) 3089 { 3090 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3091 struct btrfs_discard_ctl *discard_ctl = 3092 &block_group->fs_info->discard_ctl; 3093 struct btrfs_free_space *entry = NULL; 3094 u64 bytes_search = bytes + empty_size; 3095 u64 ret = 0; 3096 u64 align_gap = 0; 3097 u64 align_gap_len = 0; 3098 enum btrfs_trim_state align_gap_trim_state = BTRFS_TRIM_STATE_UNTRIMMED; 3099 bool use_bytes_index = (offset == block_group->start); 3100 3101 ASSERT(!btrfs_is_zoned(block_group->fs_info)); 3102 3103 spin_lock(&ctl->tree_lock); 3104 entry = find_free_space(ctl, &offset, &bytes_search, 3105 block_group->full_stripe_len, max_extent_size, 3106 use_bytes_index); 3107 if (!entry) 3108 goto out; 3109 3110 ret = offset; 3111 if (entry->bitmap) { 3112 bitmap_clear_bits(ctl, entry, offset, bytes, true); 3113 3114 if (!btrfs_free_space_trimmed(entry)) 3115 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); 3116 3117 if (!entry->bytes) 3118 free_bitmap(ctl, entry); 3119 } else { 3120 unlink_free_space(ctl, entry, true); 3121 align_gap_len = offset - entry->offset; 3122 align_gap = entry->offset; 3123 align_gap_trim_state = entry->trim_state; 3124 3125 if (!btrfs_free_space_trimmed(entry)) 3126 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); 3127 3128 entry->offset = offset + bytes; 3129 WARN_ON(entry->bytes < bytes + align_gap_len); 3130 3131 entry->bytes -= bytes + align_gap_len; 3132 if (!entry->bytes) 3133 kmem_cache_free(btrfs_free_space_cachep, entry); 3134 else 3135 link_free_space(ctl, entry); 3136 } 3137 out: 3138 btrfs_discard_update_discardable(block_group); 3139 spin_unlock(&ctl->tree_lock); 3140 3141 if (align_gap_len) 3142 __btrfs_add_free_space(block_group, align_gap, align_gap_len, 3143 align_gap_trim_state); 3144 return ret; 3145 } 3146 3147 /* 3148 * given a cluster, put all of its extents back into the free space 3149 * cache. If a block group is passed, this function will only free 3150 * a cluster that belongs to the passed block group. 3151 * 3152 * Otherwise, it'll get a reference on the block group pointed to by the 3153 * cluster and remove the cluster from it. 
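* The temporary block group reference taken under the cluster lock keeps the
* block group alive while we drop that lock and take the ctl tree lock below.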
3154 */ 3155 void btrfs_return_cluster_to_free_space( 3156 struct btrfs_block_group *block_group, 3157 struct btrfs_free_cluster *cluster) 3158 { 3159 struct btrfs_free_space_ctl *ctl; 3160 3161 /* first, get a safe pointer to the block group */ 3162 spin_lock(&cluster->lock); 3163 if (!block_group) { 3164 block_group = cluster->block_group; 3165 if (!block_group) { 3166 spin_unlock(&cluster->lock); 3167 return; 3168 } 3169 } else if (cluster->block_group != block_group) { 3170 /* someone else has already freed it don't redo their work */ 3171 spin_unlock(&cluster->lock); 3172 return; 3173 } 3174 btrfs_get_block_group(block_group); 3175 spin_unlock(&cluster->lock); 3176 3177 ctl = block_group->free_space_ctl; 3178 3179 /* now return any extents the cluster had on it */ 3180 spin_lock(&ctl->tree_lock); 3181 __btrfs_return_cluster_to_free_space(block_group, cluster); 3182 spin_unlock(&ctl->tree_lock); 3183 3184 btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group); 3185 3186 /* finally drop our ref */ 3187 btrfs_put_block_group(block_group); 3188 } 3189 3190 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group, 3191 struct btrfs_free_cluster *cluster, 3192 struct btrfs_free_space *entry, 3193 u64 bytes, u64 min_start, 3194 u64 *max_extent_size) 3195 { 3196 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3197 int err; 3198 u64 search_start = cluster->window_start; 3199 u64 search_bytes = bytes; 3200 u64 ret = 0; 3201 3202 search_start = min_start; 3203 search_bytes = bytes; 3204 3205 err = search_bitmap(ctl, entry, &search_start, &search_bytes, true); 3206 if (err) { 3207 *max_extent_size = max(get_max_extent_size(entry), 3208 *max_extent_size); 3209 return 0; 3210 } 3211 3212 ret = search_start; 3213 bitmap_clear_bits(ctl, entry, ret, bytes, false); 3214 3215 return ret; 3216 } 3217 3218 /* 3219 * given a cluster, try to allocate 'bytes' from it, returns 0 3220 * if it couldn't find anything suitably large, or a logical disk offset 3221 * if things worked out 3222 */ 3223 u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group, 3224 struct btrfs_free_cluster *cluster, u64 bytes, 3225 u64 min_start, u64 *max_extent_size) 3226 { 3227 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3228 struct btrfs_discard_ctl *discard_ctl = 3229 &block_group->fs_info->discard_ctl; 3230 struct btrfs_free_space *entry = NULL; 3231 struct rb_node *node; 3232 u64 ret = 0; 3233 3234 ASSERT(!btrfs_is_zoned(block_group->fs_info)); 3235 3236 spin_lock(&cluster->lock); 3237 if (bytes > cluster->max_size) 3238 goto out; 3239 3240 if (cluster->block_group != block_group) 3241 goto out; 3242 3243 node = rb_first(&cluster->root); 3244 if (!node) 3245 goto out; 3246 3247 entry = rb_entry(node, struct btrfs_free_space, offset_index); 3248 while (1) { 3249 if (entry->bytes < bytes) 3250 *max_extent_size = max(get_max_extent_size(entry), 3251 *max_extent_size); 3252 3253 if (entry->bytes < bytes || 3254 (!entry->bitmap && entry->offset < min_start)) { 3255 node = rb_next(&entry->offset_index); 3256 if (!node) 3257 break; 3258 entry = rb_entry(node, struct btrfs_free_space, 3259 offset_index); 3260 continue; 3261 } 3262 3263 if (entry->bitmap) { 3264 ret = btrfs_alloc_from_bitmap(block_group, 3265 cluster, entry, bytes, 3266 cluster->window_start, 3267 max_extent_size); 3268 if (ret == 0) { 3269 node = rb_next(&entry->offset_index); 3270 if (!node) 3271 break; 3272 entry = rb_entry(node, struct btrfs_free_space, 3273 offset_index); 3274 
continue; 3275 } 3276 cluster->window_start += bytes; 3277 } else { 3278 ret = entry->offset; 3279 3280 entry->offset += bytes; 3281 entry->bytes -= bytes; 3282 } 3283 3284 break; 3285 } 3286 out: 3287 spin_unlock(&cluster->lock); 3288 3289 if (!ret) 3290 return 0; 3291 3292 spin_lock(&ctl->tree_lock); 3293 3294 if (!btrfs_free_space_trimmed(entry)) 3295 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); 3296 3297 ctl->free_space -= bytes; 3298 if (!entry->bitmap && !btrfs_free_space_trimmed(entry)) 3299 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; 3300 3301 spin_lock(&cluster->lock); 3302 if (entry->bytes == 0) { 3303 rb_erase(&entry->offset_index, &cluster->root); 3304 ctl->free_extents--; 3305 if (entry->bitmap) { 3306 kmem_cache_free(btrfs_free_space_bitmap_cachep, 3307 entry->bitmap); 3308 ctl->total_bitmaps--; 3309 recalculate_thresholds(ctl); 3310 } else if (!btrfs_free_space_trimmed(entry)) { 3311 ctl->discardable_extents[BTRFS_STAT_CURR]--; 3312 } 3313 kmem_cache_free(btrfs_free_space_cachep, entry); 3314 } 3315 3316 spin_unlock(&cluster->lock); 3317 spin_unlock(&ctl->tree_lock); 3318 3319 return ret; 3320 } 3321 3322 static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group, 3323 struct btrfs_free_space *entry, 3324 struct btrfs_free_cluster *cluster, 3325 u64 offset, u64 bytes, 3326 u64 cont1_bytes, u64 min_bytes) 3327 { 3328 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3329 unsigned long next_zero; 3330 unsigned long i; 3331 unsigned long want_bits; 3332 unsigned long min_bits; 3333 unsigned long found_bits; 3334 unsigned long max_bits = 0; 3335 unsigned long start = 0; 3336 unsigned long total_found = 0; 3337 int ret; 3338 3339 lockdep_assert_held(&ctl->tree_lock); 3340 3341 i = offset_to_bit(entry->offset, ctl->unit, 3342 max_t(u64, offset, entry->offset)); 3343 want_bits = bytes_to_bits(bytes, ctl->unit); 3344 min_bits = bytes_to_bits(min_bytes, ctl->unit); 3345 3346 /* 3347 * Don't bother looking for a cluster in this bitmap if it's heavily 3348 * fragmented. 3349 */ 3350 if (entry->max_extent_size && 3351 entry->max_extent_size < cont1_bytes) 3352 return -ENOSPC; 3353 again: 3354 found_bits = 0; 3355 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) { 3356 next_zero = find_next_zero_bit(entry->bitmap, 3357 BITS_PER_BITMAP, i); 3358 if (next_zero - i >= min_bits) { 3359 found_bits = next_zero - i; 3360 if (found_bits > max_bits) 3361 max_bits = found_bits; 3362 break; 3363 } 3364 if (next_zero - i > max_bits) 3365 max_bits = next_zero - i; 3366 i = next_zero; 3367 } 3368 3369 if (!found_bits) { 3370 entry->max_extent_size = (u64)max_bits * ctl->unit; 3371 return -ENOSPC; 3372 } 3373 3374 if (!total_found) { 3375 start = i; 3376 cluster->max_size = 0; 3377 } 3378 3379 total_found += found_bits; 3380 3381 if (cluster->max_size < found_bits * ctl->unit) 3382 cluster->max_size = found_bits * ctl->unit; 3383 3384 if (total_found < want_bits || cluster->max_size < cont1_bytes) { 3385 i = next_zero + 1; 3386 goto again; 3387 } 3388 3389 cluster->window_start = start * ctl->unit + entry->offset; 3390 rb_erase(&entry->offset_index, &ctl->free_space_offset); 3391 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes); 3392 3393 /* 3394 * We need to know if we're currently on the normal space index when we 3395 * manipulate the bitmap so that we know we need to remove and re-insert 3396 * it into the space_index tree. 
Clear the bytes_index node here so the 3397 * bitmap manipulation helpers know not to mess with the space_index 3398 * until this bitmap entry is added back into the normal cache. 3399 */ 3400 RB_CLEAR_NODE(&entry->bytes_index); 3401 3402 ret = tree_insert_offset(ctl, cluster, entry); 3403 ASSERT(!ret); /* -EEXIST; Logic error */ 3404 3405 trace_btrfs_setup_cluster(block_group, cluster, 3406 total_found * ctl->unit, 1); 3407 return 0; 3408 } 3409 3410 /* 3411 * This searches the block group for just extents to fill the cluster with. 3412 * Try to find a cluster with at least bytes total bytes, at least one 3413 * extent of cont1_bytes, and other clusters of at least min_bytes. 3414 */ 3415 static noinline int 3416 setup_cluster_no_bitmap(struct btrfs_block_group *block_group, 3417 struct btrfs_free_cluster *cluster, 3418 struct list_head *bitmaps, u64 offset, u64 bytes, 3419 u64 cont1_bytes, u64 min_bytes) 3420 { 3421 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3422 struct btrfs_free_space *first = NULL; 3423 struct btrfs_free_space *entry = NULL; 3424 struct btrfs_free_space *last; 3425 struct rb_node *node; 3426 u64 window_free; 3427 u64 max_extent; 3428 u64 total_size = 0; 3429 3430 lockdep_assert_held(&ctl->tree_lock); 3431 3432 entry = tree_search_offset(ctl, offset, 0, 1); 3433 if (!entry) 3434 return -ENOSPC; 3435 3436 /* 3437 * We don't want bitmaps, so just move along until we find a normal 3438 * extent entry. 3439 */ 3440 while (entry->bitmap || entry->bytes < min_bytes) { 3441 if (entry->bitmap && list_empty(&entry->list)) 3442 list_add_tail(&entry->list, bitmaps); 3443 node = rb_next(&entry->offset_index); 3444 if (!node) 3445 return -ENOSPC; 3446 entry = rb_entry(node, struct btrfs_free_space, offset_index); 3447 } 3448 3449 window_free = entry->bytes; 3450 max_extent = entry->bytes; 3451 first = entry; 3452 last = entry; 3453 3454 for (node = rb_next(&entry->offset_index); node; 3455 node = rb_next(&entry->offset_index)) { 3456 entry = rb_entry(node, struct btrfs_free_space, offset_index); 3457 3458 if (entry->bitmap) { 3459 if (list_empty(&entry->list)) 3460 list_add_tail(&entry->list, bitmaps); 3461 continue; 3462 } 3463 3464 if (entry->bytes < min_bytes) 3465 continue; 3466 3467 last = entry; 3468 window_free += entry->bytes; 3469 if (entry->bytes > max_extent) 3470 max_extent = entry->bytes; 3471 } 3472 3473 if (window_free < bytes || max_extent < cont1_bytes) 3474 return -ENOSPC; 3475 3476 cluster->window_start = first->offset; 3477 3478 node = &first->offset_index; 3479 3480 /* 3481 * now we've found our entries, pull them out of the free space 3482 * cache and put them into the cluster rbtree 3483 */ 3484 do { 3485 int ret; 3486 3487 entry = rb_entry(node, struct btrfs_free_space, offset_index); 3488 node = rb_next(&entry->offset_index); 3489 if (entry->bitmap || entry->bytes < min_bytes) 3490 continue; 3491 3492 rb_erase(&entry->offset_index, &ctl->free_space_offset); 3493 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes); 3494 ret = tree_insert_offset(ctl, cluster, entry); 3495 total_size += entry->bytes; 3496 ASSERT(!ret); /* -EEXIST; Logic error */ 3497 } while (node && entry != last); 3498 3499 cluster->max_size = max_extent; 3500 trace_btrfs_setup_cluster(block_group, cluster, total_size, 0); 3501 return 0; 3502 } 3503 3504 /* 3505 * This specifically looks for bitmaps that may work in the cluster, we assume 3506 * that we have already failed to find extents that will work. 
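* (setup_cluster_no_bitmap() already queued the bitmap entries it walked past
* on @bitmaps, so that list plus the bitmap covering @offset is all we need
* to scan here.)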
3507 */ 3508 static noinline int 3509 setup_cluster_bitmap(struct btrfs_block_group *block_group, 3510 struct btrfs_free_cluster *cluster, 3511 struct list_head *bitmaps, u64 offset, u64 bytes, 3512 u64 cont1_bytes, u64 min_bytes) 3513 { 3514 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3515 struct btrfs_free_space *entry = NULL; 3516 int ret = -ENOSPC; 3517 u64 bitmap_offset = offset_to_bitmap(ctl, offset); 3518 3519 if (ctl->total_bitmaps == 0) 3520 return -ENOSPC; 3521 3522 /* 3523 * The bitmap that covers offset won't be in the list unless offset 3524 * is just its start offset. 3525 */ 3526 if (!list_empty(bitmaps)) 3527 entry = list_first_entry(bitmaps, struct btrfs_free_space, list); 3528 3529 if (!entry || entry->offset != bitmap_offset) { 3530 entry = tree_search_offset(ctl, bitmap_offset, 1, 0); 3531 if (entry && list_empty(&entry->list)) 3532 list_add(&entry->list, bitmaps); 3533 } 3534 3535 list_for_each_entry(entry, bitmaps, list) { 3536 if (entry->bytes < bytes) 3537 continue; 3538 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, 3539 bytes, cont1_bytes, min_bytes); 3540 if (!ret) 3541 return 0; 3542 } 3543 3544 /* 3545 * The bitmaps list has all the bitmaps that record free space 3546 * starting after offset, so no more search is required. 3547 */ 3548 return -ENOSPC; 3549 } 3550 3551 /* 3552 * here we try to find a cluster of blocks in a block group. The goal 3553 * is to find at least bytes+empty_size. 3554 * We might not find them all in one contiguous area. 3555 * 3556 * returns zero and sets up cluster if things worked out, otherwise 3557 * it returns -enospc 3558 */ 3559 int btrfs_find_space_cluster(struct btrfs_block_group *block_group, 3560 struct btrfs_free_cluster *cluster, 3561 u64 offset, u64 bytes, u64 empty_size) 3562 { 3563 struct btrfs_fs_info *fs_info = block_group->fs_info; 3564 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3565 struct btrfs_free_space *entry, *tmp; 3566 LIST_HEAD(bitmaps); 3567 u64 min_bytes; 3568 u64 cont1_bytes; 3569 int ret; 3570 3571 /* 3572 * Choose the minimum extent size we'll require for this 3573 * cluster. For SSD_SPREAD, don't allow any fragmentation. 3574 * For metadata, allow allocates with smaller extents. For 3575 * data, keep it dense. 3576 */ 3577 if (btrfs_test_opt(fs_info, SSD_SPREAD)) { 3578 cont1_bytes = bytes + empty_size; 3579 min_bytes = cont1_bytes; 3580 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { 3581 cont1_bytes = bytes; 3582 min_bytes = fs_info->sectorsize; 3583 } else { 3584 cont1_bytes = max(bytes, (bytes + empty_size) >> 2); 3585 min_bytes = fs_info->sectorsize; 3586 } 3587 3588 spin_lock(&ctl->tree_lock); 3589 3590 /* 3591 * If we know we don't have enough space to make a cluster don't even 3592 * bother doing all the work to try and find one. 
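* (ctl->free_space is only a cheap aggregate check; even when it passes, the
* setup below can still fail with -ENOSPC if the free space is too
* fragmented.)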
3593 */ 3594 if (ctl->free_space < bytes) { 3595 spin_unlock(&ctl->tree_lock); 3596 return -ENOSPC; 3597 } 3598 3599 spin_lock(&cluster->lock); 3600 3601 /* someone already found a cluster, hooray */ 3602 if (cluster->block_group) { 3603 ret = 0; 3604 goto out; 3605 } 3606 3607 trace_btrfs_find_cluster(block_group, offset, bytes, empty_size, 3608 min_bytes); 3609 3610 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, 3611 bytes + empty_size, 3612 cont1_bytes, min_bytes); 3613 if (ret) 3614 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps, 3615 offset, bytes + empty_size, 3616 cont1_bytes, min_bytes); 3617 3618 /* Clear our temporary list */ 3619 list_for_each_entry_safe(entry, tmp, &bitmaps, list) 3620 list_del_init(&entry->list); 3621 3622 if (!ret) { 3623 btrfs_get_block_group(block_group); 3624 list_add_tail(&cluster->block_group_list, 3625 &block_group->cluster_list); 3626 cluster->block_group = block_group; 3627 } else { 3628 trace_btrfs_failed_cluster_setup(block_group); 3629 } 3630 out: 3631 spin_unlock(&cluster->lock); 3632 spin_unlock(&ctl->tree_lock); 3633 3634 return ret; 3635 } 3636 3637 /* 3638 * simple code to zero out a cluster 3639 */ 3640 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) 3641 { 3642 spin_lock_init(&cluster->lock); 3643 spin_lock_init(&cluster->refill_lock); 3644 cluster->root = RB_ROOT; 3645 cluster->max_size = 0; 3646 cluster->fragmented = false; 3647 INIT_LIST_HEAD(&cluster->block_group_list); 3648 cluster->block_group = NULL; 3649 } 3650 3651 static int do_trimming(struct btrfs_block_group *block_group, 3652 u64 *total_trimmed, u64 start, u64 bytes, 3653 u64 reserved_start, u64 reserved_bytes, 3654 enum btrfs_trim_state reserved_trim_state, 3655 struct btrfs_trim_range *trim_entry) 3656 { 3657 struct btrfs_space_info *space_info = block_group->space_info; 3658 struct btrfs_fs_info *fs_info = block_group->fs_info; 3659 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3660 int ret; 3661 int update = 0; 3662 const u64 end = start + bytes; 3663 const u64 reserved_end = reserved_start + reserved_bytes; 3664 enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED; 3665 u64 trimmed = 0; 3666 3667 spin_lock(&space_info->lock); 3668 spin_lock(&block_group->lock); 3669 if (!block_group->ro) { 3670 block_group->reserved += reserved_bytes; 3671 space_info->bytes_reserved += reserved_bytes; 3672 update = 1; 3673 } 3674 spin_unlock(&block_group->lock); 3675 spin_unlock(&space_info->lock); 3676 3677 ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed); 3678 if (!ret) { 3679 *total_trimmed += trimmed; 3680 trim_state = BTRFS_TRIM_STATE_TRIMMED; 3681 } 3682 3683 mutex_lock(&ctl->cache_writeout_mutex); 3684 if (reserved_start < start) 3685 __btrfs_add_free_space(block_group, reserved_start, 3686 start - reserved_start, 3687 reserved_trim_state); 3688 if (end < reserved_end) 3689 __btrfs_add_free_space(block_group, end, reserved_end - end, 3690 reserved_trim_state); 3691 __btrfs_add_free_space(block_group, start, bytes, trim_state); 3692 list_del(&trim_entry->list); 3693 mutex_unlock(&ctl->cache_writeout_mutex); 3694 3695 if (update) { 3696 spin_lock(&space_info->lock); 3697 spin_lock(&block_group->lock); 3698 if (block_group->ro) 3699 space_info->bytes_readonly += reserved_bytes; 3700 block_group->reserved -= reserved_bytes; 3701 space_info->bytes_reserved -= reserved_bytes; 3702 spin_unlock(&block_group->lock); 3703 spin_unlock(&space_info->lock); 3704 } 3705 3706 return ret; 3707 } 3708 3709 /* 
3710 * If @async is set, then we will trim 1 region and return. 3711 */ 3712 static int trim_no_bitmap(struct btrfs_block_group *block_group, 3713 u64 *total_trimmed, u64 start, u64 end, u64 minlen, 3714 bool async) 3715 { 3716 struct btrfs_discard_ctl *discard_ctl = 3717 &block_group->fs_info->discard_ctl; 3718 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 3719 struct btrfs_free_space *entry; 3720 struct rb_node *node; 3721 int ret = 0; 3722 u64 extent_start; 3723 u64 extent_bytes; 3724 enum btrfs_trim_state extent_trim_state; 3725 u64 bytes; 3726 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); 3727 3728 while (start < end) { 3729 struct btrfs_trim_range trim_entry; 3730 3731 mutex_lock(&ctl->cache_writeout_mutex); 3732 spin_lock(&ctl->tree_lock); 3733 3734 if (ctl->free_space < minlen) 3735 goto out_unlock; 3736 3737 entry = tree_search_offset(ctl, start, 0, 1); 3738 if (!entry) 3739 goto out_unlock; 3740 3741 /* Skip bitmaps and if async, already trimmed entries */ 3742 while (entry->bitmap || 3743 (async && btrfs_free_space_trimmed(entry))) { 3744 node = rb_next(&entry->offset_index); 3745 if (!node) 3746 goto out_unlock; 3747 entry = rb_entry(node, struct btrfs_free_space, 3748 offset_index); 3749 } 3750 3751 if (entry->offset >= end) 3752 goto out_unlock; 3753 3754 extent_start = entry->offset; 3755 extent_bytes = entry->bytes; 3756 extent_trim_state = entry->trim_state; 3757 if (async) { 3758 start = entry->offset; 3759 bytes = entry->bytes; 3760 if (bytes < minlen) { 3761 spin_unlock(&ctl->tree_lock); 3762 mutex_unlock(&ctl->cache_writeout_mutex); 3763 goto next; 3764 } 3765 unlink_free_space(ctl, entry, true); 3766 /* 3767 * Let bytes = BTRFS_MAX_DISCARD_SIZE + X. 3768 * If X < BTRFS_ASYNC_DISCARD_MIN_FILTER, we won't trim 3769 * X when we come back around. So trim it now. 
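* Otherwise we split: trim max_discard_size worth now, and re-link the
* shrunken entry so the remainder is picked up on a later pass.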
3770 */ 3771 if (max_discard_size && 3772 bytes >= (max_discard_size + 3773 BTRFS_ASYNC_DISCARD_MIN_FILTER)) { 3774 bytes = max_discard_size; 3775 extent_bytes = max_discard_size; 3776 entry->offset += max_discard_size; 3777 entry->bytes -= max_discard_size; 3778 link_free_space(ctl, entry); 3779 } else { 3780 kmem_cache_free(btrfs_free_space_cachep, entry); 3781 } 3782 } else { 3783 start = max(start, extent_start); 3784 bytes = min(extent_start + extent_bytes, end) - start; 3785 if (bytes < minlen) { 3786 spin_unlock(&ctl->tree_lock); 3787 mutex_unlock(&ctl->cache_writeout_mutex); 3788 goto next; 3789 } 3790 3791 unlink_free_space(ctl, entry, true); 3792 kmem_cache_free(btrfs_free_space_cachep, entry); 3793 } 3794 3795 spin_unlock(&ctl->tree_lock); 3796 trim_entry.start = extent_start; 3797 trim_entry.bytes = extent_bytes; 3798 list_add_tail(&trim_entry.list, &ctl->trimming_ranges); 3799 mutex_unlock(&ctl->cache_writeout_mutex); 3800 3801 ret = do_trimming(block_group, total_trimmed, start, bytes, 3802 extent_start, extent_bytes, extent_trim_state, 3803 &trim_entry); 3804 if (ret) { 3805 block_group->discard_cursor = start + bytes; 3806 break; 3807 } 3808 next: 3809 start += bytes; 3810 block_group->discard_cursor = start; 3811 if (async && *total_trimmed) 3812 break; 3813 3814 if (btrfs_trim_interrupted()) { 3815 ret = -ERESTARTSYS; 3816 break; 3817 } 3818 3819 cond_resched(); 3820 } 3821 3822 return ret; 3823 3824 out_unlock: 3825 block_group->discard_cursor = btrfs_block_group_end(block_group); 3826 spin_unlock(&ctl->tree_lock); 3827 mutex_unlock(&ctl->cache_writeout_mutex); 3828 3829 return ret; 3830 } 3831 3832 /* 3833 * If we break out of trimming a bitmap prematurely, we should reset the 3834 * trimming bit. In a rather contrived case, it's possible to race here so 3835 * reset the state to BTRFS_TRIM_STATE_UNTRIMMED. 3836 * 3837 * start = start of bitmap 3838 * end = near end of bitmap 3839 * 3840 * Thread 1: Thread 2: 3841 * trim_bitmaps(start) 3842 * trim_bitmaps(end) 3843 * end_trimming_bitmap() 3844 * reset_trimming_bitmap() 3845 */ 3846 static void reset_trimming_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset) 3847 { 3848 struct btrfs_free_space *entry; 3849 3850 spin_lock(&ctl->tree_lock); 3851 entry = tree_search_offset(ctl, offset, 1, 0); 3852 if (entry) { 3853 if (btrfs_free_space_trimmed(entry)) { 3854 ctl->discardable_extents[BTRFS_STAT_CURR] += 3855 entry->bitmap_extents; 3856 ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes; 3857 } 3858 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; 3859 } 3860 3861 spin_unlock(&ctl->tree_lock); 3862 } 3863 3864 static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl, 3865 struct btrfs_free_space *entry) 3866 { 3867 if (btrfs_free_space_trimming_bitmap(entry)) { 3868 entry->trim_state = BTRFS_TRIM_STATE_TRIMMED; 3869 ctl->discardable_extents[BTRFS_STAT_CURR] -= 3870 entry->bitmap_extents; 3871 ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes; 3872 } 3873 } 3874 3875 /* 3876 * If @async is set, then we will trim 1 region and return.
3877 */
3878 static int trim_bitmaps(struct btrfs_block_group *block_group,
3879 u64 *total_trimmed, u64 start, u64 end, u64 minlen,
3880 u64 maxlen, bool async)
3881 {
3882 struct btrfs_discard_ctl *discard_ctl =
3883 &block_group->fs_info->discard_ctl;
3884 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3885 struct btrfs_free_space *entry;
3886 int ret = 0;
3887 int ret2;
3888 u64 bytes;
3889 u64 offset = offset_to_bitmap(ctl, start);
3890 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size);
3891
3892 while (offset < end) {
3893 bool next_bitmap = false;
3894 struct btrfs_trim_range trim_entry;
3895
3896 mutex_lock(&ctl->cache_writeout_mutex);
3897 spin_lock(&ctl->tree_lock);
3898
3899 if (ctl->free_space < minlen) {
3900 block_group->discard_cursor =
3901 btrfs_block_group_end(block_group);
3902 spin_unlock(&ctl->tree_lock);
3903 mutex_unlock(&ctl->cache_writeout_mutex);
3904 break;
3905 }
3906
3907 entry = tree_search_offset(ctl, offset, 1, 0);
3908 /*
3909 * Bitmaps are marked trimmed lossily now to prevent constant
3910 * discarding of the same bitmap (the reason why we are bound
3911 * by the filters). So, retrim the block group bitmaps when we
3912 * are preparing to punt to the unused_bgs list. This uses
3913 * @minlen to determine if we are in BTRFS_DISCARD_INDEX_UNUSED,
3914 * which is the only discard index that sets minlen to 0.
3915 */
3916 if (!entry || (async && minlen && start == offset &&
3917 btrfs_free_space_trimmed(entry))) {
3918 spin_unlock(&ctl->tree_lock);
3919 mutex_unlock(&ctl->cache_writeout_mutex);
3920 next_bitmap = true;
3921 goto next;
3922 }
3923
3924 /*
3925 * Async discard bitmap trimming begins by setting the start
3926 * to key.objectid, and offset_to_bitmap() aligns it to the
3927 * start of the bitmap. This lets us know we are fully
3928 * scanning the bitmap rather than only some portion of it.
3929 */
3930 if (start == offset)
3931 entry->trim_state = BTRFS_TRIM_STATE_TRIMMING;
3932
3933 bytes = minlen;
3934 ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
3935 if (ret2 || start >= end) {
3936 /*
3937 * We lossily consider a bitmap trimmed if we only skip
3938 * over regions <= BTRFS_ASYNC_DISCARD_MIN_FILTER.
3939 */
3940 if (ret2 && minlen <= BTRFS_ASYNC_DISCARD_MIN_FILTER)
3941 end_trimming_bitmap(ctl, entry);
3942 else
3943 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3944 spin_unlock(&ctl->tree_lock);
3945 mutex_unlock(&ctl->cache_writeout_mutex);
3946 next_bitmap = true;
3947 goto next;
3948 }
3949
3950 /*
3951 * We already trimmed a region, but are using the locking above
3952 * to reset the trim_state.
3953 */
3954 if (async && *total_trimmed) {
3955 spin_unlock(&ctl->tree_lock);
3956 mutex_unlock(&ctl->cache_writeout_mutex);
3957 goto out;
3958 }
3959
3960 bytes = min(bytes, end - start);
3961 if (bytes < minlen || (async && maxlen && bytes > maxlen)) {
3962 spin_unlock(&ctl->tree_lock);
3963 mutex_unlock(&ctl->cache_writeout_mutex);
3964 goto next;
3965 }
3966
3967 /*
3968 * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
3969 * If X < @minlen, we won't trim X when we come back around.
3970 * So trim it now. We differ here from trimming extents as we
3971 * don't keep individual state per bit.
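 *
 * Illustrative example (assumed sizes): with max_discard_size = 64M
 * and @minlen = 32K, a 64M+16K run of set bits is discarded whole
 * here, whereas a 65M run is clamped to 64M and the remaining 1M is
 * left set in the bitmap to be found on a later pass.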
3972 */ 3973 if (async && 3974 max_discard_size && 3975 bytes > (max_discard_size + minlen)) 3976 bytes = max_discard_size; 3977 3978 bitmap_clear_bits(ctl, entry, start, bytes, true); 3979 if (entry->bytes == 0) 3980 free_bitmap(ctl, entry); 3981 3982 spin_unlock(&ctl->tree_lock); 3983 trim_entry.start = start; 3984 trim_entry.bytes = bytes; 3985 list_add_tail(&trim_entry.list, &ctl->trimming_ranges); 3986 mutex_unlock(&ctl->cache_writeout_mutex); 3987 3988 ret = do_trimming(block_group, total_trimmed, start, bytes, 3989 start, bytes, 0, &trim_entry); 3990 if (ret) { 3991 reset_trimming_bitmap(ctl, offset); 3992 block_group->discard_cursor = 3993 btrfs_block_group_end(block_group); 3994 break; 3995 } 3996 next: 3997 if (next_bitmap) { 3998 offset += BITS_PER_BITMAP * ctl->unit; 3999 start = offset; 4000 } else { 4001 start += bytes; 4002 } 4003 block_group->discard_cursor = start; 4004 4005 if (btrfs_trim_interrupted()) { 4006 if (start != offset) 4007 reset_trimming_bitmap(ctl, offset); 4008 ret = -ERESTARTSYS; 4009 break; 4010 } 4011 4012 cond_resched(); 4013 } 4014 4015 if (offset >= end) 4016 block_group->discard_cursor = end; 4017 4018 out: 4019 return ret; 4020 } 4021 4022 int btrfs_trim_block_group(struct btrfs_block_group *block_group, 4023 u64 *trimmed, u64 start, u64 end, u64 minlen) 4024 { 4025 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 4026 int ret; 4027 u64 rem = 0; 4028 4029 ASSERT(!btrfs_is_zoned(block_group->fs_info)); 4030 4031 *trimmed = 0; 4032 4033 spin_lock(&block_group->lock); 4034 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { 4035 spin_unlock(&block_group->lock); 4036 return 0; 4037 } 4038 btrfs_freeze_block_group(block_group); 4039 spin_unlock(&block_group->lock); 4040 4041 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, false); 4042 if (ret) 4043 goto out; 4044 4045 ret = trim_bitmaps(block_group, trimmed, start, end, minlen, 0, false); 4046 div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem); 4047 /* If we ended in the middle of a bitmap, reset the trimming flag */ 4048 if (rem) 4049 reset_trimming_bitmap(ctl, offset_to_bitmap(ctl, end)); 4050 out: 4051 btrfs_unfreeze_block_group(block_group); 4052 return ret; 4053 } 4054 4055 int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group, 4056 u64 *trimmed, u64 start, u64 end, u64 minlen, 4057 bool async) 4058 { 4059 int ret; 4060 4061 *trimmed = 0; 4062 4063 spin_lock(&block_group->lock); 4064 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { 4065 spin_unlock(&block_group->lock); 4066 return 0; 4067 } 4068 btrfs_freeze_block_group(block_group); 4069 spin_unlock(&block_group->lock); 4070 4071 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, async); 4072 btrfs_unfreeze_block_group(block_group); 4073 4074 return ret; 4075 } 4076 4077 int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group, 4078 u64 *trimmed, u64 start, u64 end, u64 minlen, 4079 u64 maxlen, bool async) 4080 { 4081 int ret; 4082 4083 *trimmed = 0; 4084 4085 spin_lock(&block_group->lock); 4086 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { 4087 spin_unlock(&block_group->lock); 4088 return 0; 4089 } 4090 btrfs_freeze_block_group(block_group); 4091 spin_unlock(&block_group->lock); 4092 4093 ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen, 4094 async); 4095 4096 btrfs_unfreeze_block_group(block_group); 4097 4098 return ret; 4099 } 4100 4101 bool btrfs_free_space_cache_v1_active(struct 
btrfs_fs_info *fs_info)
4102 {
4103 return btrfs_super_cache_generation(fs_info->super_copy);
4104 }
4105
4106 static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info,
4107 struct btrfs_trans_handle *trans)
4108 {
4109 struct btrfs_block_group *block_group;
4110 struct rb_node *node;
4111 int ret = 0;
4112
4113 btrfs_info(fs_info, "cleaning free space cache v1");
4114
4115 node = rb_first_cached(&fs_info->block_group_cache_tree);
4116 while (node) {
4117 block_group = rb_entry(node, struct btrfs_block_group, cache_node);
4118 ret = btrfs_remove_free_space_inode(trans, NULL, block_group);
4119 if (ret)
4120 goto out;
4121 node = rb_next(node);
4122 }
4123 out:
4124 return ret;
4125 }
4126
4127 int btrfs_set_free_space_cache_v1_active(struct btrfs_fs_info *fs_info, bool active)
4128 {
4129 struct btrfs_trans_handle *trans;
4130 int ret;
4131
4132 /*
4133 * update_super_roots will appropriately set or unset
4134 * super_copy->cache_generation based on SPACE_CACHE and
4135 * BTRFS_FS_CLEANUP_SPACE_CACHE_V1. For this reason, we need a
4136 * transaction commit whether we are enabling space cache v1 and don't
4137 * have any other work to do, or are disabling it and removing free
4138 * space inodes.
4139 */
4140 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4141 if (IS_ERR(trans))
4142 return PTR_ERR(trans);
4143
4144 if (!active) {
4145 set_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);
4146 ret = cleanup_free_space_cache_v1(fs_info, trans);
4147 if (ret) {
4148 btrfs_abort_transaction(trans, ret);
4149 btrfs_end_transaction(trans);
4150 goto out;
4151 }
4152 }
4153
4154 ret = btrfs_commit_transaction(trans);
4155 out:
4156 clear_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);
4157
4158 return ret;
4159 }
4160
4161 int __init btrfs_free_space_init(void)
4162 {
4163 btrfs_free_space_cachep = KMEM_CACHE(btrfs_free_space, 0);
4164 if (!btrfs_free_space_cachep)
4165 return -ENOMEM;
4166
4167 btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
4168 PAGE_SIZE, PAGE_SIZE,
4169 0, NULL);
4170 if (!btrfs_free_space_bitmap_cachep) {
4171 kmem_cache_destroy(btrfs_free_space_cachep);
4172 return -ENOMEM;
4173 }
4174
4175 return 0;
4176 }
4177
4178 void __cold btrfs_free_space_exit(void)
4179 {
4180 kmem_cache_destroy(btrfs_free_space_cachep);
4181 kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
4182 }
4183
4184 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4185 /*
4186 * Use this if you need to make a bitmap or extent entry specifically. It
4187 * doesn't do any of the merging that add_free_space does; it acts a lot like
4188 * how the free space cache loading works, so you can get really weird
4189 * configurations.
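 *
 * For example (illustrative call sites only), a self-test could build
 * one extent entry and one bitmap-backed entry without any merging:
 *
 *	ret = test_add_free_space_entry(cache, 0, SZ_4M, false);
 *	ret = test_add_free_space_entry(cache, SZ_4M, SZ_1M, true);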
4190 */ 4191 int test_add_free_space_entry(struct btrfs_block_group *cache, 4192 u64 offset, u64 bytes, bool bitmap) 4193 { 4194 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; 4195 struct btrfs_free_space *info = NULL, *bitmap_info; 4196 void *map = NULL; 4197 enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_TRIMMED; 4198 u64 bytes_added; 4199 int ret; 4200 4201 again: 4202 if (!info) { 4203 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); 4204 if (!info) 4205 return -ENOMEM; 4206 } 4207 4208 if (!bitmap) { 4209 spin_lock(&ctl->tree_lock); 4210 info->offset = offset; 4211 info->bytes = bytes; 4212 info->max_extent_size = 0; 4213 ret = link_free_space(ctl, info); 4214 spin_unlock(&ctl->tree_lock); 4215 if (ret) 4216 kmem_cache_free(btrfs_free_space_cachep, info); 4217 return ret; 4218 } 4219 4220 if (!map) { 4221 map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS); 4222 if (!map) { 4223 kmem_cache_free(btrfs_free_space_cachep, info); 4224 return -ENOMEM; 4225 } 4226 } 4227 4228 spin_lock(&ctl->tree_lock); 4229 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 4230 1, 0); 4231 if (!bitmap_info) { 4232 info->bitmap = map; 4233 map = NULL; 4234 add_new_bitmap(ctl, info, offset); 4235 bitmap_info = info; 4236 info = NULL; 4237 } 4238 4239 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes, 4240 trim_state); 4241 4242 bytes -= bytes_added; 4243 offset += bytes_added; 4244 spin_unlock(&ctl->tree_lock); 4245 4246 if (bytes) 4247 goto again; 4248 4249 if (info) 4250 kmem_cache_free(btrfs_free_space_cachep, info); 4251 if (map) 4252 kmem_cache_free(btrfs_free_space_bitmap_cachep, map); 4253 return 0; 4254 } 4255 4256 /* 4257 * Checks to see if the given range is in the free space cache. This is really 4258 * just used to check the absence of space, so if there is free space in the 4259 * range at all we will return 1. 
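 *
 * For example (illustrative usage), after btrfs_remove_free_space(cache,
 * offset, bytes) a self-test can assert that test_check_exists(cache,
 * offset, bytes) returns 0.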
4260 */ 4261 int test_check_exists(struct btrfs_block_group *cache, 4262 u64 offset, u64 bytes) 4263 { 4264 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; 4265 struct btrfs_free_space *info; 4266 int ret = 0; 4267 4268 spin_lock(&ctl->tree_lock); 4269 info = tree_search_offset(ctl, offset, 0, 0); 4270 if (!info) { 4271 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 4272 1, 0); 4273 if (!info) 4274 goto out; 4275 } 4276 4277 have_info: 4278 if (info->bitmap) { 4279 u64 bit_off, bit_bytes; 4280 struct rb_node *n; 4281 struct btrfs_free_space *tmp; 4282 4283 bit_off = offset; 4284 bit_bytes = ctl->unit; 4285 ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false); 4286 if (!ret) { 4287 if (bit_off == offset) { 4288 ret = 1; 4289 goto out; 4290 } else if (bit_off > offset && 4291 offset + bytes > bit_off) { 4292 ret = 1; 4293 goto out; 4294 } 4295 } 4296 4297 n = rb_prev(&info->offset_index); 4298 while (n) { 4299 tmp = rb_entry(n, struct btrfs_free_space, 4300 offset_index); 4301 if (tmp->offset + tmp->bytes < offset) 4302 break; 4303 if (offset + bytes < tmp->offset) { 4304 n = rb_prev(&tmp->offset_index); 4305 continue; 4306 } 4307 info = tmp; 4308 goto have_info; 4309 } 4310 4311 n = rb_next(&info->offset_index); 4312 while (n) { 4313 tmp = rb_entry(n, struct btrfs_free_space, 4314 offset_index); 4315 if (offset + bytes < tmp->offset) 4316 break; 4317 if (tmp->offset + tmp->bytes < offset) { 4318 n = rb_next(&tmp->offset_index); 4319 continue; 4320 } 4321 info = tmp; 4322 goto have_info; 4323 } 4324 4325 ret = 0; 4326 goto out; 4327 } 4328 4329 if (info->offset == offset) { 4330 ret = 1; 4331 goto out; 4332 } 4333 4334 if (offset > info->offset && offset < info->offset + info->bytes) 4335 ret = 1; 4336 out: 4337 spin_unlock(&ctl->tree_lock); 4338 return ret; 4339 } 4340 #endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */ 4341