Lines matching +full:cluster +full:index in fs/btrfs/free-space-cache.c
1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/error-injection.h>
18 #include "free-space-cache.h"
20 #include "disk-io.h"
22 #include "space-info.h"
23 #include "block-group.h"
26 #include "inode-item.h"
28 #include "file-item.h"
68 while ((node = rb_last(&ctl->free_space_offset)) != NULL) { in __btrfs_remove_free_space_cache()
70 if (!info->bitmap) { in __btrfs_remove_free_space_cache()
77 cond_resched_lock(&ctl->tree_lock); in __btrfs_remove_free_space_cache()
103 return ERR_PTR(-ENOENT); in __lookup_free_space_inode()
106 leaf = path->nodes[0]; in __lookup_free_space_inode()
107 header = btrfs_item_ptr(leaf, path->slots[0], in __lookup_free_space_inode()
124 mapping_set_gfp_mask(inode->i_mapping, in __lookup_free_space_inode()
125 mapping_gfp_constraint(inode->i_mapping, in __lookup_free_space_inode()
134 struct btrfs_fs_info *fs_info = block_group->fs_info; in lookup_free_space_inode()
138 spin_lock(&block_group->lock); in lookup_free_space_inode()
139 if (block_group->inode) in lookup_free_space_inode()
140 inode = igrab(&block_group->inode->vfs_inode); in lookup_free_space_inode()
141 spin_unlock(&block_group->lock); in lookup_free_space_inode()
145 inode = __lookup_free_space_inode(fs_info->tree_root, path, in lookup_free_space_inode()
146 block_group->start); in lookup_free_space_inode()
150 spin_lock(&block_group->lock); in lookup_free_space_inode()
151 if (!((BTRFS_I(inode)->flags & flags) == flags)) { in lookup_free_space_inode()
153 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM | in lookup_free_space_inode()
155 block_group->disk_cache_state = BTRFS_DC_CLEAR; in lookup_free_space_inode()
158 if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) in lookup_free_space_inode()
159 block_group->inode = BTRFS_I(igrab(inode)); in lookup_free_space_inode()
160 spin_unlock(&block_group->lock); in lookup_free_space_inode()
184 leaf = path->nodes[0]; in __create_free_space_inode()
185 inode_item = btrfs_item_ptr(leaf, path->slots[0], in __create_free_space_inode()
187 btrfs_item_key(leaf, &disk_key, path->slots[0]); in __create_free_space_inode()
190 btrfs_set_inode_generation(leaf, inode_item, trans->transid); in __create_free_space_inode()
198 btrfs_set_inode_transid(leaf, inode_item, trans->transid); in __create_free_space_inode()
213 leaf = path->nodes[0]; in __create_free_space_inode()
214 header = btrfs_item_ptr(leaf, path->slots[0], in __create_free_space_inode()
231 ret = btrfs_get_free_objectid(trans->fs_info->tree_root, &ino); in create_free_space_inode()
235 return __create_free_space_inode(trans->fs_info->tree_root, trans, path, in create_free_space_inode()
236 ino, block_group->start); in create_free_space_inode()
254 return -ENOMEM; in btrfs_remove_free_space_inode()
259 if (PTR_ERR(inode) != -ENOENT) in btrfs_remove_free_space_inode()
270 spin_lock(&block_group->lock); in btrfs_remove_free_space_inode()
271 if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) { in btrfs_remove_free_space_inode()
272 block_group->inode = NULL; in btrfs_remove_free_space_inode()
273 spin_unlock(&block_group->lock); in btrfs_remove_free_space_inode()
276 spin_unlock(&block_group->lock); in btrfs_remove_free_space_inode()
283 key.offset = block_group->start; in btrfs_remove_free_space_inode()
284 ret = btrfs_search_slot(trans, trans->fs_info->tree_root, &key, path, in btrfs_remove_free_space_inode()
285 -1, 1); in btrfs_remove_free_space_inode()
291 ret = btrfs_del_item(trans, trans->fs_info->tree_root, path); in btrfs_remove_free_space_inode()
309 struct btrfs_root *root = inode->root; in btrfs_truncate_free_space_cache()
318 ret = -ENOMEM; in btrfs_truncate_free_space_cache()
322 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_truncate_free_space_cache()
323 if (!list_empty(&block_group->io_list)) { in btrfs_truncate_free_space_cache()
324 list_del_init(&block_group->io_list); in btrfs_truncate_free_space_cache()
334 spin_lock(&block_group->lock); in btrfs_truncate_free_space_cache()
335 block_group->disk_cache_state = BTRFS_DC_CLEAR; in btrfs_truncate_free_space_cache()
336 spin_unlock(&block_group->lock); in btrfs_truncate_free_space_cache()
343 lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); in btrfs_truncate_free_space_cache()
344 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false); in btrfs_truncate_free_space_cache()
348 * need to check for -EAGAIN. in btrfs_truncate_free_space_cache()
352 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); in btrfs_truncate_free_space_cache()
355 unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); in btrfs_truncate_free_space_cache()
363 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_truncate_free_space_cache()
375 file_ra_state_init(&ra, inode->i_mapping); in readahead_cache()
376 last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; in readahead_cache()
378 page_cache_sync_readahead(inode->i_mapping, &ra, NULL, 0, last_index); in readahead_cache()
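
The readahead above spans the whole cache file: the index of the last page is derived from the inode size. A minimal userspace sketch of that computation, assuming 4 KiB pages (PAGE_SHIFT = 12 is an assumption here, not taken from the source):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed: 4 KiB pages */

    static uint64_t last_page_index(uint64_t isize)
    {
        /* The byte at offset isize - 1 is the last one; the shift gives its page. */
        return (isize - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)last_page_index(8192)); /* -> 1 */
        return 0;
    }
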
390 return -ENOSPC; in io_ctl_init()
394 io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS); in io_ctl_init()
395 if (!io_ctl->pages) in io_ctl_init()
396 return -ENOMEM; in io_ctl_init()
398 io_ctl->num_pages = num_pages; in io_ctl_init()
399 io_ctl->fs_info = inode_to_fs_info(inode); in io_ctl_init()
400 io_ctl->inode = inode; in io_ctl_init()
408 kfree(io_ctl->pages); in io_ctl_free()
409 io_ctl->pages = NULL; in io_ctl_free()
414 if (io_ctl->cur) { in io_ctl_unmap_page()
415 io_ctl->cur = NULL; in io_ctl_unmap_page()
416 io_ctl->orig = NULL; in io_ctl_unmap_page()
422 ASSERT(io_ctl->index < io_ctl->num_pages); in io_ctl_map_page()
423 io_ctl->page = io_ctl->pages[io_ctl->index++]; in io_ctl_map_page()
424 io_ctl->cur = page_address(io_ctl->page); in io_ctl_map_page()
425 io_ctl->orig = io_ctl->cur; in io_ctl_map_page()
426 io_ctl->size = PAGE_SIZE; in io_ctl_map_page()
428 clear_page(io_ctl->cur); in io_ctl_map_page()
437 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_drop_pages()
438 if (io_ctl->pages[i]) { in io_ctl_drop_pages()
439 btrfs_folio_clear_checked(io_ctl->fs_info, in io_ctl_drop_pages()
440 page_folio(io_ctl->pages[i]), in io_ctl_drop_pages()
441 page_offset(io_ctl->pages[i]), in io_ctl_drop_pages()
443 unlock_page(io_ctl->pages[i]); in io_ctl_drop_pages()
444 put_page(io_ctl->pages[i]); in io_ctl_drop_pages()
452 struct inode *inode = io_ctl->inode; in io_ctl_prepare_pages()
453 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); in io_ctl_prepare_pages()
456 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_prepare_pages()
459 page = find_or_create_page(inode->i_mapping, i, mask); in io_ctl_prepare_pages()
462 return -ENOMEM; in io_ctl_prepare_pages()
473 io_ctl->pages[i] = page; in io_ctl_prepare_pages()
477 if (page->mapping != inode->i_mapping) { in io_ctl_prepare_pages()
478 btrfs_err(BTRFS_I(inode)->root->fs_info, in io_ctl_prepare_pages()
481 return -EIO; in io_ctl_prepare_pages()
484 btrfs_err(BTRFS_I(inode)->root->fs_info, in io_ctl_prepare_pages()
487 return -EIO; in io_ctl_prepare_pages()
492 for (i = 0; i < io_ctl->num_pages; i++) in io_ctl_prepare_pages()
493 clear_page_dirty_for_io(io_ctl->pages[i]); in io_ctl_prepare_pages()
506 io_ctl->cur += (sizeof(u32) * io_ctl->num_pages); in io_ctl_set_generation()
507 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); in io_ctl_set_generation()
509 put_unaligned_le64(generation, io_ctl->cur); in io_ctl_set_generation()
510 io_ctl->cur += sizeof(u64); in io_ctl_set_generation()
521 io_ctl->cur += sizeof(u32) * io_ctl->num_pages; in io_ctl_check_generation()
522 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); in io_ctl_check_generation()
524 cache_gen = get_unaligned_le64(io_ctl->cur); in io_ctl_check_generation()
526 btrfs_err_rl(io_ctl->fs_info, in io_ctl_check_generation()
530 return -EIO; in io_ctl_check_generation()
532 io_ctl->cur += sizeof(u64); in io_ctl_check_generation()
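
The two generation helpers above skip the same header region: page 0 begins with one u32 checksum per cache page, immediately followed by a little-endian u64 generation. A userspace sketch of the offsets involved (num_pages is an illustrative value):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int num_pages = 4;                      /* illustrative       */
        size_t crc_array = sizeof(uint32_t) * num_pages; /* per-page checksums */
        size_t gen_off   = crc_array;                    /* __le64 generation  */
        size_t first_off = gen_off + sizeof(uint64_t);   /* first cache entry  */

        printf("crcs [0,%zu) generation @%zu entries @%zu\n",
               crc_array, gen_off, first_off);
        return 0;
    }
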
536 static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index) in io_ctl_set_crc() argument
542 if (index == 0) in io_ctl_set_crc()
543 offset = sizeof(u32) * io_ctl->num_pages; in io_ctl_set_crc()
545 crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); in io_ctl_set_crc()
548 tmp = page_address(io_ctl->pages[0]); in io_ctl_set_crc()
549 tmp += index; in io_ctl_set_crc()
553 static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index) in io_ctl_check_crc() argument
559 if (index == 0) in io_ctl_check_crc()
560 offset = sizeof(u32) * io_ctl->num_pages; in io_ctl_check_crc()
562 tmp = page_address(io_ctl->pages[0]); in io_ctl_check_crc()
563 tmp += index; in io_ctl_check_crc()
567 crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); in io_ctl_check_crc()
570 btrfs_err_rl(io_ctl->fs_info, in io_ctl_check_crc()
573 return -EIO; in io_ctl_check_crc()
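
Both checksum helpers compute crc32c over a page's contents, skipping the checksum array itself on page 0, and store the result in the u32 slot for that page index. A self-contained bitwise CRC32C as a stand-in for the kernel's crc32c() (one-shot form here; the kernel API is seeded and finalized separately):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78. */
    static uint32_t crc32c(const void *buf, size_t len)
    {
        const uint8_t *p = buf;
        uint32_t crc = 0xFFFFFFFFu;

        while (len--) {
            crc ^= *p++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
        }
        return ~crc;
    }

    int main(void)
    {
        /* The standard CRC32C check value for "123456789" is 0xe3069283. */
        printf("%08x\n", crc32c("123456789", 9));
        return 0;
    }
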
584 if (!io_ctl->cur) in io_ctl_add_entry()
585 return -ENOSPC; in io_ctl_add_entry()
587 entry = io_ctl->cur; in io_ctl_add_entry()
588 put_unaligned_le64(offset, &entry->offset); in io_ctl_add_entry()
589 put_unaligned_le64(bytes, &entry->bytes); in io_ctl_add_entry()
590 entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP : in io_ctl_add_entry()
592 io_ctl->cur += sizeof(struct btrfs_free_space_entry); in io_ctl_add_entry()
593 io_ctl->size -= sizeof(struct btrfs_free_space_entry); in io_ctl_add_entry()
595 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) in io_ctl_add_entry()
598 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_entry()
601 if (io_ctl->index >= io_ctl->num_pages) in io_ctl_add_entry()
611 if (!io_ctl->cur) in io_ctl_add_bitmap()
612 return -ENOSPC; in io_ctl_add_bitmap()
618 if (io_ctl->cur != io_ctl->orig) { in io_ctl_add_bitmap()
619 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_bitmap()
620 if (io_ctl->index >= io_ctl->num_pages) in io_ctl_add_bitmap()
621 return -ENOSPC; in io_ctl_add_bitmap()
625 copy_page(io_ctl->cur, bitmap); in io_ctl_add_bitmap()
626 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_bitmap()
627 if (io_ctl->index < io_ctl->num_pages) in io_ctl_add_bitmap()
638 if (io_ctl->cur != io_ctl->orig) in io_ctl_zero_remaining_pages()
639 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_zero_remaining_pages()
643 while (io_ctl->index < io_ctl->num_pages) { in io_ctl_zero_remaining_pages()
645 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_zero_remaining_pages()
655 if (!io_ctl->cur) { in io_ctl_read_entry()
656 ret = io_ctl_check_crc(io_ctl, io_ctl->index); in io_ctl_read_entry()
661 e = io_ctl->cur; in io_ctl_read_entry()
662 entry->offset = get_unaligned_le64(&e->offset); in io_ctl_read_entry()
663 entry->bytes = get_unaligned_le64(&e->bytes); in io_ctl_read_entry()
664 *type = e->type; in io_ctl_read_entry()
665 io_ctl->cur += sizeof(struct btrfs_free_space_entry); in io_ctl_read_entry()
666 io_ctl->size -= sizeof(struct btrfs_free_space_entry); in io_ctl_read_entry()
668 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) in io_ctl_read_entry()
681 ret = io_ctl_check_crc(io_ctl, io_ctl->index); in io_ctl_read_bitmap()
685 copy_page(entry->bitmap, io_ctl->cur); in io_ctl_read_bitmap()
693 struct btrfs_block_group *block_group = ctl->block_group; in recalculate_thresholds()
697 u64 size = block_group->length; in recalculate_thresholds()
698 u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; in recalculate_thresholds()
699 u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); in recalculate_thresholds()
703 if (ctl->total_bitmaps > max_bitmaps) in recalculate_thresholds()
704 btrfs_err(block_group->fs_info, in recalculate_thresholds()
706 block_group->start, block_group->length, in recalculate_thresholds()
707 ctl->total_bitmaps, ctl->unit, max_bitmaps, in recalculate_thresholds()
709 ASSERT(ctl->total_bitmaps <= max_bitmaps); in recalculate_thresholds()
722 bitmap_bytes = ctl->total_bitmaps * ctl->unit; in recalculate_thresholds()
728 extent_bytes = max_bytes - bitmap_bytes; in recalculate_thresholds()
731 ctl->extents_thresh = in recalculate_thresholds()
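
recalculate_thresholds() splits a fixed in-memory budget between bitmaps and extent entries: existing bitmaps are charged first, and the remainder is converted into a count of allowed extent entries. A hedged sketch of that split (the budget, per-bitmap cost, and entry size below are illustrative assumptions, not the kernel's exact constants):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t max_bytes      = 65536;  /* assumed total cache budget       */
        uint64_t total_bitmaps  = 3;
        uint64_t unit           = 4096;   /* assumed per-bitmap memory cost   */
        uint64_t entry_size     = 32;     /* assumed size of an extent entry  */

        uint64_t bitmap_bytes   = total_bitmaps * unit;
        uint64_t extent_bytes   = max_bytes - bitmap_bytes;
        uint64_t extents_thresh = extent_bytes / entry_size;

        printf("bitmaps: %llu bytes, extent entries allowed: %llu\n",
               (unsigned long long)bitmap_bytes,
               (unsigned long long)extents_thresh);
        return 0;
    }
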
739 struct btrfs_fs_info *fs_info = root->fs_info; in __load_free_space_cache()
768 ret = -1; in __load_free_space_cache()
770 leaf = path->nodes[0]; in __load_free_space_cache()
771 header = btrfs_item_ptr(leaf, path->slots[0], in __load_free_space_cache()
778 if (!BTRFS_I(inode)->generation) { in __load_free_space_cache()
785 if (BTRFS_I(inode)->generation != generation) { in __load_free_space_cache()
788 BTRFS_I(inode)->generation, generation); in __load_free_space_cache()
817 ret = -ENOMEM; in __load_free_space_cache()
827 if (!e->bytes) { in __load_free_space_cache()
828 ret = -1; in __load_free_space_cache()
834 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
836 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
845 num_bitmaps--; in __load_free_space_cache()
846 e->bitmap = kmem_cache_zalloc( in __load_free_space_cache()
848 if (!e->bitmap) { in __load_free_space_cache()
849 ret = -ENOMEM; in __load_free_space_cache()
854 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
857 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
860 kmem_cache_free(btrfs_free_space_bitmap_cachep, e->bitmap); in __load_free_space_cache()
864 ctl->total_bitmaps++; in __load_free_space_cache()
866 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
867 list_add_tail(&e->list, &bitmaps); in __load_free_space_cache()
870 num_entries--; in __load_free_space_cache()
880 list_del_init(&e->list); in __load_free_space_cache()
894 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
896 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
907 while (!ret && (n = rb_first(&ctl->free_space_offset)) != NULL) { in copy_free_space_cache()
909 if (!info->bitmap) { in copy_free_space_cache()
910 const u64 offset = info->offset; in copy_free_space_cache()
911 const u64 bytes = info->bytes; in copy_free_space_cache()
914 spin_unlock(&ctl->tree_lock); in copy_free_space_cache()
917 spin_lock(&ctl->tree_lock); in copy_free_space_cache()
919 u64 offset = info->offset; in copy_free_space_cache()
920 u64 bytes = ctl->unit; in copy_free_space_cache()
925 spin_unlock(&ctl->tree_lock); in copy_free_space_cache()
928 spin_lock(&ctl->tree_lock); in copy_free_space_cache()
934 cond_resched_lock(&ctl->tree_lock); in copy_free_space_cache()
943 struct btrfs_fs_info *fs_info = block_group->fs_info; in load_free_space_cache()
944 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in load_free_space_cache()
950 u64 used = block_group->used; in load_free_space_cache()
963 spin_lock(&block_group->lock); in load_free_space_cache()
964 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { in load_free_space_cache()
965 spin_unlock(&block_group->lock); in load_free_space_cache()
968 spin_unlock(&block_group->lock); in load_free_space_cache()
973 path->search_commit_root = 1; in load_free_space_cache()
974 path->skip_locking = 1; in load_free_space_cache()
981 * for a free extent, at extent-tree.c:find_free_extent(), we can find in load_free_space_cache()
992 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so in load_free_space_cache()
1002 spin_lock(&block_group->lock); in load_free_space_cache()
1003 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { in load_free_space_cache()
1004 spin_unlock(&block_group->lock); in load_free_space_cache()
1008 spin_unlock(&block_group->lock); in load_free_space_cache()
1011 * Reinitialize the class of struct inode's mapping->invalidate_lock for in load_free_space_cache()
1015 lockdep_set_class(&(&inode->i_data)->invalidate_lock, in load_free_space_cache()
1018 ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl, in load_free_space_cache()
1019 path, block_group->start); in load_free_space_cache()
1024 matched = (tmp_ctl.free_space == (block_group->length - used - in load_free_space_cache()
1025 block_group->bytes_super)); in load_free_space_cache()
1033 * so we need to re-set it here. in load_free_space_cache()
1047 block_group->start); in load_free_space_cache()
1048 ret = -1; in load_free_space_cache()
1053 spin_lock(&block_group->lock); in load_free_space_cache()
1054 block_group->disk_cache_state = BTRFS_DC_CLEAR; in load_free_space_cache()
1055 spin_unlock(&block_group->lock); in load_free_space_cache()
1060 block_group->start); in load_free_space_cache()
1063 spin_lock(&ctl->tree_lock); in load_free_space_cache()
1065 spin_unlock(&ctl->tree_lock); in load_free_space_cache()
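
The `matched` test at lines 1024-1025 is the consistency check that decides whether the loaded cache can be trusted: the free space recorded in the cache must equal the block group length minus the allocated bytes and the space reserved for superblock copies. Illustrative arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t length      = 1073741824; /* 1 GiB block group           */
        uint64_t used        = 314572800;  /* 300 MiB allocated           */
        uint64_t bytes_super = 131072;     /* reserved for superblocks    */
        uint64_t cached_free = 759037952;  /* summed from the cache file  */

        printf("cache %s\n",
               cached_free == length - used - bytes_super
               ? "matches, use it" : "stale, rebuild from disk");
        return 0;
    }
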
1078 struct btrfs_free_cluster *cluster = NULL; in write_cache_extent_entries() local
1080 struct rb_node *node = rb_first(&ctl->free_space_offset); in write_cache_extent_entries()
1083 /* Get the cluster for this block_group if it exists */ in write_cache_extent_entries()
1084 if (block_group && !list_empty(&block_group->cluster_list)) { in write_cache_extent_entries()
1085 cluster = list_entry(block_group->cluster_list.next, in write_cache_extent_entries()
1090 if (!node && cluster) { in write_cache_extent_entries()
1091 cluster_locked = cluster; in write_cache_extent_entries()
1092 spin_lock(&cluster_locked->lock); in write_cache_extent_entries()
1093 node = rb_first(&cluster->root); in write_cache_extent_entries()
1094 cluster = NULL; in write_cache_extent_entries()
1104 ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes, in write_cache_extent_entries()
1105 e->bitmap); in write_cache_extent_entries()
1109 if (e->bitmap) { in write_cache_extent_entries()
1110 list_add_tail(&e->list, bitmap_list); in write_cache_extent_entries()
1114 if (!node && cluster) { in write_cache_extent_entries()
1115 node = rb_first(&cluster->root); in write_cache_extent_entries()
1116 cluster_locked = cluster; in write_cache_extent_entries()
1117 spin_lock(&cluster_locked->lock); in write_cache_extent_entries()
1118 cluster = NULL; in write_cache_extent_entries()
1122 spin_unlock(&cluster_locked->lock); in write_cache_extent_entries()
1132 list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) { in write_cache_extent_entries()
1133 ret = io_ctl_add_entry(io_ctl, trim_entry->start, in write_cache_extent_entries()
1134 trim_entry->bytes, NULL); in write_cache_extent_entries()
1143 spin_unlock(&cluster_locked->lock); in write_cache_extent_entries()
1144 return -ENOSPC; in write_cache_extent_entries()
1165 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, in update_cache_item()
1169 leaf = path->nodes[0]; in update_cache_item()
1172 ASSERT(path->slots[0]); in update_cache_item()
1173 path->slots[0]--; in update_cache_item()
1174 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in update_cache_item()
1177 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, in update_cache_item()
1178 inode->i_size - 1, EXTENT_DELALLOC, in update_cache_item()
1185 BTRFS_I(inode)->generation = trans->transid; in update_cache_item()
1186 header = btrfs_item_ptr(leaf, path->slots[0], in update_cache_item()
1190 btrfs_set_free_space_generation(leaf, header, trans->transid); in update_cache_item()
1197 return -1; in update_cache_item()
1220 unpin = &trans->transaction->pinned_extents; in write_pinned_extent_entries()
1222 start = block_group->start; in write_pinned_extent_entries()
1224 while (start < block_group->start + block_group->length) { in write_pinned_extent_entries()
1231 if (extent_start >= block_group->start + block_group->length) in write_pinned_extent_entries()
1235 extent_end = min(block_group->start + block_group->length, in write_pinned_extent_entries()
1237 len = extent_end - extent_start; in write_pinned_extent_entries()
1242 return -ENOSPC; in write_pinned_extent_entries()
1258 ret = io_ctl_add_bitmap(io_ctl, entry->bitmap); in write_bitmap_entries()
1260 return -ENOSPC; in write_bitmap_entries()
1261 list_del_init(&entry->list); in write_bitmap_entries()
1271 ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1); in flush_dirty_cache()
1273 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, in flush_dirty_cache()
1285 list_del_init(&entry->list); in cleanup_bitmap_list()
1294 unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, in cleanup_write_cache_enospc()
1305 struct inode *inode = io_ctl->inode; in __btrfs_wait_cache_io()
1317 io_ctl->entries, io_ctl->bitmaps); in __btrfs_wait_cache_io()
1320 invalidate_inode_pages2(inode->i_mapping); in __btrfs_wait_cache_io()
1321 BTRFS_I(inode)->generation = 0; in __btrfs_wait_cache_io()
1323 btrfs_debug(root->fs_info, in __btrfs_wait_cache_io()
1325 block_group->start, ret); in __btrfs_wait_cache_io()
1331 spin_lock(&trans->transaction->dirty_bgs_lock); in __btrfs_wait_cache_io()
1334 spin_lock(&block_group->lock); in __btrfs_wait_cache_io()
1341 if (!ret && list_empty(&block_group->dirty_list)) in __btrfs_wait_cache_io()
1342 block_group->disk_cache_state = BTRFS_DC_WRITTEN; in __btrfs_wait_cache_io()
1344 block_group->disk_cache_state = BTRFS_DC_ERROR; in __btrfs_wait_cache_io()
1346 spin_unlock(&block_group->lock); in __btrfs_wait_cache_io()
1347 spin_unlock(&trans->transaction->dirty_bgs_lock); in __btrfs_wait_cache_io()
1348 io_ctl->inode = NULL; in __btrfs_wait_cache_io()
1360 return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans, in btrfs_wait_cache_io()
1361 block_group, &block_group->io_ctl, in btrfs_wait_cache_io()
1362 path, block_group->start); in btrfs_wait_cache_io()
1392 return -EIO; in __btrfs_write_out_cache()
1394 WARN_ON(io_ctl->pages); in __btrfs_write_out_cache()
1399 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) { in __btrfs_write_out_cache()
1400 down_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1401 spin_lock(&block_group->lock); in __btrfs_write_out_cache()
1402 if (block_group->delalloc_bytes) { in __btrfs_write_out_cache()
1403 block_group->disk_cache_state = BTRFS_DC_WRITTEN; in __btrfs_write_out_cache()
1404 spin_unlock(&block_group->lock); in __btrfs_write_out_cache()
1405 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1406 BTRFS_I(inode)->generation = 0; in __btrfs_write_out_cache()
1411 spin_unlock(&block_group->lock); in __btrfs_write_out_cache()
1419 lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, in __btrfs_write_out_cache()
1422 io_ctl_set_generation(io_ctl, trans->transid); in __btrfs_write_out_cache()
1424 mutex_lock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1426 spin_lock(&ctl->tree_lock); in __btrfs_write_out_cache()
1451 spin_unlock(&ctl->tree_lock); in __btrfs_write_out_cache()
1452 mutex_unlock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1460 ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages, in __btrfs_write_out_cache()
1461 io_ctl->num_pages, 0, i_size_read(inode), in __btrfs_write_out_cache()
1466 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) in __btrfs_write_out_cache()
1467 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1475 unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, in __btrfs_write_out_cache()
1483 io_ctl->entries = entries; in __btrfs_write_out_cache()
1484 io_ctl->bitmaps = bitmaps; in __btrfs_write_out_cache()
1486 ret = btrfs_fdatawrite_range(BTRFS_I(inode), 0, (u64)-1); in __btrfs_write_out_cache()
1494 spin_unlock(&ctl->tree_lock); in __btrfs_write_out_cache()
1495 mutex_unlock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1501 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) in __btrfs_write_out_cache()
1502 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1505 io_ctl->inode = NULL; in __btrfs_write_out_cache()
1508 invalidate_inode_pages2(inode->i_mapping); in __btrfs_write_out_cache()
1509 BTRFS_I(inode)->generation = 0; in __btrfs_write_out_cache()
1521 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_write_out_cache()
1522 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_write_out_cache()
1526 spin_lock(&block_group->lock); in btrfs_write_out_cache()
1527 if (block_group->disk_cache_state < BTRFS_DC_SETUP) { in btrfs_write_out_cache()
1528 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1531 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1538 &block_group->io_ctl, trans); in btrfs_write_out_cache()
1542 block_group->start, ret); in btrfs_write_out_cache()
1543 spin_lock(&block_group->lock); in btrfs_write_out_cache()
1544 block_group->disk_cache_state = BTRFS_DC_ERROR; in btrfs_write_out_cache()
1545 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1547 block_group->io_ctl.inode = NULL; in btrfs_write_out_cache()
1563 offset -= bitmap_start; in offset_to_bit()
1578 bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; in offset_to_bitmap()
1579 bitmap_start = offset - ctl->start; in offset_to_bitmap()
1582 bitmap_start += ctl->start; in offset_to_bitmap()
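
offset_to_bitmap() rounds an offset down to the start of the bitmap entry that covers it; each bitmap spans BITS_PER_BITMAP * ctl->unit bytes starting from ctl->start. A userspace sketch (one 4 KiB page of bits and a 4 KiB unit are assumptions, giving 128 MiB per bitmap):

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_BITMAP (4096 * 8)  /* assumed: one page of bits */

    static uint64_t offset_to_bitmap(uint64_t ctl_start, uint64_t unit,
                                     uint64_t offset)
    {
        uint64_t bytes_per_bitmap = (uint64_t)BITS_PER_BITMAP * unit;
        uint64_t rel = offset - ctl_start;

        rel -= rel % bytes_per_bitmap;  /* round down to a bitmap boundary */
        return ctl_start + rel;
    }

    int main(void)
    {
        /* 200 MiB rounds down to the bitmap starting at 128 MiB. */
        printf("%llu\n", (unsigned long long)
               offset_to_bitmap(0, 4096, 200ull << 20));
        return 0;
    }
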
1588 struct btrfs_free_cluster *cluster, in tree_insert_offset() argument
1595 lockdep_assert_held(&ctl->tree_lock); in tree_insert_offset()
1597 if (cluster) { in tree_insert_offset()
1598 lockdep_assert_held(&cluster->lock); in tree_insert_offset()
1599 root = &cluster->root; in tree_insert_offset()
1601 root = &ctl->free_space_offset; in tree_insert_offset()
1604 p = &root->rb_node; in tree_insert_offset()
1612 if (new_entry->offset < info->offset) { in tree_insert_offset()
1613 p = &(*p)->rb_left; in tree_insert_offset()
1614 } else if (new_entry->offset > info->offset) { in tree_insert_offset()
1615 p = &(*p)->rb_right; in tree_insert_offset()
1630 if (new_entry->bitmap) { in tree_insert_offset()
1631 if (info->bitmap) { in tree_insert_offset()
1633 return -EEXIST; in tree_insert_offset()
1635 p = &(*p)->rb_right; in tree_insert_offset()
1637 if (!info->bitmap) { in tree_insert_offset()
1639 return -EEXIST; in tree_insert_offset()
1641 p = &(*p)->rb_left; in tree_insert_offset()
1646 rb_link_node(&new_entry->offset_index, parent, p); in tree_insert_offset()
1647 rb_insert_color(&new_entry->offset_index, root); in tree_insert_offset()
1653 * This is a little subtle. We *only* have ->max_extent_size set if we actually
1654 * searched through the bitmap and figured out the largest ->max_extent_size,
1657 * we've found already if it's larger, or we want to use ->bytes.
1659 * This matters because find_free_space() will skip entries whose ->bytes is
1661 * may pick some previous entry that has a smaller ->max_extent_size than we
1663 * ->max_extent_size set to 4K and ->bytes set to 1M. A second entry hasn't set
1664 * ->max_extent_size yet, has ->bytes set to 8K and it's contiguous. We will
1666 * that first bitmap entry had ->max_extent_size set, but the second one did
1671 * don't have ->max_extent_size set. We'll return 16K, and the next time the
1678 if (entry->bitmap && entry->max_extent_size) in get_max_extent_size()
1679 return entry->max_extent_size; in get_max_extent_size()
1680 return entry->bytes; in get_max_extent_size()
1699 * fuzzy - If this is set, then we are trying to make an allocation, and we just
1707 struct rb_node *n = ctl->free_space_offset.rb_node; in tree_search_offset()
1710 lockdep_assert_held(&ctl->tree_lock); in tree_search_offset()
1717 if (offset < entry->offset) in tree_search_offset()
1718 n = n->rb_left; in tree_search_offset()
1719 else if (offset > entry->offset) in tree_search_offset()
1720 n = n->rb_right; in tree_search_offset()
1730 if (entry->bitmap) in tree_search_offset()
1741 if (entry->offset != offset) in tree_search_offset()
1744 WARN_ON(!entry->bitmap); in tree_search_offset()
1747 if (entry->bitmap) { in tree_search_offset()
1752 n = rb_prev(&entry->offset_index); in tree_search_offset()
1756 if (!prev->bitmap && in tree_search_offset()
1757 prev->offset + prev->bytes > offset) in tree_search_offset()
1769 if (entry->offset > offset) { in tree_search_offset()
1770 n = rb_prev(&entry->offset_index); in tree_search_offset()
1774 ASSERT(entry->offset <= offset); in tree_search_offset()
1783 if (entry->bitmap) { in tree_search_offset()
1784 n = rb_prev(&entry->offset_index); in tree_search_offset()
1788 if (!prev->bitmap && in tree_search_offset()
1789 prev->offset + prev->bytes > offset) in tree_search_offset()
1792 if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) in tree_search_offset()
1794 } else if (entry->offset + entry->bytes > offset) in tree_search_offset()
1801 n = rb_next(&entry->offset_index); in tree_search_offset()
1805 if (entry->bitmap) { in tree_search_offset()
1806 if (entry->offset + BITS_PER_BITMAP * in tree_search_offset()
1807 ctl->unit > offset) in tree_search_offset()
1810 if (entry->offset + entry->bytes > offset) in tree_search_offset()
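
tree_search_offset() walks the offset-sorted rbtree; with `fuzzy` set it returns the entry covering the offset or, failing that, the next entry after it, which is what an allocation attempt wants. The same idea over a sorted array, as a stand-in for the rbtree walk:

    #include <stdint.h>
    #include <stdio.h>

    struct extent { uint64_t offset, bytes; };

    /* Entry covering `off`, or the first entry that ends after it. */
    static const struct extent *fuzzy_search(const struct extent *e, int n,
                                             uint64_t off)
    {
        for (int i = 0; i < n; i++)
            if (e[i].offset + e[i].bytes > off)
                return &e[i];
        return NULL;
    }

    int main(void)
    {
        struct extent map[] = { {0, 4096}, {8192, 4096}, {65536, 16384} };
        const struct extent *hit = fuzzy_search(map, 3, 5000);

        if (hit) /* offset 5000 is in a hole, so the entry at 8192 is returned */
            printf("found extent at %llu\n", (unsigned long long)hit->offset);
        return 0;
    }
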
1821 lockdep_assert_held(&ctl->tree_lock); in unlink_free_space()
1823 rb_erase(&info->offset_index, &ctl->free_space_offset); in unlink_free_space()
1824 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes); in unlink_free_space()
1825 ctl->free_extents--; in unlink_free_space()
1827 if (!info->bitmap && !btrfs_free_space_trimmed(info)) { in unlink_free_space()
1828 ctl->discardable_extents[BTRFS_STAT_CURR]--; in unlink_free_space()
1829 ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes; in unlink_free_space()
1833 ctl->free_space -= info->bytes; in unlink_free_space()
1841 lockdep_assert_held(&ctl->tree_lock); in link_free_space()
1843 ASSERT(info->bytes || info->bitmap); in link_free_space()
1848 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less); in link_free_space()
1850 if (!info->bitmap && !btrfs_free_space_trimmed(info)) { in link_free_space()
1851 ctl->discardable_extents[BTRFS_STAT_CURR]++; in link_free_space()
1852 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; in link_free_space()
1855 ctl->free_space += info->bytes; in link_free_space()
1856 ctl->free_extents++; in link_free_space()
1863 ASSERT(info->bitmap); in relink_bitmap_entry()
1866 * If our entry is empty it's because we're on a cluster and we don't in relink_bitmap_entry()
1867 * want to re-link it into our ctl bytes index. in relink_bitmap_entry()
1869 if (RB_EMPTY_NODE(&info->bytes_index)) in relink_bitmap_entry()
1872 lockdep_assert_held(&ctl->tree_lock); in relink_bitmap_entry()
1874 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes); in relink_bitmap_entry()
1875 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less); in relink_bitmap_entry()
1883 int extent_delta = -1; in bitmap_clear_bits()
1885 start = offset_to_bit(info->offset, ctl->unit, offset); in bitmap_clear_bits()
1886 count = bytes_to_bits(bytes, ctl->unit); in bitmap_clear_bits()
1890 bitmap_clear(info->bitmap, start, count); in bitmap_clear_bits()
1892 info->bytes -= bytes; in bitmap_clear_bits()
1893 if (info->max_extent_size > ctl->unit) in bitmap_clear_bits()
1894 info->max_extent_size = 0; in bitmap_clear_bits()
1898 if (start && test_bit(start - 1, info->bitmap)) in bitmap_clear_bits()
1901 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) in bitmap_clear_bits()
1904 info->bitmap_extents += extent_delta; in bitmap_clear_bits()
1906 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; in bitmap_clear_bits()
1907 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; in bitmap_clear_bits()
1911 ctl->free_space -= bytes; in bitmap_clear_bits()
1921 start = offset_to_bit(info->offset, ctl->unit, offset); in btrfs_bitmap_set_bits()
1922 count = bytes_to_bits(bytes, ctl->unit); in btrfs_bitmap_set_bits()
1926 bitmap_set(info->bitmap, start, count); in btrfs_bitmap_set_bits()
1932 info->max_extent_size = 0; in btrfs_bitmap_set_bits()
1933 info->bytes += bytes; in btrfs_bitmap_set_bits()
1934 ctl->free_space += bytes; in btrfs_bitmap_set_bits()
1938 if (start && test_bit(start - 1, info->bitmap)) in btrfs_bitmap_set_bits()
1939 extent_delta--; in btrfs_bitmap_set_bits()
1941 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) in btrfs_bitmap_set_bits()
1942 extent_delta--; in btrfs_bitmap_set_bits()
1944 info->bitmap_extents += extent_delta; in btrfs_bitmap_set_bits()
1946 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; in btrfs_bitmap_set_bits()
1947 ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes; in btrfs_bitmap_set_bits()
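
The extent_delta bookkeeping above tracks how many distinct free runs a bitmap holds: setting a range adds one run, minus one for each already-set neighbouring bit it merges with (clearing bits is the mirror image). A small sketch of the set-side rule:

    #include <stdio.h>

    /* How many free runs a bitmap gains when bits [start, start+count) are
     * set: one new run, minus one merge per already-set neighbour bit. */
    static int extent_delta_on_set(const unsigned char *bits, int nbits,
                                   int start, int count)
    {
        int delta = 1;
        int end = start + count;

        if (start > 0 && (bits[(start - 1) / 8] >> ((start - 1) % 8) & 1))
            delta--;  /* merges with the run ending just before `start` */
        if (end < nbits && (bits[end / 8] >> (end % 8) & 1))
            delta--;  /* merges with the run starting right at `end`    */
        return delta;
    }

    int main(void)
    {
        unsigned char bits[2] = { 0x01, 0x00 };  /* only bit 0 set */
        /* Setting bits 1..3 joins the run at bit 0: net extent change 0. */
        printf("%d\n", extent_delta_on_set(bits, 16, 1, 3));
        return 0;
    }
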
1970 bitmap_info->max_extent_size && in search_bitmap()
1971 bitmap_info->max_extent_size < *bytes) { in search_bitmap()
1972 *bytes = bitmap_info->max_extent_size; in search_bitmap()
1973 return -1; in search_bitmap()
1976 i = offset_to_bit(bitmap_info->offset, ctl->unit, in search_bitmap()
1977 max_t(u64, *offset, bitmap_info->offset)); in search_bitmap()
1978 bits = bytes_to_bits(*bytes, ctl->unit); in search_bitmap()
1980 for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) { in search_bitmap()
1985 next_zero = find_next_zero_bit(bitmap_info->bitmap, in search_bitmap()
1987 extent_bits = next_zero - i; in search_bitmap()
1998 *offset = (u64)(i * ctl->unit) + bitmap_info->offset; in search_bitmap()
1999 *bytes = (u64)(found_bits) * ctl->unit; in search_bitmap()
2003 *bytes = (u64)(max_bits) * ctl->unit; in search_bitmap()
2004 bitmap_info->max_extent_size = *bytes; in search_bitmap()
2006 return -1; in search_bitmap()
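
search_bitmap() looks for a run of at least the requested number of set bits, and on failure caches the largest run it saw in max_extent_size so the next search can bail out early (the early return at lines 1970-1973). A userspace sketch of that scan:

    #include <stdio.h>

    /* First run of at least `want` set bits; records the largest run seen
     * so a failed search can cache it, like max_extent_size. */
    static int search_bits(const unsigned char *bits, int nbits, int want,
                           int *max_run)
    {
        int run = 0;

        *max_run = 0;
        for (int i = 0; i < nbits; i++) {
            if (bits[i / 8] >> (i % 8) & 1) {
                if (++run > *max_run)
                    *max_run = run;
                if (run >= want)
                    return i - run + 1;  /* start bit of the run */
            } else {
                run = 0;
            }
        }
        return -1;                        /* not found; *max_run is cached */
    }

    int main(void)
    {
        unsigned char bits[1] = { 0x6E };  /* 0b01101110: runs of 3 and 2 */
        int max_run;
        int start = search_bits(bits, 8, 3, &max_run);

        printf("start=%d max_run=%d\n", start, max_run); /* start=1 max_run=3 */
        return 0;
    }
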
2020 if (!ctl->free_space_offset.rb_node) in find_free_space()
2024 node = rb_first_cached(&ctl->free_space_bytes); in find_free_space()
2030 node = &entry->offset_index; in find_free_space()
2042 * If we are using the bytes index then all subsequent entries in find_free_space()
2046 * If we're using the offset index then we need to keep going in find_free_space()
2049 if (entry->bytes < *bytes) { in find_free_space()
2061 tmp = entry->offset - ctl->start + align - 1; in find_free_space()
2063 tmp = tmp * align + ctl->start; in find_free_space()
2064 align_off = tmp - entry->offset; in find_free_space()
2067 tmp = entry->offset; in find_free_space()
2071 * We don't break here if we're using the bytes index because we in find_free_space()
2077 if (entry->bytes < *bytes + align_off) { in find_free_space()
2083 if (entry->bitmap) { in find_free_space()
2099 * The bitmap may have gotten re-arranged in the space in find_free_space()
2100 * index here because the max_extent_size may have been in find_free_space()
2110 *bytes = entry->bytes - align_off; in find_free_space()
2120 info->offset = offset_to_bitmap(ctl, offset); in add_new_bitmap()
2121 info->bytes = 0; in add_new_bitmap()
2122 info->bitmap_extents = 0; in add_new_bitmap()
2123 INIT_LIST_HEAD(&info->list); in add_new_bitmap()
2125 ctl->total_bitmaps++; in add_new_bitmap()
2138 if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) { in free_bitmap()
2139 ctl->discardable_extents[BTRFS_STAT_CURR] -= in free_bitmap()
2140 bitmap_info->bitmap_extents; in free_bitmap()
2141 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes; in free_bitmap()
2145 kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap); in free_bitmap()
2147 ctl->total_bitmaps--; in free_bitmap()
2160 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; in remove_from_bitmap()
2169 search_bytes = ctl->unit; in remove_from_bitmap()
2170 search_bytes = min(search_bytes, end - search_start + 1); in remove_from_bitmap()
2174 return -EINVAL; in remove_from_bitmap()
2180 search_bytes = min(search_bytes, end - search_start + 1); in remove_from_bitmap()
2184 *bytes -= search_bytes; in remove_from_bitmap()
2187 struct rb_node *next = rb_next(&bitmap_info->offset_index); in remove_from_bitmap()
2188 if (!bitmap_info->bytes) in remove_from_bitmap()
2196 return -EINVAL; in remove_from_bitmap()
2205 if (!bitmap_info->bitmap) in remove_from_bitmap()
2206 return -EAGAIN; in remove_from_bitmap()
2215 search_bytes = ctl->unit; in remove_from_bitmap()
2219 return -EAGAIN; in remove_from_bitmap()
2222 } else if (!bitmap_info->bytes) in remove_from_bitmap()
2241 ctl->discardable_extents[BTRFS_STAT_CURR] += in add_bytes_to_bitmap()
2242 info->bitmap_extents; in add_bytes_to_bitmap()
2243 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; in add_bytes_to_bitmap()
2245 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in add_bytes_to_bitmap()
2248 end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); in add_bytes_to_bitmap()
2250 bytes_to_set = min(end - offset, bytes); in add_bytes_to_bitmap()
2261 struct btrfs_block_group *block_group = ctl->block_group; in use_bitmap()
2262 struct btrfs_fs_info *fs_info = block_group->fs_info; in use_bitmap()
2271 if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD) in use_bitmap()
2278 if (!forced && ctl->free_extents < ctl->extents_thresh) { in use_bitmap()
2286 if (info->bytes <= fs_info->sectorsize * 8) { in use_bitmap()
2287 if (ctl->free_extents * 3 <= ctl->extents_thresh) in use_bitmap()
2302 if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length) in use_bitmap()
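
use_bitmap() is the policy knob between the two entry types: very large pieces always stay extents, and small pieces are pushed into bitmaps once extent entries get scarce. A hedged sketch of the decision (the 16 MiB cutoff is an assumption standing in for FORCE_EXTENT_THRESHOLD; the sectorsize * 8 test mirrors line 2286):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool use_bitmap(uint64_t bytes, uint64_t sectorsize,
                           unsigned free_extents, unsigned extents_thresh)
    {
        if (bytes >= 16 * 1024 * 1024)        /* assumed force-extent cutoff */
            return false;
        if (free_extents < extents_thresh) {
            if (bytes <= sectorsize * 8)      /* tiny piece                  */
                return free_extents * 3 > extents_thresh;
            return false;                     /* room left: keep as extent   */
        }
        return true;                          /* over threshold: bitmap it   */
    }

    int main(void)
    {
        /* Tiny piece, cache mostly empty: cheaper to keep it as an extent. */
        printf("%d\n", use_bitmap(4096, 4096, 100, 1024));  /* -> 0 */
        return 0;
    }
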
2322 bytes = info->bytes; in insert_into_bitmap()
2323 offset = info->offset; in insert_into_bitmap()
2324 trim_state = info->trim_state; in insert_into_bitmap()
2326 if (!ctl->op->use_bitmap(ctl, info)) in insert_into_bitmap()
2329 if (ctl->op == &free_space_op) in insert_into_bitmap()
2330 block_group = ctl->block_group; in insert_into_bitmap()
2333 * Since we link bitmaps right into the cluster we need to see if we in insert_into_bitmap()
2334 * have a cluster here, and if so and it has our bitmap we need to add in insert_into_bitmap()
2337 if (block_group && !list_empty(&block_group->cluster_list)) { in insert_into_bitmap()
2338 struct btrfs_free_cluster *cluster; in insert_into_bitmap() local
2342 cluster = list_entry(block_group->cluster_list.next, in insert_into_bitmap()
2345 spin_lock(&cluster->lock); in insert_into_bitmap()
2346 node = rb_first(&cluster->root); in insert_into_bitmap()
2348 spin_unlock(&cluster->lock); in insert_into_bitmap()
2353 if (!entry->bitmap) { in insert_into_bitmap()
2354 spin_unlock(&cluster->lock); in insert_into_bitmap()
2358 if (entry->offset == offset_to_bitmap(ctl, offset)) { in insert_into_bitmap()
2361 bytes -= bytes_added; in insert_into_bitmap()
2364 spin_unlock(&cluster->lock); in insert_into_bitmap()
2381 bytes -= bytes_added; in insert_into_bitmap()
2392 if (info && info->bitmap) { in insert_into_bitmap()
2398 spin_unlock(&ctl->tree_lock); in insert_into_bitmap()
2400 /* no pre-allocated info, allocate a new one */ in insert_into_bitmap()
2405 spin_lock(&ctl->tree_lock); in insert_into_bitmap()
2406 ret = -ENOMEM; in insert_into_bitmap()
2412 info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, in insert_into_bitmap()
2414 info->trim_state = BTRFS_TRIM_STATE_TRIMMED; in insert_into_bitmap()
2415 spin_lock(&ctl->tree_lock); in insert_into_bitmap()
2416 if (!info->bitmap) { in insert_into_bitmap()
2417 ret = -ENOMEM; in insert_into_bitmap()
2425 if (info->bitmap) in insert_into_bitmap()
2427 info->bitmap); in insert_into_bitmap()
2456 u64 offset = info->offset; in try_merge_free_space()
2457 u64 bytes = info->bytes; in try_merge_free_space()
2468 right_prev = rb_prev(&right_info->offset_index); in try_merge_free_space()
2473 left_info = tree_search_offset(ctl, offset - 1, 0, 0); in try_merge_free_space()
2476 if (right_info && !right_info->bitmap && in try_merge_free_space()
2479 info->bytes += right_info->bytes; in try_merge_free_space()
2485 if (left_info && !left_info->bitmap && in try_merge_free_space()
2486 left_info->offset + left_info->bytes == offset && in try_merge_free_space()
2489 info->offset = left_info->offset; in try_merge_free_space()
2490 info->bytes += left_info->bytes; in try_merge_free_space()
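
try_merge_free_space() coalesces a new extent with physically adjacent neighbours before insertion: the right neighbour merges when it starts exactly at offset + bytes, the left when it ends exactly at offset. The adjacency test in isolation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct extent { uint64_t offset, bytes; };

    /* Can `right` be merged onto the end of `info`? */
    static bool mergeable_right(const struct extent *info,
                                const struct extent *right)
    {
        return info->offset + info->bytes == right->offset;
    }

    int main(void)
    {
        struct extent info  = { 4096, 4096 };
        struct extent right = { 8192, 4096 };

        if (mergeable_right(&info, &right)) {
            info.bytes += right.bytes;  /* absorb the neighbour */
            printf("merged to %llu bytes at %llu\n",
                   (unsigned long long)info.bytes,
                   (unsigned long long)info.offset);  /* 8192 bytes at 4096 */
        }
        return 0;
    }
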
2505 const u64 end = info->offset + info->bytes; in steal_from_bitmap_to_end()
2513 i = offset_to_bit(bitmap->offset, ctl->unit, end); in steal_from_bitmap_to_end()
2514 j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i); in steal_from_bitmap_to_end()
2517 bytes = (j - i) * ctl->unit; in steal_from_bitmap_to_end()
2518 info->bytes += bytes; in steal_from_bitmap_to_end()
2522 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in steal_from_bitmap_to_end()
2526 if (!bitmap->bytes) in steal_from_bitmap_to_end()
2543 bitmap_offset = offset_to_bitmap(ctl, info->offset); in steal_from_bitmap_to_front()
2545 if (bitmap_offset == info->offset) { in steal_from_bitmap_to_front()
2546 if (info->offset == 0) in steal_from_bitmap_to_front()
2548 bitmap_offset = offset_to_bitmap(ctl, info->offset - 1); in steal_from_bitmap_to_front()
2555 i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1; in steal_from_bitmap_to_front()
2557 prev_j = (unsigned long)-1; in steal_from_bitmap_to_front()
2558 for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) { in steal_from_bitmap_to_front()
2566 if (prev_j == (unsigned long)-1) in steal_from_bitmap_to_front()
2567 bytes = (i + 1) * ctl->unit; in steal_from_bitmap_to_front()
2569 bytes = (i - prev_j) * ctl->unit; in steal_from_bitmap_to_front()
2571 info->offset -= bytes; in steal_from_bitmap_to_front()
2572 info->bytes += bytes; in steal_from_bitmap_to_front()
2576 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in steal_from_bitmap_to_front()
2578 bitmap_clear_bits(ctl, bitmap, info->offset, bytes, update_stat); in steal_from_bitmap_to_front()
2580 if (!bitmap->bytes) in steal_from_bitmap_to_front()
2588 * non-clustered allocation requests. So when attempting to add a new extent
2593 * on 2 or more entries - even if the entries represent a contiguous free space
2605 ASSERT(!info->bitmap); in steal_from_bitmap()
2606 ASSERT(RB_EMPTY_NODE(&info->offset_index)); in steal_from_bitmap()
2608 if (ctl->total_bitmaps > 0) { in steal_from_bitmap()
2613 if (ctl->total_bitmaps > 0) in steal_from_bitmap()
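
Per the comment fragments above, stealing exists so that a contiguous free range split between an extent entry and a bitmap can still satisfy one allocation: set bits bordering the extent are cleared from the bitmap and folded into the extent. A sketch of the steal-to-end direction (UNIT and the flat byte array are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define UNIT 4096u  /* assumed allocation unit */

    struct extent { uint64_t offset, bytes; };

    static void steal_to_end(struct extent *e, unsigned char *bits, int nbits,
                             uint64_t bitmap_offset)
    {
        int i = (int)((e->offset + e->bytes - bitmap_offset) / UNIT);

        while (i < nbits && (bits[i / 8] >> (i % 8) & 1)) {
            bits[i / 8] &= (unsigned char)~(1u << (i % 8)); /* steal the bit */
            e->bytes += UNIT;
            i++;
        }
    }

    int main(void)
    {
        unsigned char bits[1] = { 0x03 };                /* bits 0 and 1 set */
        struct extent e = { .offset = 0, .bytes = 8192 };

        steal_to_end(&e, bits, 8, 8192);                 /* bitmap starts at 8192 */
        printf("extent now %llu bytes\n", (unsigned long long)e.bytes); /* 16384 */
        return 0;
    }
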
2626 struct btrfs_fs_info *fs_info = block_group->fs_info; in __btrfs_add_free_space()
2627 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in __btrfs_add_free_space()
2636 return -ENOMEM; in __btrfs_add_free_space()
2638 info->offset = offset; in __btrfs_add_free_space()
2639 info->bytes = bytes; in __btrfs_add_free_space()
2640 info->trim_state = trim_state; in __btrfs_add_free_space()
2641 RB_CLEAR_NODE(&info->offset_index); in __btrfs_add_free_space()
2642 RB_CLEAR_NODE(&info->bytes_index); in __btrfs_add_free_space()
2644 spin_lock(&ctl->tree_lock); in __btrfs_add_free_space()
2664 * going to add the new free space to existing bitmap entries - because in __btrfs_add_free_space()
2670 filter_bytes = max(filter_bytes, info->bytes); in __btrfs_add_free_space()
2677 spin_unlock(&ctl->tree_lock); in __btrfs_add_free_space()
2681 ASSERT(ret != -EEXIST); in __btrfs_add_free_space()
2686 btrfs_discard_queue_work(&fs_info->discard_ctl, block_group); in __btrfs_add_free_space()
2695 struct btrfs_space_info *sinfo = block_group->space_info; in __btrfs_add_free_space_zoned()
2696 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in __btrfs_add_free_space_zoned()
2697 u64 offset = bytenr - block_group->start; in __btrfs_add_free_space_zoned()
2703 spin_lock(&block_group->lock); in __btrfs_add_free_space_zoned()
2705 initial = ((size == block_group->length) && (block_group->alloc_offset == 0)); in __btrfs_add_free_space_zoned()
2706 WARN_ON(!initial && offset + size > block_group->zone_capacity); in __btrfs_add_free_space_zoned()
2708 bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold); in __btrfs_add_free_space_zoned()
2713 to_free = block_group->zone_capacity; in __btrfs_add_free_space_zoned()
2714 else if (offset >= block_group->alloc_offset) in __btrfs_add_free_space_zoned()
2716 else if (offset + size <= block_group->alloc_offset) in __btrfs_add_free_space_zoned()
2719 to_free = offset + size - block_group->alloc_offset; in __btrfs_add_free_space_zoned()
2720 to_unusable = size - to_free; in __btrfs_add_free_space_zoned()
2722 spin_lock(&ctl->tree_lock); in __btrfs_add_free_space_zoned()
2723 ctl->free_space += to_free; in __btrfs_add_free_space_zoned()
2724 spin_unlock(&ctl->tree_lock); in __btrfs_add_free_space_zoned()
2726 * If the block group is read-only, we should account freed space into in __btrfs_add_free_space_zoned()
2729 if (!block_group->ro) { in __btrfs_add_free_space_zoned()
2730 block_group->zone_unusable += to_unusable; in __btrfs_add_free_space_zoned()
2731 WARN_ON(block_group->zone_unusable > block_group->length); in __btrfs_add_free_space_zoned()
2734 block_group->alloc_offset -= size; in __btrfs_add_free_space_zoned()
2737 reclaimable_unusable = block_group->zone_unusable - in __btrfs_add_free_space_zoned()
2738 (block_group->length - block_group->zone_capacity); in __btrfs_add_free_space_zoned()
2740 if (block_group->zone_unusable == block_group->length) { in __btrfs_add_free_space_zoned()
2744 mult_perc(block_group->zone_capacity, bg_reclaim_threshold)) { in __btrfs_add_free_space_zoned()
2748 spin_unlock(&block_group->lock); in __btrfs_add_free_space_zoned()
2758 if (btrfs_is_zoned(block_group->fs_info)) in btrfs_add_free_space()
2762 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC)) in btrfs_add_free_space()
2771 if (btrfs_is_zoned(block_group->fs_info)) in btrfs_add_free_space_unused()
2788 if (btrfs_is_zoned(block_group->fs_info)) in btrfs_add_free_space_async_trimmed()
2792 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) || in btrfs_add_free_space_async_trimmed()
2793 btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) in btrfs_add_free_space_async_trimmed()
2802 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_remove_free_space()
2807 if (btrfs_is_zoned(block_group->fs_info)) { in btrfs_remove_free_space()
2810 * Since the allocation info of tree-log nodes is not recorded in btrfs_remove_free_space()
2811 * to the extent-tree, calculate_alloc_pointer() failed to in btrfs_remove_free_space()
2817 * Advance the pointer so as not to overwrite the tree-log nodes. in btrfs_remove_free_space()
2819 if (block_group->start + block_group->alloc_offset < in btrfs_remove_free_space()
2821 block_group->alloc_offset = in btrfs_remove_free_space()
2822 offset + bytes - block_group->start; in btrfs_remove_free_space()
2827 spin_lock(&ctl->tree_lock); in btrfs_remove_free_space()
2854 if (!info->bitmap) { in btrfs_remove_free_space()
2856 if (offset == info->offset) { in btrfs_remove_free_space()
2857 u64 to_free = min(bytes, info->bytes); in btrfs_remove_free_space()
2859 info->bytes -= to_free; in btrfs_remove_free_space()
2860 info->offset += to_free; in btrfs_remove_free_space()
2861 if (info->bytes) { in btrfs_remove_free_space()
2869 bytes -= to_free; in btrfs_remove_free_space()
2872 u64 old_end = info->bytes + info->offset; in btrfs_remove_free_space()
2874 info->bytes = offset - info->offset; in btrfs_remove_free_space()
2882 bytes -= old_end - offset; in btrfs_remove_free_space()
2889 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space()
2893 old_end - (offset + bytes), in btrfs_remove_free_space()
2894 info->trim_state); in btrfs_remove_free_space()
2901 if (ret == -EAGAIN) { in btrfs_remove_free_space()
2907 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space()
2915 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_dump_free_space()
2916 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_dump_free_space()
2922 * Zoned btrfs does not use the free space tree or clusters. Just print in btrfs_dump_free_space()
2927 block_group->zone_capacity - block_group->alloc_offset, in btrfs_dump_free_space()
2929 &block_group->runtime_flags)); in btrfs_dump_free_space()
2933 spin_lock(&ctl->tree_lock); in btrfs_dump_free_space()
2934 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { in btrfs_dump_free_space()
2936 if (info->bytes >= bytes && !block_group->ro) in btrfs_dump_free_space()
2939 info->offset, info->bytes, in btrfs_dump_free_space()
2940 (info->bitmap) ? "yes" : "no"); in btrfs_dump_free_space()
2942 spin_unlock(&ctl->tree_lock); in btrfs_dump_free_space()
2943 btrfs_info(fs_info, "block group has cluster?: %s", in btrfs_dump_free_space()
2944 list_empty(&block_group->cluster_list) ? "no" : "yes"); in btrfs_dump_free_space()
2953 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_init_free_space_ctl()
2955 spin_lock_init(&ctl->tree_lock); in btrfs_init_free_space_ctl()
2956 ctl->unit = fs_info->sectorsize; in btrfs_init_free_space_ctl()
2957 ctl->start = block_group->start; in btrfs_init_free_space_ctl()
2958 ctl->block_group = block_group; in btrfs_init_free_space_ctl()
2959 ctl->op = &free_space_op; in btrfs_init_free_space_ctl()
2960 ctl->free_space_bytes = RB_ROOT_CACHED; in btrfs_init_free_space_ctl()
2961 INIT_LIST_HEAD(&ctl->trimming_ranges); in btrfs_init_free_space_ctl()
2962 mutex_init(&ctl->cache_writeout_mutex); in btrfs_init_free_space_ctl()
2969 ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space); in btrfs_init_free_space_ctl()
2973 * for a given cluster, put all of its extents back into the free
2975 * pointed to by the cluster, someone else raced in and freed the
2976 * cluster already. In that case, we just return without changing anything
2980 struct btrfs_free_cluster *cluster) in __btrfs_return_cluster_to_free_space() argument
2982 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in __btrfs_return_cluster_to_free_space()
2985 lockdep_assert_held(&ctl->tree_lock); in __btrfs_return_cluster_to_free_space()
2987 spin_lock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
2988 if (cluster->block_group != block_group) { in __btrfs_return_cluster_to_free_space()
2989 spin_unlock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
2993 cluster->block_group = NULL; in __btrfs_return_cluster_to_free_space()
2994 cluster->window_start = 0; in __btrfs_return_cluster_to_free_space()
2995 list_del_init(&cluster->block_group_list); in __btrfs_return_cluster_to_free_space()
2997 node = rb_first(&cluster->root); in __btrfs_return_cluster_to_free_space()
3002 node = rb_next(&entry->offset_index); in __btrfs_return_cluster_to_free_space()
3003 rb_erase(&entry->offset_index, &cluster->root); in __btrfs_return_cluster_to_free_space()
3004 RB_CLEAR_NODE(&entry->offset_index); in __btrfs_return_cluster_to_free_space()
3006 if (!entry->bitmap) { in __btrfs_return_cluster_to_free_space()
3009 ctl->discardable_extents[BTRFS_STAT_CURR]--; in __btrfs_return_cluster_to_free_space()
3010 ctl->discardable_bytes[BTRFS_STAT_CURR] -= in __btrfs_return_cluster_to_free_space()
3011 entry->bytes; in __btrfs_return_cluster_to_free_space()
3019 ctl->discardable_extents[BTRFS_STAT_CURR]++; in __btrfs_return_cluster_to_free_space()
3020 ctl->discardable_bytes[BTRFS_STAT_CURR] += in __btrfs_return_cluster_to_free_space()
3021 entry->bytes; in __btrfs_return_cluster_to_free_space()
3025 rb_add_cached(&entry->bytes_index, &ctl->free_space_bytes, in __btrfs_return_cluster_to_free_space()
3028 cluster->root = RB_ROOT; in __btrfs_return_cluster_to_free_space()
3029 spin_unlock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
3035 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_remove_free_space_cache()
3036 struct btrfs_free_cluster *cluster; in btrfs_remove_free_space_cache() local
3039 spin_lock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
3040 while ((head = block_group->cluster_list.next) != in btrfs_remove_free_space_cache()
3041 &block_group->cluster_list) { in btrfs_remove_free_space_cache()
3042 cluster = list_entry(head, struct btrfs_free_cluster, in btrfs_remove_free_space_cache()
3045 WARN_ON(cluster->block_group != block_group); in btrfs_remove_free_space_cache()
3046 __btrfs_return_cluster_to_free_space(block_group, cluster); in btrfs_remove_free_space_cache()
3048 cond_resched_lock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
3052 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
3061 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_is_free_space_trimmed()
3066 spin_lock(&ctl->tree_lock); in btrfs_is_free_space_trimmed()
3067 node = rb_first(&ctl->free_space_offset); in btrfs_is_free_space_trimmed()
3080 spin_unlock(&ctl->tree_lock); in btrfs_is_free_space_trimmed()
3088 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_find_space_for_alloc()
3090 &block_group->fs_info->discard_ctl; in btrfs_find_space_for_alloc()
3097 bool use_bytes_index = (offset == block_group->start); in btrfs_find_space_for_alloc()
3099 ASSERT(!btrfs_is_zoned(block_group->fs_info)); in btrfs_find_space_for_alloc()
3101 spin_lock(&ctl->tree_lock); in btrfs_find_space_for_alloc()
3103 block_group->full_stripe_len, max_extent_size, in btrfs_find_space_for_alloc()
3109 if (entry->bitmap) { in btrfs_find_space_for_alloc()
3113 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_find_space_for_alloc()
3115 if (!entry->bytes) in btrfs_find_space_for_alloc()
3119 align_gap_len = offset - entry->offset; in btrfs_find_space_for_alloc()
3120 align_gap = entry->offset; in btrfs_find_space_for_alloc()
3121 align_gap_trim_state = entry->trim_state; in btrfs_find_space_for_alloc()
3124 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_find_space_for_alloc()
3126 entry->offset = offset + bytes; in btrfs_find_space_for_alloc()
3127 WARN_ON(entry->bytes < bytes + align_gap_len); in btrfs_find_space_for_alloc()
3129 entry->bytes -= bytes + align_gap_len; in btrfs_find_space_for_alloc()
3130 if (!entry->bytes) in btrfs_find_space_for_alloc()
3137 spin_unlock(&ctl->tree_lock); in btrfs_find_space_for_alloc()
3146 * given a cluster, put all of its extents back into the free space
3148 * a cluster that belongs to the passed block group.
3151 * cluster and remove the cluster from it.
3155 struct btrfs_free_cluster *cluster) in btrfs_return_cluster_to_free_space() argument
3160 spin_lock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3162 block_group = cluster->block_group; in btrfs_return_cluster_to_free_space()
3164 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3167 } else if (cluster->block_group != block_group) { in btrfs_return_cluster_to_free_space()
3169 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3173 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3175 ctl = block_group->free_space_ctl; in btrfs_return_cluster_to_free_space()
3177 /* now return any extents the cluster had on it */ in btrfs_return_cluster_to_free_space()
3178 spin_lock(&ctl->tree_lock); in btrfs_return_cluster_to_free_space()
3179 __btrfs_return_cluster_to_free_space(block_group, cluster); in btrfs_return_cluster_to_free_space()
3180 spin_unlock(&ctl->tree_lock); in btrfs_return_cluster_to_free_space()
3182 btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group); in btrfs_return_cluster_to_free_space()
3189 struct btrfs_free_cluster *cluster, in btrfs_alloc_from_bitmap() argument
3194 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_alloc_from_bitmap()
3196 u64 search_start = cluster->window_start; in btrfs_alloc_from_bitmap()
3217 * given a cluster, try to allocate 'bytes' from it, returns 0
3222 struct btrfs_free_cluster *cluster, u64 bytes, in btrfs_alloc_from_cluster() argument
3225 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_alloc_from_cluster()
3227 &block_group->fs_info->discard_ctl; in btrfs_alloc_from_cluster()
3232 ASSERT(!btrfs_is_zoned(block_group->fs_info)); in btrfs_alloc_from_cluster()
3234 spin_lock(&cluster->lock); in btrfs_alloc_from_cluster()
3235 if (bytes > cluster->max_size) in btrfs_alloc_from_cluster()
3238 if (cluster->block_group != block_group) in btrfs_alloc_from_cluster()
3241 node = rb_first(&cluster->root); in btrfs_alloc_from_cluster()
3247 if (entry->bytes < bytes) in btrfs_alloc_from_cluster()
3251 if (entry->bytes < bytes || in btrfs_alloc_from_cluster()
3252 (!entry->bitmap && entry->offset < min_start)) { in btrfs_alloc_from_cluster()
3253 node = rb_next(&entry->offset_index); in btrfs_alloc_from_cluster()
3261 if (entry->bitmap) { in btrfs_alloc_from_cluster()
3263 cluster, entry, bytes, in btrfs_alloc_from_cluster()
3264 cluster->window_start, in btrfs_alloc_from_cluster()
3267 node = rb_next(&entry->offset_index); in btrfs_alloc_from_cluster()
3274 cluster->window_start += bytes; in btrfs_alloc_from_cluster()
3276 ret = entry->offset; in btrfs_alloc_from_cluster()
3278 entry->offset += bytes; in btrfs_alloc_from_cluster()
3279 entry->bytes -= bytes; in btrfs_alloc_from_cluster()
3285 spin_unlock(&cluster->lock); in btrfs_alloc_from_cluster()
3290 spin_lock(&ctl->tree_lock); in btrfs_alloc_from_cluster()
3293 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_alloc_from_cluster()
3295 ctl->free_space -= bytes; in btrfs_alloc_from_cluster()
3296 if (!entry->bitmap && !btrfs_free_space_trimmed(entry)) in btrfs_alloc_from_cluster()
3297 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; in btrfs_alloc_from_cluster()
3299 spin_lock(&cluster->lock); in btrfs_alloc_from_cluster()
3300 if (entry->bytes == 0) { in btrfs_alloc_from_cluster()
3301 rb_erase(&entry->offset_index, &cluster->root); in btrfs_alloc_from_cluster()
3302 ctl->free_extents--; in btrfs_alloc_from_cluster()
3303 if (entry->bitmap) { in btrfs_alloc_from_cluster()
3305 entry->bitmap); in btrfs_alloc_from_cluster()
3306 ctl->total_bitmaps--; in btrfs_alloc_from_cluster()
3309 ctl->discardable_extents[BTRFS_STAT_CURR]--; in btrfs_alloc_from_cluster()
3314 spin_unlock(&cluster->lock); in btrfs_alloc_from_cluster()
3315 spin_unlock(&ctl->tree_lock); in btrfs_alloc_from_cluster()
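
/*
 * Editor's sketch of the non-bitmap branch above (ret = entry->offset;
 * entry->offset += bytes; entry->bytes -= bytes): carve an allocation
 * off the front of an extent entry and tell the caller when the entry
 * has been consumed. Plain C model with invented names, not the kernel
 * implementation.
 */
#include <stdint.h>
#include <stdbool.h>

struct mini_extent {
	uint64_t offset;
	uint64_t bytes;
};

/* Returns the start of the carved range, or (uint64_t)-1 on failure. */
static uint64_t carve_front(struct mini_extent *e, uint64_t bytes,
			    bool *now_empty)
{
	uint64_t ret;

	if (e->bytes < bytes)
		return (uint64_t)-1;
	ret = e->offset;
	e->offset += bytes;
	e->bytes -= bytes;
	*now_empty = (e->bytes == 0);  /* caller unlinks empty entries */
	return ret;
}
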
3322 struct btrfs_free_cluster *cluster, in btrfs_bitmap_cluster() argument
3326 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_bitmap_cluster()
3337 lockdep_assert_held(&ctl->tree_lock); in btrfs_bitmap_cluster()
3339 i = offset_to_bit(entry->offset, ctl->unit, in btrfs_bitmap_cluster()
3340 max_t(u64, offset, entry->offset)); in btrfs_bitmap_cluster()
3341 want_bits = bytes_to_bits(bytes, ctl->unit); in btrfs_bitmap_cluster()
3342 min_bits = bytes_to_bits(min_bytes, ctl->unit); in btrfs_bitmap_cluster()
3345 * Don't bother looking for a cluster in this bitmap if it's heavily in btrfs_bitmap_cluster()
3348 if (entry->max_extent_size && in btrfs_bitmap_cluster()
3349 entry->max_extent_size < cont1_bytes) in btrfs_bitmap_cluster()
3350 return -ENOSPC; in btrfs_bitmap_cluster()
3353 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) { in btrfs_bitmap_cluster()
3354 next_zero = find_next_zero_bit(entry->bitmap, in btrfs_bitmap_cluster()
3356 if (next_zero - i >= min_bits) { in btrfs_bitmap_cluster()
3357 found_bits = next_zero - i; in btrfs_bitmap_cluster()
3362 if (next_zero - i > max_bits) in btrfs_bitmap_cluster()
3363 max_bits = next_zero - i; in btrfs_bitmap_cluster()
3368 entry->max_extent_size = (u64)max_bits * ctl->unit; in btrfs_bitmap_cluster()
3369 return -ENOSPC; in btrfs_bitmap_cluster()
3374 cluster->max_size = 0; in btrfs_bitmap_cluster()
3379 if (cluster->max_size < found_bits * ctl->unit) in btrfs_bitmap_cluster()
3380 cluster->max_size = found_bits * ctl->unit; in btrfs_bitmap_cluster()
3382 if (total_found < want_bits || cluster->max_size < cont1_bytes) { in btrfs_bitmap_cluster()
3387 cluster->window_start = start * ctl->unit + entry->offset; in btrfs_bitmap_cluster()
3388 rb_erase(&entry->offset_index, &ctl->free_space_offset); in btrfs_bitmap_cluster()
3389 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes); in btrfs_bitmap_cluster()
3392 * We need to know if we're currently on the normal space index when we in btrfs_bitmap_cluster()
3393 * manipulate the bitmap so that we know we need to remove and re-insert in btrfs_bitmap_cluster()
3398 RB_CLEAR_NODE(&entry->bytes_index); in btrfs_bitmap_cluster()
3400 ret = tree_insert_offset(ctl, cluster, entry); in btrfs_bitmap_cluster()
3401 ASSERT(!ret); /* -EEXIST; Logic error */ in btrfs_bitmap_cluster()
3403 trace_btrfs_setup_cluster(block_group, cluster, in btrfs_bitmap_cluster()
3404 total_found * ctl->unit, 1); in btrfs_bitmap_cluster()
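
/*
 * Editor's sketch of the run-length scan btrfs_bitmap_cluster() does
 * with for_each_set_bit_from()/find_next_zero_bit(): walk set bits,
 * measure each run up to the next zero bit, stop at the first run of
 * at least min_bits, and remember the longest run seen so a caller can
 * cache a max-extent-size hint on failure. Self-contained model over a
 * flat bit array; names are illustrative.
 */
#include <stdint.h>
#include <stddef.h>

#define TEST_BIT(map, i) (((map)[(i) / 64] >> ((i) % 64)) & 1)

/*
 * Find the first run of >= min_bits consecutive set bits in
 * map[0..nbits). Returns the start bit, or -1 if there is none;
 * *max_run receives the longest run encountered.
 */
static long find_bit_run(const uint64_t *map, size_t nbits,
			 size_t min_bits, size_t *max_run)
{
	size_t i = 0, run;

	*max_run = 0;
	while (i < nbits) {
		if (!TEST_BIT(map, i)) {   /* skip zero bits */
			i++;
			continue;
		}
		run = 0;                   /* measure this run of ones */
		while (i + run < nbits && TEST_BIT(map, i + run))
			run++;
		if (run > *max_run)
			*max_run = run;
		if (run >= min_bits)
			return (long)i;
		i += run;                  /* jump past the run */
	}
	return -1;
}
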
3409 * This searches the block group for just extents to fill the cluster with.
3410 * Try to find a cluster with at least bytes total bytes, at least one
3415 struct btrfs_free_cluster *cluster, in setup_cluster_no_bitmap() argument
3419 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in setup_cluster_no_bitmap()
3428 lockdep_assert_held(&ctl->tree_lock); in setup_cluster_no_bitmap()
3432 return -ENOSPC; in setup_cluster_no_bitmap()
3438 while (entry->bitmap || entry->bytes < min_bytes) { in setup_cluster_no_bitmap()
3439 if (entry->bitmap && list_empty(&entry->list)) in setup_cluster_no_bitmap()
3440 list_add_tail(&entry->list, bitmaps); in setup_cluster_no_bitmap()
3441 node = rb_next(&entry->offset_index); in setup_cluster_no_bitmap()
3443 return -ENOSPC; in setup_cluster_no_bitmap()
3447 window_free = entry->bytes; in setup_cluster_no_bitmap()
3448 max_extent = entry->bytes; in setup_cluster_no_bitmap()
3452 for (node = rb_next(&entry->offset_index); node; in setup_cluster_no_bitmap()
3453 node = rb_next(&entry->offset_index)) { in setup_cluster_no_bitmap()
3456 if (entry->bitmap) { in setup_cluster_no_bitmap()
3457 if (list_empty(&entry->list)) in setup_cluster_no_bitmap()
3458 list_add_tail(&entry->list, bitmaps); in setup_cluster_no_bitmap()
3462 if (entry->bytes < min_bytes) in setup_cluster_no_bitmap()
3466 window_free += entry->bytes; in setup_cluster_no_bitmap()
3467 if (entry->bytes > max_extent) in setup_cluster_no_bitmap()
3468 max_extent = entry->bytes; in setup_cluster_no_bitmap()
3472 return -ENOSPC; in setup_cluster_no_bitmap()
3474 cluster->window_start = first->offset; in setup_cluster_no_bitmap()
3476 node = &first->offset_index; in setup_cluster_no_bitmap()
3480 * cache and put them into the cluster rbtree in setup_cluster_no_bitmap()
3486 node = rb_next(&entry->offset_index); in setup_cluster_no_bitmap()
3487 if (entry->bitmap || entry->bytes < min_bytes) in setup_cluster_no_bitmap()
3490 rb_erase(&entry->offset_index, &ctl->free_space_offset); in setup_cluster_no_bitmap()
3491 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes); in setup_cluster_no_bitmap()
3492 ret = tree_insert_offset(ctl, cluster, entry); in setup_cluster_no_bitmap()
3493 total_size += entry->bytes; in setup_cluster_no_bitmap()
3494 ASSERT(!ret); /* -EEXIST; Logic error */ in setup_cluster_no_bitmap()
3497 cluster->max_size = max_extent; in setup_cluster_no_bitmap()
3498 trace_btrfs_setup_cluster(block_group, cluster, total_size, 0); in setup_cluster_no_bitmap()
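
/*
 * Editor's sketch of the extent-only window search above: walk extents
 * in offset order, skip entries below min_bytes, accumulate the window
 * total and track the largest extent, and succeed once the window
 * holds enough space with at least one extent of cont1_bytes. A rough
 * model with invented names; the real code also collects bitmaps for a
 * later pass.
 */
#include <stdint.h>
#include <stddef.h>

struct mini_ext { uint64_t offset, bytes; };

/* exts[] sorted by offset; returns index of the first window entry or -1. */
static long find_window(const struct mini_ext *exts, size_t n,
			uint64_t bytes, uint64_t cont1_bytes,
			uint64_t min_bytes)
{
	uint64_t window_free = 0, max_extent = 0;
	long first = -1;
	size_t i;

	for (i = 0; i < n; i++) {
		if (exts[i].bytes < min_bytes)
			continue;          /* too small to cluster */
		if (first < 0)
			first = (long)i;
		window_free += exts[i].bytes;
		if (exts[i].bytes > max_extent)
			max_extent = exts[i].bytes;
		if (window_free >= bytes && max_extent >= cont1_bytes)
			return first;      /* window is good enough */
	}
	return -1;
}
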
3503 * This specifically looks for bitmaps that may work in the cluster, we assume
3508 struct btrfs_free_cluster *cluster, in setup_cluster_bitmap() argument
3512 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in setup_cluster_bitmap()
3514 int ret = -ENOSPC; in setup_cluster_bitmap()
3517 if (ctl->total_bitmaps == 0) in setup_cluster_bitmap()
3518 return -ENOSPC; in setup_cluster_bitmap()
3527 if (!entry || entry->offset != bitmap_offset) { in setup_cluster_bitmap()
3529 if (entry && list_empty(&entry->list)) in setup_cluster_bitmap()
3530 list_add(&entry->list, bitmaps); in setup_cluster_bitmap()
3534 if (entry->bytes < bytes) in setup_cluster_bitmap()
3536 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, in setup_cluster_bitmap()
3546 return -ENOSPC; in setup_cluster_bitmap()
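
/*
 * Editor's sketch: the bitmap lookup above keys on the start of the
 * bitmap window covering an offset, assuming the usual
 * offset_to_bitmap() rounding where one window spans
 * BITS_PER_BITMAP * unit bytes from the block group start. Invented
 * helper name; illustrative only.
 */
#include <stdint.h>

static uint64_t bitmap_window_start(uint64_t offset, uint64_t base,
				    uint64_t bits_per_bitmap, uint64_t unit)
{
	uint64_t window = bits_per_bitmap * unit;

	/* round down to the window containing 'offset' */
	return base + ((offset - base) / window) * window;
}
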
3550 * here we try to find a cluster of blocks in a block group. The goal
3554 * returns zero and sets up cluster if things worked out, otherwise
3555 * it returns -ENOSPC
3558 struct btrfs_free_cluster *cluster, in btrfs_find_space_cluster() argument
3561 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_find_space_cluster()
3562 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_find_space_cluster()
3571 * cluster. For SSD_SPREAD, don't allow any fragmentation. in btrfs_find_space_cluster()
3578 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { in btrfs_find_space_cluster()
3580 min_bytes = fs_info->sectorsize; in btrfs_find_space_cluster()
3583 min_bytes = fs_info->sectorsize; in btrfs_find_space_cluster()
3586 spin_lock(&ctl->tree_lock); in btrfs_find_space_cluster()
3589 * If we know we don't have enough space to make a cluster don't even in btrfs_find_space_cluster()
3592 if (ctl->free_space < bytes) { in btrfs_find_space_cluster()
3593 spin_unlock(&ctl->tree_lock); in btrfs_find_space_cluster()
3594 return -ENOSPC; in btrfs_find_space_cluster()
3597 spin_lock(&cluster->lock); in btrfs_find_space_cluster()
3599 /* someone already found a cluster, hooray */ in btrfs_find_space_cluster()
3600 if (cluster->block_group) { in btrfs_find_space_cluster()
3608 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, in btrfs_find_space_cluster()
3612 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps, in btrfs_find_space_cluster()
3618 list_del_init(&entry->list); in btrfs_find_space_cluster()
3622 list_add_tail(&cluster->block_group_list, in btrfs_find_space_cluster()
3623 &block_group->cluster_list); in btrfs_find_space_cluster()
3624 cluster->block_group = block_group; in btrfs_find_space_cluster()
3629 spin_unlock(&cluster->lock); in btrfs_find_space_cluster()
3630 spin_unlock(&ctl->tree_lock); in btrfs_find_space_cluster()
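
/*
 * Editor's sketch of the fragmentation policy hinted at above: how
 * strict the contiguity requirement (cont1_bytes) gets per allocation
 * type. The SSD_SPREAD and metadata branches follow the visible lines;
 * the data branch (a quarter of the request) is an assumption about
 * the elided code, so treat the values as a model, not a quote.
 */
#include <stdint.h>
#include <stdbool.h>

static void pick_cluster_limits(bool ssd_spread, bool metadata,
				uint64_t bytes, uint64_t empty_size,
				uint64_t sectorsize,
				uint64_t *cont1_bytes, uint64_t *min_bytes)
{
	if (ssd_spread) {
		/* no fragmentation: one run must cover the whole request */
		*cont1_bytes = bytes + empty_size;
		*min_bytes = *cont1_bytes;
	} else if (metadata) {
		/* metadata: one full-size run, sector-sized fill-ins */
		*cont1_bytes = bytes;
		*min_bytes = sectorsize;
	} else {
		/* data: allow some fragmentation (assumed quarter rule) */
		uint64_t quarter = (bytes + empty_size) / 4;

		*cont1_bytes = bytes > quarter ? bytes : quarter;
		*min_bytes = sectorsize;
	}
}
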
3636 * simple code to zero out a cluster
3638 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) in btrfs_init_free_cluster() argument
3640 spin_lock_init(&cluster->lock); in btrfs_init_free_cluster()
3641 spin_lock_init(&cluster->refill_lock); in btrfs_init_free_cluster()
3642 cluster->root = RB_ROOT; in btrfs_init_free_cluster()
3643 cluster->max_size = 0; in btrfs_init_free_cluster()
3644 cluster->fragmented = false; in btrfs_init_free_cluster()
3645 INIT_LIST_HEAD(&cluster->block_group_list); in btrfs_init_free_cluster()
3646 cluster->block_group = NULL; in btrfs_init_free_cluster()
3655 struct btrfs_space_info *space_info = block_group->space_info; in do_trimming()
3656 struct btrfs_fs_info *fs_info = block_group->fs_info; in do_trimming()
3657 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in do_trimming()
3665 spin_lock(&space_info->lock); in do_trimming()
3666 spin_lock(&block_group->lock); in do_trimming()
3667 if (!block_group->ro) { in do_trimming()
3668 block_group->reserved += reserved_bytes; in do_trimming()
3669 space_info->bytes_reserved += reserved_bytes; in do_trimming()
3672 spin_unlock(&block_group->lock); in do_trimming()
3673 spin_unlock(&space_info->lock); in do_trimming()
3681 mutex_lock(&ctl->cache_writeout_mutex); in do_trimming()
3684 start - reserved_start, in do_trimming()
3687 __btrfs_add_free_space(block_group, end, reserved_end - end, in do_trimming()
3690 list_del(&trim_entry->list); in do_trimming()
3691 mutex_unlock(&ctl->cache_writeout_mutex); in do_trimming()
3694 spin_lock(&space_info->lock); in do_trimming()
3695 spin_lock(&block_group->lock); in do_trimming()
3696 if (block_group->ro) in do_trimming()
3697 space_info->bytes_readonly += reserved_bytes; in do_trimming()
3698 block_group->reserved -= reserved_bytes; in do_trimming()
3699 space_info->bytes_reserved -= reserved_bytes; in do_trimming()
3700 spin_unlock(&block_group->lock); in do_trimming()
3701 spin_unlock(&space_info->lock); in do_trimming()
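
/*
 * Editor's sketch of the bookkeeping do_trimming() performs around a
 * partial discard: the reserved range [rs, re) may be wider than the
 * range actually trimmed [start, end), and the untrimmed head and tail
 * must go back into the free-space pool, as the two
 * __btrfs_add_free_space() calls above show. The callback and helper
 * name are invented.
 */
#include <stdint.h>
#include <assert.h>

typedef void (*readd_fn)(uint64_t offset, uint64_t bytes, int trim_state);

static void readd_untrimmed(uint64_t rs, uint64_t re,
			    uint64_t start, uint64_t end, readd_fn readd)
{
	assert(rs <= start && end <= re);
	if (start > rs)                 /* untrimmed head */
		readd(rs, start - rs, 0 /* untrimmed */);
	if (re > end)                   /* untrimmed tail */
		readd(end, re - end, 0 /* untrimmed */);
}
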
3715 &block_group->fs_info->discard_ctl; in trim_no_bitmap()
3716 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in trim_no_bitmap()
3724 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); in trim_no_bitmap()
3729 mutex_lock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3730 spin_lock(&ctl->tree_lock); in trim_no_bitmap()
3732 if (ctl->free_space < minlen) in trim_no_bitmap()
3740 while (entry->bitmap || in trim_no_bitmap()
3742 node = rb_next(&entry->offset_index); in trim_no_bitmap()
3749 if (entry->offset >= end) in trim_no_bitmap()
3752 extent_start = entry->offset; in trim_no_bitmap()
3753 extent_bytes = entry->bytes; in trim_no_bitmap()
3754 extent_trim_state = entry->trim_state; in trim_no_bitmap()
3756 start = entry->offset; in trim_no_bitmap()
3757 bytes = entry->bytes; in trim_no_bitmap()
3759 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3760 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3774 entry->offset += max_discard_size; in trim_no_bitmap()
3775 entry->bytes -= max_discard_size; in trim_no_bitmap()
3782 bytes = min(extent_start + extent_bytes, end) - start; in trim_no_bitmap()
3784 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3785 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3793 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3796 list_add_tail(&trim_entry.list, &ctl->trimming_ranges); in trim_no_bitmap()
3797 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3803 block_group->discard_cursor = start + bytes; in trim_no_bitmap()
3808 block_group->discard_cursor = start; in trim_no_bitmap()
3813 ret = -ERESTARTSYS; in trim_no_bitmap()
3823 block_group->discard_cursor = btrfs_block_group_end(block_group); in trim_no_bitmap()
3824 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3825 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
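
/*
 * Editor's sketch of the max_discard_size clamp in the extent loop
 * above (entry->offset += max_discard_size; entry->bytes -= ...):
 * discard only the first chunk and shrink the entry in place so the
 * next pass resumes where this one stopped. Invented names; a zero
 * limit means unlimited, matching how a 0 max_discard_size reads.
 */
#include <stdint.h>

struct mini_range { uint64_t offset, bytes; };

/* Returns the number of bytes to discard in this pass. */
static uint64_t clamp_discard(struct mini_range *r, uint64_t max_discard)
{
	uint64_t chunk = r->bytes;

	if (max_discard && chunk > max_discard) {
		chunk = max_discard;
		r->offset += chunk;     /* remainder retried next pass */
		r->bytes -= chunk;
	} else {
		r->bytes = 0;           /* fully consumed */
	}
	return chunk;
}
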
3848 spin_lock(&ctl->tree_lock); in reset_trimming_bitmap()
3852 ctl->discardable_extents[BTRFS_STAT_CURR] += in reset_trimming_bitmap()
3853 entry->bitmap_extents; in reset_trimming_bitmap()
3854 ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes; in reset_trimming_bitmap()
3856 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in reset_trimming_bitmap()
3859 spin_unlock(&ctl->tree_lock); in reset_trimming_bitmap()
3866 entry->trim_state = BTRFS_TRIM_STATE_TRIMMED; in end_trimming_bitmap()
3867 ctl->discardable_extents[BTRFS_STAT_CURR] -= in end_trimming_bitmap()
3868 entry->bitmap_extents; in end_trimming_bitmap()
3869 ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes; in end_trimming_bitmap()
3881 &block_group->fs_info->discard_ctl; in trim_bitmaps()
3882 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in trim_bitmaps()
3888 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); in trim_bitmaps()
3894 mutex_lock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3895 spin_lock(&ctl->tree_lock); in trim_bitmaps()
3897 if (ctl->free_space < minlen) { in trim_bitmaps()
3898 block_group->discard_cursor = in trim_bitmaps()
3900 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3901 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3912 * which is the only discard index which sets minlen to 0. in trim_bitmaps()
3916 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3917 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3929 entry->trim_state = BTRFS_TRIM_STATE_TRIMMING; in trim_bitmaps()
3941 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in trim_bitmaps()
3942 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3943 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3953 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3954 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3958 bytes = min(bytes, end - start); in trim_bitmaps()
3960 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3961 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3977 if (entry->bytes == 0) in trim_bitmaps()
3980 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3983 list_add_tail(&trim_entry.list, &ctl->trimming_ranges); in trim_bitmaps()
3984 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3990 block_group->discard_cursor = in trim_bitmaps()
3996 offset += BITS_PER_BITMAP * ctl->unit; in trim_bitmaps()
4001 block_group->discard_cursor = start; in trim_bitmaps()
4006 ret = -ERESTARTSYS; in trim_bitmaps()
4014 block_group->discard_cursor = end; in trim_bitmaps()
4023 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_trim_block_group()
4027 ASSERT(!btrfs_is_zoned(block_group->fs_info)); in btrfs_trim_block_group()
4031 spin_lock(&block_group->lock); in btrfs_trim_block_group()
4032 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { in btrfs_trim_block_group()
4033 spin_unlock(&block_group->lock); in btrfs_trim_block_group()
4037 spin_unlock(&block_group->lock); in btrfs_trim_block_group()
4044 div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem); in btrfs_trim_block_group()
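
/*
 * Editor's sketch of the boundary check behind the div64_u64_rem()
 * call above: one bitmap covers BITS_PER_BITMAP * unit bytes, and if
 * the trim range ends mid-bitmap, the final bitmap was only partially
 * trimmed and must not be left marked as fully trimmed. User-space
 * model with illustrative parameter names.
 */
#include <stdint.h>
#include <stdbool.h>

/* True if 'end' falls inside (not on the edge of) a bitmap window. */
static bool ends_mid_bitmap(uint64_t end, uint64_t bits_per_bitmap,
			    uint64_t unit)
{
	return (end % (bits_per_bitmap * unit)) != 0;
}
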
4061 spin_lock(&block_group->lock); in btrfs_trim_block_group_extents()
4062 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { in btrfs_trim_block_group_extents()
4063 spin_unlock(&block_group->lock); in btrfs_trim_block_group_extents()
4067 spin_unlock(&block_group->lock); in btrfs_trim_block_group_extents()
4083 spin_lock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
4084 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { in btrfs_trim_block_group_bitmaps()
4085 spin_unlock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
4089 spin_unlock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
4101 return btrfs_super_cache_generation(fs_info->super_copy); in btrfs_free_space_cache_v1_active()
4113 node = rb_first_cached(&fs_info->block_group_cache_tree); in cleanup_free_space_cache_v1()
4132 * super_copy->cache_generation based on SPACE_CACHE and in btrfs_set_free_space_cache_v1_active()
4138 trans = btrfs_start_transaction(fs_info->tree_root, 0); in btrfs_set_free_space_cache_v1_active()
4143 set_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags); in btrfs_set_free_space_cache_v1_active()
4154 clear_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags); in btrfs_set_free_space_cache_v1_active()
4163 return -ENOMEM; in btrfs_free_space_init()
4170 return -ENOMEM; in btrfs_free_space_init()
4192 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; in test_add_free_space_entry()
4203 return -ENOMEM; in test_add_free_space_entry()
4207 spin_lock(&ctl->tree_lock); in test_add_free_space_entry()
4208 info->offset = offset; in test_add_free_space_entry()
4209 info->bytes = bytes; in test_add_free_space_entry()
4210 info->max_extent_size = 0; in test_add_free_space_entry()
4212 spin_unlock(&ctl->tree_lock); in test_add_free_space_entry()
4222 return -ENOMEM; in test_add_free_space_entry()
4226 spin_lock(&ctl->tree_lock); in test_add_free_space_entry()
4230 info->bitmap = map; in test_add_free_space_entry()
4240 bytes -= bytes_added; in test_add_free_space_entry()
4242 spin_unlock(&ctl->tree_lock); in test_add_free_space_entry()
4262 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; in test_check_exists()
4266 spin_lock(&ctl->tree_lock); in test_check_exists()
4276 if (info->bitmap) { in test_check_exists()
4282 bit_bytes = ctl->unit; in test_check_exists()
4295 n = rb_prev(&info->offset_index); in test_check_exists()
4299 if (tmp->offset + tmp->bytes < offset) in test_check_exists()
4301 if (offset + bytes < tmp->offset) { in test_check_exists()
4302 n = rb_prev(&tmp->offset_index); in test_check_exists()
4309 n = rb_next(&info->offset_index); in test_check_exists()
4313 if (offset + bytes < tmp->offset) in test_check_exists()
4315 if (tmp->offset + tmp->bytes < offset) { in test_check_exists()
4316 n = rb_next(&tmp->offset_index); in test_check_exists()
4327 if (info->offset == offset) { in test_check_exists()
4332 if (offset > info->offset && offset < info->offset + info->bytes) in test_check_exists()
4335 spin_unlock(&ctl->tree_lock); in test_check_exists()
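
/*
 * Editor's sketch of the overlap test the fragments above implement
 * over the rbtree by stepping to prev/next neighbors: report whether
 * the query range [offset, offset + bytes) intersects any stored
 * interval. Modeled here with a sorted array and a linear scan;
 * half-open intervals and all names are illustrative choices.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct mini_ival { uint64_t offset, bytes; };

static bool range_overlaps(const struct mini_ival *v, size_t n,
			   uint64_t offset, uint64_t bytes)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (v[i].offset + v[i].bytes <= offset)
			continue;               /* entirely before query */
		if (v[i].offset >= offset + bytes)
			break;                  /* sorted: rest is after */
		return true;                    /* partial or full overlap */
	}
	return false;
}
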