Lines matching +full:lock +full:- +full:offset in fs/btrfs/free-space-cache.c
(format: <file line>  <matched source>  in <enclosing function>)

1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/error-injection.h>
15 #include "extent-tree.h"
19 #include "free-space-cache.h"
21 #include "disk-io.h"
23 #include "space-info.h"
24 #include "block-group.h"
27 #include "inode-item.h"
29 #include "file-item.h"
51 struct btrfs_free_space *bitmap_info, u64 *offset,
56 struct btrfs_free_space *info, u64 offset,
69 while ((node = rb_last(&ctl->free_space_offset)) != NULL) { in __btrfs_remove_free_space_cache()
71 if (!info->bitmap) { in __btrfs_remove_free_space_cache()
78 cond_resched_lock(&ctl->tree_lock); in __btrfs_remove_free_space_cache()
84 u64 offset) in __lookup_free_space_inode() argument
96 key.offset = offset; in __lookup_free_space_inode()
104 return ERR_PTR(-ENOENT); in __lookup_free_space_inode()
107 leaf = path->nodes[0]; in __lookup_free_space_inode()
108 header = btrfs_item_ptr(leaf, path->slots[0], in __lookup_free_space_inode()
125 mapping_set_gfp_mask(inode->i_mapping, in __lookup_free_space_inode()
126 mapping_gfp_constraint(inode->i_mapping, in __lookup_free_space_inode()
135 struct btrfs_fs_info *fs_info = block_group->fs_info; in lookup_free_space_inode()
139 spin_lock(&block_group->lock); in lookup_free_space_inode()
140 if (block_group->inode) in lookup_free_space_inode()
141 inode = igrab(&block_group->inode->vfs_inode); in lookup_free_space_inode()
142 spin_unlock(&block_group->lock); in lookup_free_space_inode()
146 inode = __lookup_free_space_inode(fs_info->tree_root, path, in lookup_free_space_inode()
147 block_group->start); in lookup_free_space_inode()
151 spin_lock(&block_group->lock); in lookup_free_space_inode()
152 if (!((BTRFS_I(inode)->flags & flags) == flags)) { in lookup_free_space_inode()
154 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM | in lookup_free_space_inode()
156 block_group->disk_cache_state = BTRFS_DC_CLEAR; in lookup_free_space_inode()
159 if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) in lookup_free_space_inode()
160 block_group->inode = BTRFS_I(igrab(inode)); in lookup_free_space_inode()
161 spin_unlock(&block_group->lock); in lookup_free_space_inode()
169 u64 ino, u64 offset) in __create_free_space_inode() argument
185 leaf = path->nodes[0]; in __create_free_space_inode()
186 inode_item = btrfs_item_ptr(leaf, path->slots[0], in __create_free_space_inode()
188 btrfs_item_key(leaf, &disk_key, path->slots[0]); in __create_free_space_inode()
191 btrfs_set_inode_generation(leaf, inode_item, trans->transid); in __create_free_space_inode()
199 btrfs_set_inode_transid(leaf, inode_item, trans->transid); in __create_free_space_inode()
200 btrfs_set_inode_block_group(leaf, inode_item, offset); in __create_free_space_inode()
204 key.offset = offset; in __create_free_space_inode()
213 leaf = path->nodes[0]; in __create_free_space_inode()
214 header = btrfs_item_ptr(leaf, path->slots[0], in __create_free_space_inode()
230 ret = btrfs_get_free_objectid(trans->fs_info->tree_root, &ino); in create_free_space_inode()
234 return __create_free_space_inode(trans->fs_info->tree_root, trans, path, in create_free_space_inode()
235 ino, block_group->start); in create_free_space_inode()
253 return -ENOMEM; in btrfs_remove_free_space_inode()
258 if (PTR_ERR(inode) != -ENOENT) in btrfs_remove_free_space_inode()
269 spin_lock(&block_group->lock); in btrfs_remove_free_space_inode()
270 if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) { in btrfs_remove_free_space_inode()
271 block_group->inode = NULL; in btrfs_remove_free_space_inode()
272 spin_unlock(&block_group->lock); in btrfs_remove_free_space_inode()
275 spin_unlock(&block_group->lock); in btrfs_remove_free_space_inode()
282 key.offset = block_group->start; in btrfs_remove_free_space_inode()
283 ret = btrfs_search_slot(trans, trans->fs_info->tree_root, &key, path, in btrfs_remove_free_space_inode()
284 -1, 1); in btrfs_remove_free_space_inode()
290 ret = btrfs_del_item(trans, trans->fs_info->tree_root, path); in btrfs_remove_free_space_inode()
308 struct btrfs_root *root = inode->root; in btrfs_truncate_free_space_cache()
317 ret = -ENOMEM; in btrfs_truncate_free_space_cache()
321 mutex_lock(&trans->transaction->cache_write_mutex); in btrfs_truncate_free_space_cache()
322 if (!list_empty(&block_group->io_list)) { in btrfs_truncate_free_space_cache()
323 list_del_init(&block_group->io_list); in btrfs_truncate_free_space_cache()
333 spin_lock(&block_group->lock); in btrfs_truncate_free_space_cache()
334 block_group->disk_cache_state = BTRFS_DC_CLEAR; in btrfs_truncate_free_space_cache()
335 spin_unlock(&block_group->lock); in btrfs_truncate_free_space_cache()
342 lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); in btrfs_truncate_free_space_cache()
343 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false); in btrfs_truncate_free_space_cache()
347 * need to check for -EAGAIN. in btrfs_truncate_free_space_cache()
351 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); in btrfs_truncate_free_space_cache()
354 unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state); in btrfs_truncate_free_space_cache()
362 mutex_unlock(&trans->transaction->cache_write_mutex); in btrfs_truncate_free_space_cache()
374 file_ra_state_init(&ra, inode->i_mapping); in readahead_cache()
375 last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; in readahead_cache()
377 page_cache_sync_readahead(inode->i_mapping, &ra, NULL, 0, last_index); in readahead_cache()
389 return -ENOSPC; in io_ctl_init()
393 io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS); in io_ctl_init()
394 if (!io_ctl->pages) in io_ctl_init()
395 return -ENOMEM; in io_ctl_init()
397 io_ctl->num_pages = num_pages; in io_ctl_init()
398 io_ctl->fs_info = inode_to_fs_info(inode); in io_ctl_init()
399 io_ctl->inode = inode; in io_ctl_init()
407 kfree(io_ctl->pages); in io_ctl_free()
408 io_ctl->pages = NULL; in io_ctl_free()
413 if (io_ctl->cur) { in io_ctl_unmap_page()
414 io_ctl->cur = NULL; in io_ctl_unmap_page()
415 io_ctl->orig = NULL; in io_ctl_unmap_page()
421 ASSERT(io_ctl->index < io_ctl->num_pages); in io_ctl_map_page()
422 io_ctl->page = io_ctl->pages[io_ctl->index++]; in io_ctl_map_page()
423 io_ctl->cur = page_address(io_ctl->page); in io_ctl_map_page()
424 io_ctl->orig = io_ctl->cur; in io_ctl_map_page()
425 io_ctl->size = PAGE_SIZE; in io_ctl_map_page()
427 clear_page(io_ctl->cur); in io_ctl_map_page()
436 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_drop_pages()
437 if (io_ctl->pages[i]) { in io_ctl_drop_pages()
438 btrfs_folio_clear_checked(io_ctl->fs_info, in io_ctl_drop_pages()
439 page_folio(io_ctl->pages[i]), in io_ctl_drop_pages()
440 page_offset(io_ctl->pages[i]), in io_ctl_drop_pages()
442 unlock_page(io_ctl->pages[i]); in io_ctl_drop_pages()
443 put_page(io_ctl->pages[i]); in io_ctl_drop_pages()
451 struct inode *inode = io_ctl->inode; in io_ctl_prepare_pages()
452 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); in io_ctl_prepare_pages()
455 for (i = 0; i < io_ctl->num_pages; i++) { in io_ctl_prepare_pages()
458 page = find_or_create_page(inode->i_mapping, i, mask); in io_ctl_prepare_pages()
461 return -ENOMEM; in io_ctl_prepare_pages()
472 io_ctl->pages[i] = page; in io_ctl_prepare_pages()
476 if (page->mapping != inode->i_mapping) { in io_ctl_prepare_pages()
477 btrfs_err(BTRFS_I(inode)->root->fs_info, in io_ctl_prepare_pages()
480 return -EIO; in io_ctl_prepare_pages()
483 btrfs_err(BTRFS_I(inode)->root->fs_info, in io_ctl_prepare_pages()
486 return -EIO; in io_ctl_prepare_pages()
491 for (i = 0; i < io_ctl->num_pages; i++) in io_ctl_prepare_pages()
492 clear_page_dirty_for_io(io_ctl->pages[i]); in io_ctl_prepare_pages()
505 io_ctl->cur += (sizeof(u32) * io_ctl->num_pages); in io_ctl_set_generation()
506 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); in io_ctl_set_generation()
508 put_unaligned_le64(generation, io_ctl->cur); in io_ctl_set_generation()
509 io_ctl->cur += sizeof(u64); in io_ctl_set_generation()
520 io_ctl->cur += sizeof(u32) * io_ctl->num_pages; in io_ctl_check_generation()
521 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages); in io_ctl_check_generation()
523 cache_gen = get_unaligned_le64(io_ctl->cur); in io_ctl_check_generation()
525 btrfs_err_rl(io_ctl->fs_info, in io_ctl_check_generation()
529 return -EIO; in io_ctl_check_generation()
531 io_ctl->cur += sizeof(u64); in io_ctl_check_generation()
539 unsigned offset = 0; in io_ctl_set_crc() local
542 offset = sizeof(u32) * io_ctl->num_pages; in io_ctl_set_crc()
544 crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); in io_ctl_set_crc()
547 tmp = page_address(io_ctl->pages[0]); in io_ctl_set_crc()
556 unsigned offset = 0; in io_ctl_check_crc() local
559 offset = sizeof(u32) * io_ctl->num_pages; in io_ctl_check_crc()
561 tmp = page_address(io_ctl->pages[0]); in io_ctl_check_crc()
566 crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset); in io_ctl_check_crc()
569 btrfs_err_rl(io_ctl->fs_info, in io_ctl_check_crc()
572 return -EIO; in io_ctl_check_crc()
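The io_ctl_set_crc()/io_ctl_check_crc() fragments above imply a simple layout: page 0 reserves one u32 per page for checksums, so its payload starts at sizeof(u32) * num_pages, and every page is covered by a crc32c over its remaining bytes. Below is a minimal userspace sketch of that layout, assuming 4 KiB pages; checksum_page() and the CRC seeding details are illustrative, not the kernel's btrfs_crc32c().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096u

/* Bitwise CRC-32C (Castagnoli), reflected polynomial 0x82F63B78. */
static uint32_t crc32c(uint32_t crc, const uint8_t *buf, size_t len)
{
	crc = ~crc;
	while (len--) {
		crc ^= *buf++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
	}
	return ~crc;
}

/* The payload of a page skips the checksum array on page 0 only. */
static uint32_t checksum_page(const uint8_t *page, unsigned index,
			      unsigned num_pages)
{
	unsigned offset = (index == 0) ? sizeof(uint32_t) * num_pages : 0;

	return crc32c(0, page + offset, PAGE_SZ - offset);
}

int main(void)
{
	static uint8_t pages[2][PAGE_SZ];
	uint32_t crc;

	memset(pages[1], 0xab, PAGE_SZ);
	for (unsigned i = 0; i < 2; i++) {
		crc = checksum_page(pages[i], i, 2);
		/* The checksum of page i lives at the front of page 0. */
		memcpy(pages[0] + i * sizeof(crc), &crc, sizeof(crc));
	}
	memcpy(&crc, pages[0] + sizeof(crc), sizeof(crc));
	printf("crc of page 1: 0x%08x\n", crc);
	return 0;
}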
578 static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes, in io_ctl_add_entry() argument
583 if (!io_ctl->cur) in io_ctl_add_entry()
584 return -ENOSPC; in io_ctl_add_entry()
586 entry = io_ctl->cur; in io_ctl_add_entry()
587 put_unaligned_le64(offset, &entry->offset); in io_ctl_add_entry()
588 put_unaligned_le64(bytes, &entry->bytes); in io_ctl_add_entry()
589 entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP : in io_ctl_add_entry()
591 io_ctl->cur += sizeof(struct btrfs_free_space_entry); in io_ctl_add_entry()
592 io_ctl->size -= sizeof(struct btrfs_free_space_entry); in io_ctl_add_entry()
594 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) in io_ctl_add_entry()
597 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_entry()
600 if (io_ctl->index >= io_ctl->num_pages) in io_ctl_add_entry()
610 if (!io_ctl->cur) in io_ctl_add_bitmap()
611 return -ENOSPC; in io_ctl_add_bitmap()
617 if (io_ctl->cur != io_ctl->orig) { in io_ctl_add_bitmap()
618 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_bitmap()
619 if (io_ctl->index >= io_ctl->num_pages) in io_ctl_add_bitmap()
620 return -ENOSPC; in io_ctl_add_bitmap()
624 copy_page(io_ctl->cur, bitmap); in io_ctl_add_bitmap()
625 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_add_bitmap()
626 if (io_ctl->index < io_ctl->num_pages) in io_ctl_add_bitmap()
637 if (io_ctl->cur != io_ctl->orig) in io_ctl_zero_remaining_pages()
638 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_zero_remaining_pages()
642 while (io_ctl->index < io_ctl->num_pages) { in io_ctl_zero_remaining_pages()
644 io_ctl_set_crc(io_ctl, io_ctl->index - 1); in io_ctl_zero_remaining_pages()
654 if (!io_ctl->cur) { in io_ctl_read_entry()
655 ret = io_ctl_check_crc(io_ctl, io_ctl->index); in io_ctl_read_entry()
660 e = io_ctl->cur; in io_ctl_read_entry()
661 entry->offset = get_unaligned_le64(&e->offset); in io_ctl_read_entry()
662 entry->bytes = get_unaligned_le64(&e->bytes); in io_ctl_read_entry()
663 *type = e->type; in io_ctl_read_entry()
664 io_ctl->cur += sizeof(struct btrfs_free_space_entry); in io_ctl_read_entry()
665 io_ctl->size -= sizeof(struct btrfs_free_space_entry); in io_ctl_read_entry()
667 if (io_ctl->size >= sizeof(struct btrfs_free_space_entry)) in io_ctl_read_entry()
680 ret = io_ctl_check_crc(io_ctl, io_ctl->index); in io_ctl_read_bitmap()
684 copy_page(entry->bitmap, io_ctl->cur); in io_ctl_read_bitmap()
692 struct btrfs_block_group *block_group = ctl->block_group; in recalculate_thresholds()
696 u64 size = block_group->length; in recalculate_thresholds()
697 u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; in recalculate_thresholds()
698 u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); in recalculate_thresholds()
702 if (ctl->total_bitmaps > max_bitmaps) in recalculate_thresholds()
703 btrfs_err(block_group->fs_info, in recalculate_thresholds()
705 block_group->start, block_group->length, in recalculate_thresholds()
706 ctl->total_bitmaps, ctl->unit, max_bitmaps, in recalculate_thresholds()
708 ASSERT(ctl->total_bitmaps <= max_bitmaps); in recalculate_thresholds()
721 bitmap_bytes = ctl->total_bitmaps * ctl->unit; in recalculate_thresholds()
727 extent_bytes = max_bytes - bitmap_bytes; in recalculate_thresholds()
730 ctl->extents_thresh = in recalculate_thresholds()
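recalculate_thresholds() above bounds the number of bitmaps by a round-up division of the block-group size by the span one bitmap covers, and budgets cache memory per GiB. A hedged sketch of that arithmetic, assuming 4 KiB pages and the 64 KiB-per-GiB cap (MAX_CACHE_BYTES_PER_GIG in this file); the exact rounding of the budget here is illustrative.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ			4096ULL
#define BITS_PER_BITMAP		(PAGE_SZ * 8)	/* bits per bitmap page */
#define MAX_CACHE_BYTES_PER_GIG	(64ULL * 1024)

int main(void)
{
	uint64_t unit = 4096;				/* sectorsize */
	uint64_t size = 1024ULL * 1024 * 1024;		/* 1 GiB block group */
	uint64_t bytes_per_bg = BITS_PER_BITMAP * unit;	/* 128 MiB per bitmap */
	uint64_t max_bitmaps = (size + bytes_per_bg - 1) / bytes_per_bg;
	uint64_t max_bytes = MAX_CACHE_BYTES_PER_GIG *
			     ((size + (1ULL << 30) - 1) >> 30);

	printf("max_bitmaps  = %llu\n", (unsigned long long)max_bitmaps);
	printf("cache budget = %llu bytes\n", (unsigned long long)max_bytes);
	return 0;
}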
736 struct btrfs_path *path, u64 offset) in __load_free_space_cache() argument
738 struct btrfs_fs_info *fs_info = root->fs_info; in __load_free_space_cache()
756 key.offset = offset; in __load_free_space_cache()
767 ret = -1; in __load_free_space_cache()
769 leaf = path->nodes[0]; in __load_free_space_cache()
770 header = btrfs_item_ptr(leaf, path->slots[0], in __load_free_space_cache()
777 if (!BTRFS_I(inode)->generation) { in __load_free_space_cache()
780 offset); in __load_free_space_cache()
784 if (BTRFS_I(inode)->generation != generation) { in __load_free_space_cache()
787 BTRFS_I(inode)->generation, generation); in __load_free_space_cache()
816 ret = -ENOMEM; in __load_free_space_cache()
826 if (!e->bytes) { in __load_free_space_cache()
827 ret = -1; in __load_free_space_cache()
833 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
835 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
844 num_bitmaps--; in __load_free_space_cache()
845 e->bitmap = kmem_cache_zalloc( in __load_free_space_cache()
847 if (!e->bitmap) { in __load_free_space_cache()
848 ret = -ENOMEM; in __load_free_space_cache()
853 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
856 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
859 kmem_cache_free(btrfs_free_space_bitmap_cachep, e->bitmap); in __load_free_space_cache()
863 ctl->total_bitmaps++; in __load_free_space_cache()
865 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
866 list_add_tail(&e->list, &bitmaps); in __load_free_space_cache()
869 num_entries--; in __load_free_space_cache()
879 list_del_init(&e->list); in __load_free_space_cache()
893 spin_lock(&ctl->tree_lock); in __load_free_space_cache()
895 spin_unlock(&ctl->tree_lock); in __load_free_space_cache()
906 while (!ret && (n = rb_first(&ctl->free_space_offset)) != NULL) { in copy_free_space_cache()
908 if (!info->bitmap) { in copy_free_space_cache()
909 const u64 offset = info->offset; in copy_free_space_cache() local
910 const u64 bytes = info->bytes; in copy_free_space_cache()
913 spin_unlock(&ctl->tree_lock); in copy_free_space_cache()
915 ret = btrfs_add_free_space(block_group, offset, bytes); in copy_free_space_cache()
916 spin_lock(&ctl->tree_lock); in copy_free_space_cache()
918 u64 offset = info->offset; in copy_free_space_cache() local
919 u64 bytes = ctl->unit; in copy_free_space_cache()
921 ret = search_bitmap(ctl, info, &offset, &bytes, false); in copy_free_space_cache()
923 bitmap_clear_bits(ctl, info, offset, bytes, true); in copy_free_space_cache()
924 spin_unlock(&ctl->tree_lock); in copy_free_space_cache()
925 ret = btrfs_add_free_space(block_group, offset, in copy_free_space_cache()
927 spin_lock(&ctl->tree_lock); in copy_free_space_cache()
933 cond_resched_lock(&ctl->tree_lock); in copy_free_space_cache()
942 struct btrfs_fs_info *fs_info = block_group->fs_info; in load_free_space_cache()
943 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in load_free_space_cache()
949 u64 used = block_group->used; in load_free_space_cache()
962 spin_lock(&block_group->lock); in load_free_space_cache()
963 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { in load_free_space_cache()
964 spin_unlock(&block_group->lock); in load_free_space_cache()
967 spin_unlock(&block_group->lock); in load_free_space_cache()
972 path->search_commit_root = 1; in load_free_space_cache()
973 path->skip_locking = 1; in load_free_space_cache()
980 * for a free extent, at extent-tree.c:find_free_extent(), we can find in load_free_space_cache()
985 * deadlock on the extent buffer (trying to read lock it when we in load_free_space_cache()
991 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so in load_free_space_cache()
1001 spin_lock(&block_group->lock); in load_free_space_cache()
1002 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { in load_free_space_cache()
1003 spin_unlock(&block_group->lock); in load_free_space_cache()
1007 spin_unlock(&block_group->lock); in load_free_space_cache()
1010 * Reinitialize the class of struct inode's mapping->invalidate_lock for in load_free_space_cache()
1014 lockdep_set_class(&(&inode->i_data)->invalidate_lock, in load_free_space_cache()
1017 ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl, in load_free_space_cache()
1018 path, block_group->start); in load_free_space_cache()
1023 matched = (tmp_ctl.free_space == (block_group->length - used - in load_free_space_cache()
1024 block_group->bytes_super)); in load_free_space_cache()
1032 * so we need to re-set it here. in load_free_space_cache()
1046 block_group->start); in load_free_space_cache()
1047 ret = -1; in load_free_space_cache()
1052 spin_lock(&block_group->lock); in load_free_space_cache()
1053 block_group->disk_cache_state = BTRFS_DC_CLEAR; in load_free_space_cache()
1054 spin_unlock(&block_group->lock); in load_free_space_cache()
1059 block_group->start); in load_free_space_cache()
1062 spin_lock(&ctl->tree_lock); in load_free_space_cache()
1064 spin_unlock(&ctl->tree_lock); in load_free_space_cache()
1079 struct rb_node *node = rb_first(&ctl->free_space_offset); in write_cache_extent_entries()
1083 if (block_group && !list_empty(&block_group->cluster_list)) { in write_cache_extent_entries()
1084 cluster = list_entry(block_group->cluster_list.next, in write_cache_extent_entries()
1091 spin_lock(&cluster_locked->lock); in write_cache_extent_entries()
1092 node = rb_first(&cluster->root); in write_cache_extent_entries()
1103 ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes, in write_cache_extent_entries()
1104 e->bitmap); in write_cache_extent_entries()
1108 if (e->bitmap) { in write_cache_extent_entries()
1109 list_add_tail(&e->list, bitmap_list); in write_cache_extent_entries()
1114 node = rb_first(&cluster->root); in write_cache_extent_entries()
1116 spin_lock(&cluster_locked->lock); in write_cache_extent_entries()
1121 spin_unlock(&cluster_locked->lock); in write_cache_extent_entries()
1131 list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) { in write_cache_extent_entries()
1132 ret = io_ctl_add_entry(io_ctl, trim_entry->start, in write_cache_extent_entries()
1133 trim_entry->bytes, NULL); in write_cache_extent_entries()
1142 spin_unlock(&cluster_locked->lock); in write_cache_extent_entries()
1143 return -ENOSPC; in write_cache_extent_entries()
1150 struct btrfs_path *path, u64 offset, in update_cache_item() argument
1159 key.offset = offset; in update_cache_item()
1164 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, in update_cache_item()
1168 leaf = path->nodes[0]; in update_cache_item()
1171 ASSERT(path->slots[0]); in update_cache_item()
1172 path->slots[0]--; in update_cache_item()
1173 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in update_cache_item()
1175 found_key.offset != offset) { in update_cache_item()
1176 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, in update_cache_item()
1177 inode->i_size - 1, EXTENT_DELALLOC, in update_cache_item()
1184 BTRFS_I(inode)->generation = trans->transid; in update_cache_item()
1185 header = btrfs_item_ptr(leaf, path->slots[0], in update_cache_item()
1189 btrfs_set_free_space_generation(leaf, header, trans->transid); in update_cache_item()
1195 return -1; in update_cache_item()
1218 unpin = &trans->transaction->pinned_extents; in write_pinned_extent_entries()
1220 start = block_group->start; in write_pinned_extent_entries()
1222 while (start < block_group->start + block_group->length) { in write_pinned_extent_entries()
1229 if (extent_start >= block_group->start + block_group->length) in write_pinned_extent_entries()
1233 extent_end = min(block_group->start + block_group->length, in write_pinned_extent_entries()
1235 len = extent_end - extent_start; in write_pinned_extent_entries()
1240 return -ENOSPC; in write_pinned_extent_entries()
1256 ret = io_ctl_add_bitmap(io_ctl, entry->bitmap); in write_bitmap_entries()
1258 return -ENOSPC; in write_bitmap_entries()
1259 list_del_init(&entry->list); in write_bitmap_entries()
1269 ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1); in flush_dirty_cache()
1271 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1, in flush_dirty_cache()
1283 list_del_init(&entry->list); in cleanup_bitmap_list()
1292 unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, in cleanup_write_cache_enospc()
1300 struct btrfs_path *path, u64 offset) in __btrfs_wait_cache_io() argument
1303 struct inode *inode = io_ctl->inode; in __btrfs_wait_cache_io()
1314 ret = update_cache_item(trans, root, inode, path, offset, in __btrfs_wait_cache_io()
1315 io_ctl->entries, io_ctl->bitmaps); in __btrfs_wait_cache_io()
1318 invalidate_inode_pages2(inode->i_mapping); in __btrfs_wait_cache_io()
1319 BTRFS_I(inode)->generation = 0; in __btrfs_wait_cache_io()
1321 btrfs_debug(root->fs_info, in __btrfs_wait_cache_io()
1323 block_group->start, ret); in __btrfs_wait_cache_io()
1329 spin_lock(&trans->transaction->dirty_bgs_lock); in __btrfs_wait_cache_io()
1331 /* the disk_cache_state is protected by the block group lock */ in __btrfs_wait_cache_io()
1332 spin_lock(&block_group->lock); in __btrfs_wait_cache_io()
1339 if (!ret && list_empty(&block_group->dirty_list)) in __btrfs_wait_cache_io()
1340 block_group->disk_cache_state = BTRFS_DC_WRITTEN; in __btrfs_wait_cache_io()
1342 block_group->disk_cache_state = BTRFS_DC_ERROR; in __btrfs_wait_cache_io()
1344 spin_unlock(&block_group->lock); in __btrfs_wait_cache_io()
1345 spin_unlock(&trans->transaction->dirty_bgs_lock); in __btrfs_wait_cache_io()
1346 io_ctl->inode = NULL; in __btrfs_wait_cache_io()
1358 return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans, in btrfs_wait_cache_io()
1359 block_group, &block_group->io_ctl, in btrfs_wait_cache_io()
1360 path, block_group->start); in btrfs_wait_cache_io()
1391 return -EIO; in __btrfs_write_out_cache()
1393 WARN_ON(io_ctl->pages); in __btrfs_write_out_cache()
1398 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) { in __btrfs_write_out_cache()
1399 down_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1400 spin_lock(&block_group->lock); in __btrfs_write_out_cache()
1401 if (block_group->delalloc_bytes) { in __btrfs_write_out_cache()
1402 block_group->disk_cache_state = BTRFS_DC_WRITTEN; in __btrfs_write_out_cache()
1403 spin_unlock(&block_group->lock); in __btrfs_write_out_cache()
1404 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1405 BTRFS_I(inode)->generation = 0; in __btrfs_write_out_cache()
1410 spin_unlock(&block_group->lock); in __btrfs_write_out_cache()
1413 /* Lock all pages first so we can lock the extent safely. */ in __btrfs_write_out_cache()
1418 lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, in __btrfs_write_out_cache()
1421 io_ctl_set_generation(io_ctl, trans->transid); in __btrfs_write_out_cache()
1423 mutex_lock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1425 spin_lock(&ctl->tree_lock); in __btrfs_write_out_cache()
1450 spin_unlock(&ctl->tree_lock); in __btrfs_write_out_cache()
1451 mutex_unlock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1462 u64 dirty_len = min_t(u64, dirty_start + PAGE_SIZE, i_size) - dirty_start; in __btrfs_write_out_cache()
1464 ret = btrfs_dirty_folio(BTRFS_I(inode), page_folio(io_ctl->pages[i]), in __btrfs_write_out_cache()
1470 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) in __btrfs_write_out_cache()
1471 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1479 unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, in __btrfs_write_out_cache()
1487 io_ctl->entries = entries; in __btrfs_write_out_cache()
1488 io_ctl->bitmaps = bitmaps; in __btrfs_write_out_cache()
1490 ret = btrfs_fdatawrite_range(BTRFS_I(inode), 0, (u64)-1); in __btrfs_write_out_cache()
1498 spin_unlock(&ctl->tree_lock); in __btrfs_write_out_cache()
1499 mutex_unlock(&ctl->cache_writeout_mutex); in __btrfs_write_out_cache()
1505 if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) in __btrfs_write_out_cache()
1506 up_write(&block_group->data_rwsem); in __btrfs_write_out_cache()
1509 io_ctl->inode = NULL; in __btrfs_write_out_cache()
1512 invalidate_inode_pages2(inode->i_mapping); in __btrfs_write_out_cache()
1513 BTRFS_I(inode)->generation = 0; in __btrfs_write_out_cache()
1525 struct btrfs_fs_info *fs_info = trans->fs_info; in btrfs_write_out_cache()
1526 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_write_out_cache()
1530 spin_lock(&block_group->lock); in btrfs_write_out_cache()
1531 if (block_group->disk_cache_state < BTRFS_DC_SETUP) { in btrfs_write_out_cache()
1532 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1535 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1542 &block_group->io_ctl, trans); in btrfs_write_out_cache()
1546 block_group->start, ret); in btrfs_write_out_cache()
1547 spin_lock(&block_group->lock); in btrfs_write_out_cache()
1548 block_group->disk_cache_state = BTRFS_DC_ERROR; in btrfs_write_out_cache()
1549 spin_unlock(&block_group->lock); in btrfs_write_out_cache()
1551 block_group->io_ctl.inode = NULL; in btrfs_write_out_cache()
1564 u64 offset) in offset_to_bit() argument
1566 ASSERT(offset >= bitmap_start); in offset_to_bit()
1567 offset -= bitmap_start; in offset_to_bit()
1568 return (unsigned long)(div_u64(offset, unit)); in offset_to_bit()
1577 u64 offset) in offset_to_bitmap()
1582 bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; in offset_to_bitmap()
1583 bitmap_start = offset - ctl->start; in offset_to_bitmap()
1586 bitmap_start += ctl->start; in offset_to_bitmap()
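offset_to_bit() and offset_to_bitmap() above are pure arithmetic: a bitmap window spans BITS_PER_BITMAP * unit bytes, so any byte offset maps to a window start and a bit index within that window. A direct userspace restatement, assuming 64-bit division (the kernel uses div_u64()/div64_u64() for 32-bit hosts):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BITMAP	(4096ULL * 8)

static unsigned long offset_to_bit(uint64_t bitmap_start, uint32_t unit,
				   uint64_t offset)
{
	return (unsigned long)((offset - bitmap_start) / unit);
}

static uint64_t offset_to_bitmap(uint64_t ctl_start, uint32_t unit,
				 uint64_t offset)
{
	uint64_t bytes_per_bitmap = BITS_PER_BITMAP * unit;
	uint64_t bitmap_start = offset - ctl_start;

	/* Round down to the start of the containing bitmap window. */
	bitmap_start = (bitmap_start / bytes_per_bitmap) * bytes_per_bitmap;
	return bitmap_start + ctl_start;
}

int main(void)
{
	uint64_t start = 1ULL << 30;			/* block group start */
	uint64_t off = start + 200 * 1024 * 1024;	/* +200 MiB */

	printf("bitmap window: %llu\n",
	       (unsigned long long)offset_to_bitmap(start, 4096, off));
	printf("bit index:     %lu\n",
	       offset_to_bit(offset_to_bitmap(start, 4096, off), 4096, off));
	return 0;
}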
1599 lockdep_assert_held(&ctl->tree_lock); in tree_insert_offset()
1602 lockdep_assert_held(&cluster->lock); in tree_insert_offset()
1603 root = &cluster->root; in tree_insert_offset()
1605 root = &ctl->free_space_offset; in tree_insert_offset()
1608 p = &root->rb_node; in tree_insert_offset()
1616 if (new_entry->offset < info->offset) { in tree_insert_offset()
1617 p = &(*p)->rb_left; in tree_insert_offset()
1618 } else if (new_entry->offset > info->offset) { in tree_insert_offset()
1619 p = &(*p)->rb_right; in tree_insert_offset()
1623 * share the same offset. If this is the case, we want in tree_insert_offset()
1629 * this offset, we want to go right, or after this entry in tree_insert_offset()
1634 if (new_entry->bitmap) { in tree_insert_offset()
1635 if (info->bitmap) { in tree_insert_offset()
1637 return -EEXIST; in tree_insert_offset()
1639 p = &(*p)->rb_right; in tree_insert_offset()
1641 if (!info->bitmap) { in tree_insert_offset()
1643 return -EEXIST; in tree_insert_offset()
1645 p = &(*p)->rb_left; in tree_insert_offset()
1650 rb_link_node(&new_entry->offset_index, parent, p); in tree_insert_offset()
1651 rb_insert_color(&new_entry->offset_index, root); in tree_insert_offset()
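tree_insert_offset() above keys the tree by offset and, when an extent entry and a bitmap entry share an offset, keeps the extent on the left and the bitmap on the right; two entries of the same kind at one offset are rejected with -EEXIST. A small comparator sketch of that ordering rule, in plain C with no rbtree:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fs_entry {
	uint64_t offset;
	bool bitmap;
};

/* <0: a before b, >0: a after b, 0: duplicate (would be -EEXIST) */
static int entry_cmp(const struct fs_entry *a, const struct fs_entry *b)
{
	if (a->offset != b->offset)
		return a->offset < b->offset ? -1 : 1;
	if (a->bitmap == b->bitmap)
		return 0;
	return a->bitmap ? 1 : -1;	/* extent first, bitmap second */
}

int main(void)
{
	struct fs_entry extent = { 4096, false }, bitmap = { 4096, true };

	printf("extent vs bitmap at same offset: %d\n",
	       entry_cmp(&extent, &bitmap));	/* -1: extent sorts first */
	return 0;
}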
1657 * This is a little subtle. We *only* have ->max_extent_size set if we actually
1658 * searched through the bitmap and figured out the largest ->max_extent_size,
1661 * we've found already if it's larger, or we want to use ->bytes.
1663  * This matters because find_free_space() will skip entries whose ->bytes is
1665 * may pick some previous entry that has a smaller ->max_extent_size than we
1667 * ->max_extent_size set to 4K and ->bytes set to 1M. A second entry hasn't set
1668 * ->max_extent_size yet, has ->bytes set to 8K and it's contiguous. We will
1670 * that first bitmap entry had ->max_extent_size set, but the second one did
1675 * don't have ->max_extent_size set. We'll return 16K, and the next time the
1682 if (entry->bitmap && entry->max_extent_size) in get_max_extent_size()
1683 return entry->max_extent_size; in get_max_extent_size()
1684 return entry->bytes; in get_max_extent_size()
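A userspace restatement of get_max_extent_size() above: trust a bitmap entry's cached largest-run size when it is known, otherwise fall back to ->bytes, which for a bitmap only bounds the largest contiguous run from above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fs_entry {
	uint64_t bytes;			/* total free bytes in the entry */
	uint64_t max_extent_size;	/* largest known run, 0 = unknown */
	bool bitmap;
};

static uint64_t get_max_extent_size(const struct fs_entry *e)
{
	if (e->bitmap && e->max_extent_size)
		return e->max_extent_size;
	return e->bytes;
}

int main(void)
{
	/* 1 MiB free in the bitmap, but the largest run found was 4 KiB. */
	struct fs_entry e = { 1024 * 1024, 4096, true };

	printf("usable extent bound: %llu\n",
	       (unsigned long long)get_max_extent_size(&e));
	return 0;
}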
1701 * searches the tree for the given offset.
1703 * fuzzy - If this is set, then we are trying to make an allocation, and we just
1705 * offset.
1709 u64 offset, int bitmap_only, int fuzzy) in tree_search_offset() argument
1711 struct rb_node *n = ctl->free_space_offset.rb_node; in tree_search_offset()
1714 lockdep_assert_held(&ctl->tree_lock); in tree_search_offset()
1716 /* find entry that is closest to the 'offset' */ in tree_search_offset()
1721 if (offset < entry->offset) in tree_search_offset()
1722 n = n->rb_left; in tree_search_offset()
1723 else if (offset > entry->offset) in tree_search_offset()
1724 n = n->rb_right; in tree_search_offset()
1734 if (entry->bitmap) in tree_search_offset()
1738 * bitmap entry and extent entry may share same offset, in tree_search_offset()
1745 if (entry->offset != offset) in tree_search_offset()
1748 WARN_ON(!entry->bitmap); in tree_search_offset()
1751 if (entry->bitmap) { in tree_search_offset()
1753 * if previous extent entry covers the offset, in tree_search_offset()
1756 n = rb_prev(&entry->offset_index); in tree_search_offset()
1760 if (!prev->bitmap && in tree_search_offset()
1761 prev->offset + prev->bytes > offset) in tree_search_offset()
1771 /* find last entry before the 'offset' */ in tree_search_offset()
1773 if (entry->offset > offset) { in tree_search_offset()
1774 n = rb_prev(&entry->offset_index); in tree_search_offset()
1778 ASSERT(entry->offset <= offset); in tree_search_offset()
1787 if (entry->bitmap) { in tree_search_offset()
1788 n = rb_prev(&entry->offset_index); in tree_search_offset()
1792 if (!prev->bitmap && in tree_search_offset()
1793 prev->offset + prev->bytes > offset) in tree_search_offset()
1796 if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) in tree_search_offset()
1798 } else if (entry->offset + entry->bytes > offset) in tree_search_offset()
1805 n = rb_next(&entry->offset_index); in tree_search_offset()
1809 if (entry->bitmap) { in tree_search_offset()
1810 if (entry->offset + BITS_PER_BITMAP * in tree_search_offset()
1811 ctl->unit > offset) in tree_search_offset()
1814 if (entry->offset + entry->bytes > offset) in tree_search_offset()
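A simplified model of the tree_search_offset() lookup above, using a sorted array instead of an rbtree: a non-fuzzy lookup wants the entry whose range contains the offset, while a fuzzy lookup may also return the first entry starting after it, since an allocation can use that instead. This deliberately ignores the bitmap/extent same-offset subtleties handled above.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fs_entry {
	uint64_t offset, bytes;
	bool bitmap;
};

/* Extents end at offset+bytes; a bitmap's window is a fixed 'span' bytes. */
static uint64_t entry_end(const struct fs_entry *e, uint64_t span)
{
	return e->offset + (e->bitmap ? span : e->bytes);
}

static const struct fs_entry *
search(const struct fs_entry *v, size_t n, uint64_t span,
       uint64_t offset, bool fuzzy)
{
	for (size_t i = 0; i < n; i++) {	/* v is sorted by offset */
		if (v[i].offset <= offset && offset < entry_end(&v[i], span))
			return &v[i];
		if (v[i].offset > offset)
			return fuzzy ? &v[i] : NULL;
	}
	return NULL;
}

int main(void)
{
	const struct fs_entry v[] = {
		{ 0,     4096, false },
		{ 65536, 8192, false },
	};
	const struct fs_entry *e = search(v, 2, 128 << 20, 40960, true);

	printf("fuzzy hit at %llu\n", e ? (unsigned long long)e->offset : 0);
	return 0;
}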
1825 lockdep_assert_held(&ctl->tree_lock); in unlink_free_space()
1827 rb_erase(&info->offset_index, &ctl->free_space_offset); in unlink_free_space()
1828 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes); in unlink_free_space()
1829 ctl->free_extents--; in unlink_free_space()
1831 if (!info->bitmap && !btrfs_free_space_trimmed(info)) { in unlink_free_space()
1832 ctl->discardable_extents[BTRFS_STAT_CURR]--; in unlink_free_space()
1833 ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes; in unlink_free_space()
1837 ctl->free_space -= info->bytes; in unlink_free_space()
1845 lockdep_assert_held(&ctl->tree_lock); in link_free_space()
1847 ASSERT(info->bytes || info->bitmap); in link_free_space()
1852 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less); in link_free_space()
1854 if (!info->bitmap && !btrfs_free_space_trimmed(info)) { in link_free_space()
1855 ctl->discardable_extents[BTRFS_STAT_CURR]++; in link_free_space()
1856 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; in link_free_space()
1859 ctl->free_space += info->bytes; in link_free_space()
1860 ctl->free_extents++; in link_free_space()
1867 ASSERT(info->bitmap); in relink_bitmap_entry()
1871 * want to re-link it into our ctl bytes index. in relink_bitmap_entry()
1873 if (RB_EMPTY_NODE(&info->bytes_index)) in relink_bitmap_entry()
1876 lockdep_assert_held(&ctl->tree_lock); in relink_bitmap_entry()
1878 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes); in relink_bitmap_entry()
1879 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less); in relink_bitmap_entry()
1884 u64 offset, u64 bytes, bool update_stat) in bitmap_clear_bits() argument
1887 int extent_delta = -1; in bitmap_clear_bits()
1889 start = offset_to_bit(info->offset, ctl->unit, offset); in bitmap_clear_bits()
1890 count = bytes_to_bits(bytes, ctl->unit); in bitmap_clear_bits()
1894 bitmap_clear(info->bitmap, start, count); in bitmap_clear_bits()
1896 info->bytes -= bytes; in bitmap_clear_bits()
1897 if (info->max_extent_size > ctl->unit) in bitmap_clear_bits()
1898 info->max_extent_size = 0; in bitmap_clear_bits()
1902 if (start && test_bit(start - 1, info->bitmap)) in bitmap_clear_bits()
1905 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) in bitmap_clear_bits()
1908 info->bitmap_extents += extent_delta; in bitmap_clear_bits()
1910 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; in bitmap_clear_bits()
1911 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; in bitmap_clear_bits()
1915 ctl->free_space -= bytes; in bitmap_clear_bits()
1919 struct btrfs_free_space *info, u64 offset, in btrfs_bitmap_set_bits() argument
1925 start = offset_to_bit(info->offset, ctl->unit, offset); in btrfs_bitmap_set_bits()
1926 count = bytes_to_bits(bytes, ctl->unit); in btrfs_bitmap_set_bits()
1930 bitmap_set(info->bitmap, start, count); in btrfs_bitmap_set_bits()
1936 info->max_extent_size = 0; in btrfs_bitmap_set_bits()
1937 info->bytes += bytes; in btrfs_bitmap_set_bits()
1938 ctl->free_space += bytes; in btrfs_bitmap_set_bits()
1942 if (start && test_bit(start - 1, info->bitmap)) in btrfs_bitmap_set_bits()
1943 extent_delta--; in btrfs_bitmap_set_bits()
1945 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap)) in btrfs_bitmap_set_bits()
1946 extent_delta--; in btrfs_bitmap_set_bits()
1948 info->bitmap_extents += extent_delta; in btrfs_bitmap_set_bits()
1950 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta; in btrfs_bitmap_set_bits()
1951 ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes; in btrfs_bitmap_set_bits()
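bitmap_clear_bits() and btrfs_bitmap_set_bits() above track how the number of contiguous free extents changes: a newly set run counts as one extent, minus one for each already-set neighbor it merges with, and clearing is the mirror image. A standalone sketch of that extent_delta bookkeeping:

#include <stdbool.h>
#include <stdio.h>

#define NBITS 64
static bool bits[NBITS];

/* Set a run of bits (assumed currently clear); return the change in the
 * number of contiguous free extents. */
static int set_run(unsigned start, unsigned count)
{
	int delta = 1;	/* the new run itself */

	if (start > 0 && bits[start - 1])
		delta--;			/* merges with the left run */
	if (start + count < NBITS && bits[start + count])
		delta--;			/* merges with the right run */
	for (unsigned i = start; i < start + count; i++)
		bits[i] = true;
	return delta;
}

int main(void)
{
	int extents = 0;

	extents += set_run(0, 4);	/* +1: new extent */
	extents += set_run(8, 4);	/* +1: disjoint */
	extents += set_run(4, 4);	/* -1: bridges both neighbors */
	printf("free extents: %d\n", extents);	/* 1 */
	return 0;
}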
1960 struct btrfs_free_space *bitmap_info, u64 *offset, in search_bitmap() argument
1974 bitmap_info->max_extent_size && in search_bitmap()
1975 bitmap_info->max_extent_size < *bytes) { in search_bitmap()
1976 *bytes = bitmap_info->max_extent_size; in search_bitmap()
1977 return -1; in search_bitmap()
1980 i = offset_to_bit(bitmap_info->offset, ctl->unit, in search_bitmap()
1981 max_t(u64, *offset, bitmap_info->offset)); in search_bitmap()
1982 bits = bytes_to_bits(*bytes, ctl->unit); in search_bitmap()
1984 for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) { in search_bitmap()
1989 next_zero = find_next_zero_bit(bitmap_info->bitmap, in search_bitmap()
1991 extent_bits = next_zero - i; in search_bitmap()
2002 *offset = (u64)(i * ctl->unit) + bitmap_info->offset; in search_bitmap()
2003 *bytes = (u64)(found_bits) * ctl->unit; in search_bitmap()
2007 *bytes = (u64)(max_bits) * ctl->unit; in search_bitmap()
2008 bitmap_info->max_extent_size = *bytes; in search_bitmap()
2010 return -1; in search_bitmap()
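search_bitmap() above scans for the first run of set bits at least as large as the request and, on failure, remembers the largest run seen so it can be cached as max_extent_size. A userspace model with plain arrays in place of for_each_set_bit_from()/find_next_zero_bit():

#include <stdbool.h>
#include <stdio.h>

#define NBITS 32

/* Return start of the first run >= want bits, or -1; *largest is always
 * set to the size of the biggest run encountered. */
static int find_run(const bool *bits, unsigned want, unsigned *largest)
{
	unsigned i = 0;

	*largest = 0;
	while (i < NBITS) {
		unsigned j = i;

		if (!bits[i]) {
			i++;
			continue;
		}
		while (j < NBITS && bits[j])	/* measure this run */
			j++;
		if (j - i > *largest)
			*largest = j - i;
		if (j - i >= want)
			return (int)i;
		i = j;
	}
	return -1;
}

int main(void)
{
	bool bits[NBITS] = { false };
	unsigned largest;

	for (int i = 4; i < 7; i++) bits[i] = true;	/* run of 3 */
	for (int i = 10; i < 18; i++) bits[i] = true;	/* run of 8 */
	printf("run of 5 starts at bit %d\n", find_run(bits, 5, &largest));
	printf("largest run: %u\n", largest);
	return 0;
}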
2015 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes, in find_free_space() argument
2024 if (!ctl->free_space_offset.rb_node) in find_free_space()
2028 node = rb_first_cached(&ctl->free_space_bytes); in find_free_space()
2030 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), in find_free_space()
2034 node = &entry->offset_index; in find_free_space()
2050 * If we're using the offset index then we need to keep going in find_free_space()
2053 if (entry->bytes < *bytes) { in find_free_space()
2065 tmp = entry->offset - ctl->start + align - 1; in find_free_space()
2067 tmp = tmp * align + ctl->start; in find_free_space()
2068 align_off = tmp - entry->offset; in find_free_space()
2071 tmp = entry->offset; in find_free_space()
2081 if (entry->bytes < *bytes + align_off) { in find_free_space()
2087 if (entry->bitmap) { in find_free_space()
2093 *offset = tmp; in find_free_space()
2103 * The bitmap may have gotten re-arranged in the space in find_free_space()
2113 *offset = tmp; in find_free_space()
2114 *bytes = entry->bytes - align_off; in find_free_space()
2122 struct btrfs_free_space *info, u64 offset) in add_new_bitmap() argument
2124 info->offset = offset_to_bitmap(ctl, offset); in add_new_bitmap()
2125 info->bytes = 0; in add_new_bitmap()
2126 info->bitmap_extents = 0; in add_new_bitmap()
2127 INIT_LIST_HEAD(&info->list); in add_new_bitmap()
2129 ctl->total_bitmaps++; in add_new_bitmap()
2142 if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) { in free_bitmap()
2143 ctl->discardable_extents[BTRFS_STAT_CURR] -= in free_bitmap()
2144 bitmap_info->bitmap_extents; in free_bitmap()
2145 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes; in free_bitmap()
2149 kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap); in free_bitmap()
2151 ctl->total_bitmaps--; in free_bitmap()
2157 u64 *offset, u64 *bytes) in remove_from_bitmap() argument
2164 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; in remove_from_bitmap()
2172 search_start = *offset; in remove_from_bitmap()
2173 search_bytes = ctl->unit; in remove_from_bitmap()
2174 search_bytes = min(search_bytes, end - search_start + 1); in remove_from_bitmap()
2177 if (ret < 0 || search_start != *offset) in remove_from_bitmap()
2178 return -EINVAL; in remove_from_bitmap()
2184 search_bytes = min(search_bytes, end - search_start + 1); in remove_from_bitmap()
2187 *offset += search_bytes; in remove_from_bitmap()
2188 *bytes -= search_bytes; in remove_from_bitmap()
2191 struct rb_node *next = rb_next(&bitmap_info->offset_index); in remove_from_bitmap()
2192 if (!bitmap_info->bytes) in remove_from_bitmap()
2200 return -EINVAL; in remove_from_bitmap()
2209 if (!bitmap_info->bitmap) in remove_from_bitmap()
2210 return -EAGAIN; in remove_from_bitmap()
2218 search_start = *offset; in remove_from_bitmap()
2219 search_bytes = ctl->unit; in remove_from_bitmap()
2222 if (ret < 0 || search_start != *offset) in remove_from_bitmap()
2223 return -EAGAIN; in remove_from_bitmap()
2226 } else if (!bitmap_info->bytes) in remove_from_bitmap()
2233 struct btrfs_free_space *info, u64 offset, in add_bytes_to_bitmap() argument
2245 ctl->discardable_extents[BTRFS_STAT_CURR] += in add_bytes_to_bitmap()
2246 info->bitmap_extents; in add_bytes_to_bitmap()
2247 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes; in add_bytes_to_bitmap()
2249 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in add_bytes_to_bitmap()
2252 end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); in add_bytes_to_bitmap()
2254 bytes_to_set = min(end - offset, bytes); in add_bytes_to_bitmap()
2256 btrfs_bitmap_set_bits(ctl, info, offset, bytes_to_set); in add_bytes_to_bitmap()
2265 struct btrfs_block_group *block_group = ctl->block_group; in use_bitmap()
2266 struct btrfs_fs_info *fs_info = block_group->fs_info; in use_bitmap()
2275 if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD) in use_bitmap()
2282 if (!forced && ctl->free_extents < ctl->extents_thresh) { in use_bitmap()
2290 if (info->bytes <= fs_info->sectorsize * 8) { in use_bitmap()
2291 if (ctl->free_extents * 3 <= ctl->extents_thresh) in use_bitmap()
2306 if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length) in use_bitmap()
2322 u64 bytes, offset, bytes_added; in insert_into_bitmap() local
2326 bytes = info->bytes; in insert_into_bitmap()
2327 offset = info->offset; in insert_into_bitmap()
2328 trim_state = info->trim_state; in insert_into_bitmap()
2330 if (!ctl->op->use_bitmap(ctl, info)) in insert_into_bitmap()
2333 if (ctl->op == &free_space_op) in insert_into_bitmap()
2334 block_group = ctl->block_group; in insert_into_bitmap()
2341 if (block_group && !list_empty(&block_group->cluster_list)) { in insert_into_bitmap()
2346 cluster = list_entry(block_group->cluster_list.next, in insert_into_bitmap()
2349 spin_lock(&cluster->lock); in insert_into_bitmap()
2350 node = rb_first(&cluster->root); in insert_into_bitmap()
2352 spin_unlock(&cluster->lock); in insert_into_bitmap()
2357 if (!entry->bitmap) { in insert_into_bitmap()
2358 spin_unlock(&cluster->lock); in insert_into_bitmap()
2362 if (entry->offset == offset_to_bitmap(ctl, offset)) { in insert_into_bitmap()
2363 bytes_added = add_bytes_to_bitmap(ctl, entry, offset, in insert_into_bitmap()
2365 bytes -= bytes_added; in insert_into_bitmap()
2366 offset += bytes_added; in insert_into_bitmap()
2368 spin_unlock(&cluster->lock); in insert_into_bitmap()
2376 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), in insert_into_bitmap()
2383 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes, in insert_into_bitmap()
2385 bytes -= bytes_added; in insert_into_bitmap()
2386 offset += bytes_added; in insert_into_bitmap()
2396 if (info && info->bitmap) { in insert_into_bitmap()
2397 add_new_bitmap(ctl, info, offset); in insert_into_bitmap()
2402 spin_unlock(&ctl->tree_lock); in insert_into_bitmap()
2404 /* no pre-allocated info, allocate a new one */ in insert_into_bitmap()
2409 spin_lock(&ctl->tree_lock); in insert_into_bitmap()
2410 ret = -ENOMEM; in insert_into_bitmap()
2416 info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, in insert_into_bitmap()
2418 info->trim_state = BTRFS_TRIM_STATE_TRIMMED; in insert_into_bitmap()
2419 spin_lock(&ctl->tree_lock); in insert_into_bitmap()
2420 if (!info->bitmap) { in insert_into_bitmap()
2421 ret = -ENOMEM; in insert_into_bitmap()
2429 if (info->bitmap) in insert_into_bitmap()
2431 info->bitmap); in insert_into_bitmap()
2460 u64 offset = info->offset; in try_merge_free_space() local
2461 u64 bytes = info->bytes; in try_merge_free_space()
2470 right_info = tree_search_offset(ctl, offset + bytes, 0, 0); in try_merge_free_space()
2472 right_prev = rb_prev(&right_info->offset_index); in try_merge_free_space()
2477 left_info = tree_search_offset(ctl, offset - 1, 0, 0); in try_merge_free_space()
2480 if (right_info && !right_info->bitmap && in try_merge_free_space()
2483 info->bytes += right_info->bytes; in try_merge_free_space()
2489 if (left_info && !left_info->bitmap && in try_merge_free_space()
2490 left_info->offset + left_info->bytes == offset && in try_merge_free_space()
2493 info->offset = left_info->offset; in try_merge_free_space()
2494 info->bytes += left_info->bytes; in try_merge_free_space()
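try_merge_free_space() above absorbs an extent entry that starts exactly where the new one ends (the right neighbor) and one that ends exactly where it starts (the left neighbor); bitmap entries are never merged this way. A sketch of that interval merging, with illustrative names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fs_extent {
	uint64_t offset, bytes;
	bool valid;
};

static void try_merge(struct fs_extent *info, struct fs_extent *left,
		      struct fs_extent *right)
{
	if (right->valid && right->offset == info->offset + info->bytes) {
		info->bytes += right->bytes;	/* absorb right neighbor */
		right->valid = false;
	}
	if (left->valid && left->offset + left->bytes == info->offset) {
		info->offset = left->offset;	/* absorb left neighbor */
		info->bytes += left->bytes;
		left->valid = false;
	}
}

int main(void)
{
	struct fs_extent left = { 0, 4096, true };
	struct fs_extent info = { 4096, 8192, true };
	struct fs_extent right = { 12288, 4096, true };

	try_merge(&info, &left, &right);
	printf("merged: [%llu, +%llu)\n",
	       (unsigned long long)info.offset,
	       (unsigned long long)info.bytes);	/* [0, +16384) */
	return 0;
}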
2509 const u64 end = info->offset + info->bytes; in steal_from_bitmap_to_end()
2517 i = offset_to_bit(bitmap->offset, ctl->unit, end); in steal_from_bitmap_to_end()
2518 j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i); in steal_from_bitmap_to_end()
2521 bytes = (j - i) * ctl->unit; in steal_from_bitmap_to_end()
2522 info->bytes += bytes; in steal_from_bitmap_to_end()
2526 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in steal_from_bitmap_to_end()
2530 if (!bitmap->bytes) in steal_from_bitmap_to_end()
2547 bitmap_offset = offset_to_bitmap(ctl, info->offset); in steal_from_bitmap_to_front()
2549 if (bitmap_offset == info->offset) { in steal_from_bitmap_to_front()
2550 if (info->offset == 0) in steal_from_bitmap_to_front()
2552 bitmap_offset = offset_to_bitmap(ctl, info->offset - 1); in steal_from_bitmap_to_front()
2559 i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1; in steal_from_bitmap_to_front()
2561 prev_j = (unsigned long)-1; in steal_from_bitmap_to_front()
2562 for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) { in steal_from_bitmap_to_front()
2570 if (prev_j == (unsigned long)-1) in steal_from_bitmap_to_front()
2571 bytes = (i + 1) * ctl->unit; in steal_from_bitmap_to_front()
2573 bytes = (i - prev_j) * ctl->unit; in steal_from_bitmap_to_front()
2575 info->offset -= bytes; in steal_from_bitmap_to_front()
2576 info->bytes += bytes; in steal_from_bitmap_to_front()
2580 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in steal_from_bitmap_to_front()
2582 bitmap_clear_bits(ctl, bitmap, info->offset, bytes, update_stat); in steal_from_bitmap_to_front()
2584 if (!bitmap->bytes) in steal_from_bitmap_to_front()
2592 * non-clustered allocation requests. So when attempting to add a new extent
2597 * on 2 or more entries - even if the entries represent a contiguous free space
2606 * Only work with disconnected entries, as we can change their offset, in steal_from_bitmap()
2609 ASSERT(!info->bitmap); in steal_from_bitmap()
2610 ASSERT(RB_EMPTY_NODE(&info->offset_index)); in steal_from_bitmap()
2612 if (ctl->total_bitmaps > 0) { in steal_from_bitmap()
2617 if (ctl->total_bitmaps > 0) in steal_from_bitmap()
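steal_from_bitmap() above lets a new extent grow by consuming adjacent bits from neighboring bitmap entries, so a later lookup can be satisfied by one entry instead of spanning several. A model of the steal_from_bitmap_to_end() direction, assuming a 4 KiB unit; names are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBITS 32
#define UNIT  4096ULL

struct fs_extent { uint64_t offset, bytes; };

static void steal_to_end(struct fs_extent *info, bool *bitmap,
			 uint64_t bitmap_start)
{
	uint64_t end = info->offset + info->bytes;
	unsigned i = (unsigned)((end - bitmap_start) / UNIT);
	unsigned j = i;

	while (j < NBITS && bitmap[j])	/* leading run of set bits */
		bitmap[j++] = false;	/* clear the stolen bits */
	info->bytes += (uint64_t)(j - i) * UNIT;
}

int main(void)
{
	bool bitmap[NBITS] = { true, true, true, false };
	/* The extent ends exactly where the bitmap window starts. */
	struct fs_extent info = { 0, 16384 };

	steal_to_end(&info, bitmap, 16384);
	printf("extent grew to %llu bytes\n",
	       (unsigned long long)info.bytes);	/* 16384 + 3*4096 = 28672 */
	return 0;
}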
2627 u64 offset, u64 bytes, in __btrfs_add_free_space() argument
2630 struct btrfs_fs_info *fs_info = block_group->fs_info; in __btrfs_add_free_space()
2631 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in __btrfs_add_free_space()
2640 return -ENOMEM; in __btrfs_add_free_space()
2642 info->offset = offset; in __btrfs_add_free_space()
2643 info->bytes = bytes; in __btrfs_add_free_space()
2644 info->trim_state = trim_state; in __btrfs_add_free_space()
2645 RB_CLEAR_NODE(&info->offset_index); in __btrfs_add_free_space()
2646 RB_CLEAR_NODE(&info->bytes_index); in __btrfs_add_free_space()
2648 spin_lock(&ctl->tree_lock); in __btrfs_add_free_space()
2668 * going to add the new free space to existing bitmap entries - because in __btrfs_add_free_space()
2674 filter_bytes = max(filter_bytes, info->bytes); in __btrfs_add_free_space()
2681 spin_unlock(&ctl->tree_lock); in __btrfs_add_free_space()
2685 ASSERT(ret != -EEXIST); in __btrfs_add_free_space()
2690 btrfs_discard_queue_work(&fs_info->discard_ctl, block_group); in __btrfs_add_free_space()
2699 struct btrfs_space_info *sinfo = block_group->space_info; in __btrfs_add_free_space_zoned()
2700 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in __btrfs_add_free_space_zoned()
2701 u64 offset = bytenr - block_group->start; in __btrfs_add_free_space_zoned() local
2707 spin_lock(&block_group->lock); in __btrfs_add_free_space_zoned()
2709 initial = ((size == block_group->length) && (block_group->alloc_offset == 0)); in __btrfs_add_free_space_zoned()
2710 WARN_ON(!initial && offset + size > block_group->zone_capacity); in __btrfs_add_free_space_zoned()
2712 bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold); in __btrfs_add_free_space_zoned()
2717 to_free = block_group->zone_capacity; in __btrfs_add_free_space_zoned()
2718 else if (offset >= block_group->alloc_offset) in __btrfs_add_free_space_zoned()
2720 else if (offset + size <= block_group->alloc_offset) in __btrfs_add_free_space_zoned()
2723 to_free = offset + size - block_group->alloc_offset; in __btrfs_add_free_space_zoned()
2724 to_unusable = size - to_free; in __btrfs_add_free_space_zoned()
2726 spin_lock(&ctl->tree_lock); in __btrfs_add_free_space_zoned()
2727 ctl->free_space += to_free; in __btrfs_add_free_space_zoned()
2728 spin_unlock(&ctl->tree_lock); in __btrfs_add_free_space_zoned()
2730 * If the block group is read-only, we should account freed space into in __btrfs_add_free_space_zoned()
2733 if (!block_group->ro) { in __btrfs_add_free_space_zoned()
2734 block_group->zone_unusable += to_unusable; in __btrfs_add_free_space_zoned()
2735 WARN_ON(block_group->zone_unusable > block_group->length); in __btrfs_add_free_space_zoned()
2738 block_group->alloc_offset -= size; in __btrfs_add_free_space_zoned()
2741 reclaimable_unusable = block_group->zone_unusable - in __btrfs_add_free_space_zoned()
2742 (block_group->length - block_group->zone_capacity); in __btrfs_add_free_space_zoned()
2744 if (block_group->zone_unusable == block_group->length) { in __btrfs_add_free_space_zoned()
2748 mult_perc(block_group->zone_capacity, bg_reclaim_threshold)) { in __btrfs_add_free_space_zoned()
2752 spin_unlock(&block_group->lock); in __btrfs_add_free_space_zoned()
2762 if (btrfs_is_zoned(block_group->fs_info)) in btrfs_add_free_space()
2766 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC)) in btrfs_add_free_space()
2775 if (btrfs_is_zoned(block_group->fs_info)) in btrfs_add_free_space_unused()
2792 if (btrfs_is_zoned(block_group->fs_info)) in btrfs_add_free_space_async_trimmed()
2796 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) || in btrfs_add_free_space_async_trimmed()
2797 btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC)) in btrfs_add_free_space_async_trimmed()
2804 u64 offset, u64 bytes) in btrfs_remove_free_space() argument
2806 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_remove_free_space()
2811 if (btrfs_is_zoned(block_group->fs_info)) { in btrfs_remove_free_space()
2814 * Since the allocation info of tree-log nodes are not recorded in btrfs_remove_free_space()
2815 * to the extent-tree, calculate_alloc_pointer() failed to in btrfs_remove_free_space()
2821 * Advance the pointer not to overwrite the tree-log nodes. in btrfs_remove_free_space()
2823 if (block_group->start + block_group->alloc_offset < in btrfs_remove_free_space()
2824 offset + bytes) { in btrfs_remove_free_space()
2825 block_group->alloc_offset = in btrfs_remove_free_space()
2826 offset + bytes - block_group->start; in btrfs_remove_free_space()
2831 spin_lock(&ctl->tree_lock); in btrfs_remove_free_space()
2838 info = tree_search_offset(ctl, offset, 0, 0); in btrfs_remove_free_space()
2844 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), in btrfs_remove_free_space()
2858 if (!info->bitmap) { in btrfs_remove_free_space()
2860 if (offset == info->offset) { in btrfs_remove_free_space()
2861 u64 to_free = min(bytes, info->bytes); in btrfs_remove_free_space()
2863 info->bytes -= to_free; in btrfs_remove_free_space()
2864 info->offset += to_free; in btrfs_remove_free_space()
2865 if (info->bytes) { in btrfs_remove_free_space()
2872 offset += to_free; in btrfs_remove_free_space()
2873 bytes -= to_free; in btrfs_remove_free_space()
2876 u64 old_end = info->bytes + info->offset; in btrfs_remove_free_space()
2878 info->bytes = offset - info->offset; in btrfs_remove_free_space()
2885 if (old_end < offset + bytes) { in btrfs_remove_free_space()
2886 bytes -= old_end - offset; in btrfs_remove_free_space()
2887 offset = old_end; in btrfs_remove_free_space()
2889 } else if (old_end == offset + bytes) { in btrfs_remove_free_space()
2893 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space()
2896 offset + bytes, in btrfs_remove_free_space()
2897 old_end - (offset + bytes), in btrfs_remove_free_space()
2898 info->trim_state); in btrfs_remove_free_space()
2904 ret = remove_from_bitmap(ctl, info, &offset, &bytes); in btrfs_remove_free_space()
2905 if (ret == -EAGAIN) { in btrfs_remove_free_space()
2911 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space()
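The extent path of btrfs_remove_free_space() above is interval subtraction: removing a range from a free extent trims its front, trims its back, or splits it in two when the range sits in the middle (the tail is re-added via __btrfs_add_free_space() above). A pure-arithmetic sketch, with no tree or locking:

#include <stdint.h>
#include <stdio.h>

struct fs_extent { uint64_t offset, bytes; };

/* Remove [offset, offset+bytes) from 'in'; return how many pieces
 * remain (0, 1 or 2) in out[]. Assumes the ranges overlap. */
static int remove_range(struct fs_extent in, uint64_t offset, uint64_t bytes,
			struct fs_extent out[2])
{
	uint64_t in_end = in.offset + in.bytes;
	uint64_t rm_end = offset + bytes;
	int n = 0;

	if (offset > in.offset)		/* piece before the removed range */
		out[n++] = (struct fs_extent){ in.offset, offset - in.offset };
	if (rm_end < in_end)		/* piece after the removed range */
		out[n++] = (struct fs_extent){ rm_end, in_end - rm_end };
	return n;
}

int main(void)
{
	struct fs_extent out[2];
	int n = remove_range((struct fs_extent){ 0, 1 << 20 },
			     4096, 8192, out);

	for (int i = 0; i < n; i++)
		printf("remains: [%llu, +%llu)\n",
		       (unsigned long long)out[i].offset,
		       (unsigned long long)out[i].bytes);
	return 0;
}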
2919 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_dump_free_space()
2920 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_dump_free_space()
2927 * out the free space after the allocation offset. in btrfs_dump_free_space()
2931 block_group->zone_capacity - block_group->alloc_offset, in btrfs_dump_free_space()
2933 &block_group->runtime_flags)); in btrfs_dump_free_space()
2937 spin_lock(&ctl->tree_lock); in btrfs_dump_free_space()
2938 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { in btrfs_dump_free_space()
2940 if (info->bytes >= bytes && !block_group->ro) in btrfs_dump_free_space()
2942 btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s", in btrfs_dump_free_space()
2943 info->offset, info->bytes, str_yes_no(info->bitmap)); in btrfs_dump_free_space()
2945 spin_unlock(&ctl->tree_lock); in btrfs_dump_free_space()
2947 str_no_yes(list_empty(&block_group->cluster_list))); in btrfs_dump_free_space()
2956 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_init_free_space_ctl()
2958 spin_lock_init(&ctl->tree_lock); in btrfs_init_free_space_ctl()
2959 ctl->unit = fs_info->sectorsize; in btrfs_init_free_space_ctl()
2960 ctl->start = block_group->start; in btrfs_init_free_space_ctl()
2961 ctl->block_group = block_group; in btrfs_init_free_space_ctl()
2962 ctl->op = &free_space_op; in btrfs_init_free_space_ctl()
2963 ctl->free_space_bytes = RB_ROOT_CACHED; in btrfs_init_free_space_ctl()
2964 INIT_LIST_HEAD(&ctl->trimming_ranges); in btrfs_init_free_space_ctl()
2965 mutex_init(&ctl->cache_writeout_mutex); in btrfs_init_free_space_ctl()
2972 ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space); in btrfs_init_free_space_ctl()
2985 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in __btrfs_return_cluster_to_free_space()
2988 lockdep_assert_held(&ctl->tree_lock); in __btrfs_return_cluster_to_free_space()
2990 spin_lock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
2991 if (cluster->block_group != block_group) { in __btrfs_return_cluster_to_free_space()
2992 spin_unlock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
2996 cluster->block_group = NULL; in __btrfs_return_cluster_to_free_space()
2997 cluster->window_start = 0; in __btrfs_return_cluster_to_free_space()
2998 list_del_init(&cluster->block_group_list); in __btrfs_return_cluster_to_free_space()
3000 node = rb_first(&cluster->root); in __btrfs_return_cluster_to_free_space()
3005 node = rb_next(&entry->offset_index); in __btrfs_return_cluster_to_free_space()
3006 rb_erase(&entry->offset_index, &cluster->root); in __btrfs_return_cluster_to_free_space()
3007 RB_CLEAR_NODE(&entry->offset_index); in __btrfs_return_cluster_to_free_space()
3009 if (!entry->bitmap) { in __btrfs_return_cluster_to_free_space()
3012 ctl->discardable_extents[BTRFS_STAT_CURR]--; in __btrfs_return_cluster_to_free_space()
3013 ctl->discardable_bytes[BTRFS_STAT_CURR] -= in __btrfs_return_cluster_to_free_space()
3014 entry->bytes; in __btrfs_return_cluster_to_free_space()
3022 ctl->discardable_extents[BTRFS_STAT_CURR]++; in __btrfs_return_cluster_to_free_space()
3023 ctl->discardable_bytes[BTRFS_STAT_CURR] += in __btrfs_return_cluster_to_free_space()
3024 entry->bytes; in __btrfs_return_cluster_to_free_space()
3028 rb_add_cached(&entry->bytes_index, &ctl->free_space_bytes, in __btrfs_return_cluster_to_free_space()
3031 cluster->root = RB_ROOT; in __btrfs_return_cluster_to_free_space()
3032 spin_unlock(&cluster->lock); in __btrfs_return_cluster_to_free_space()
3038 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_remove_free_space_cache()
3042 spin_lock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
3043 while ((head = block_group->cluster_list.next) != in btrfs_remove_free_space_cache()
3044 &block_group->cluster_list) { in btrfs_remove_free_space_cache()
3048 WARN_ON(cluster->block_group != block_group); in btrfs_remove_free_space_cache()
3051 cond_resched_lock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
3055 spin_unlock(&ctl->tree_lock); in btrfs_remove_free_space_cache()
3064 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_is_free_space_trimmed()
3069 spin_lock(&ctl->tree_lock); in btrfs_is_free_space_trimmed()
3070 node = rb_first(&ctl->free_space_offset); in btrfs_is_free_space_trimmed()
3083 spin_unlock(&ctl->tree_lock); in btrfs_is_free_space_trimmed()
3088 u64 offset, u64 bytes, u64 empty_size, in btrfs_find_space_for_alloc() argument
3091 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_find_space_for_alloc()
3093 &block_group->fs_info->discard_ctl; in btrfs_find_space_for_alloc()
3100 bool use_bytes_index = (offset == block_group->start); in btrfs_find_space_for_alloc()
3102 ASSERT(!btrfs_is_zoned(block_group->fs_info)); in btrfs_find_space_for_alloc()
3104 spin_lock(&ctl->tree_lock); in btrfs_find_space_for_alloc()
3105 entry = find_free_space(ctl, &offset, &bytes_search, in btrfs_find_space_for_alloc()
3106 block_group->full_stripe_len, max_extent_size, in btrfs_find_space_for_alloc()
3111 ret = offset; in btrfs_find_space_for_alloc()
3112 if (entry->bitmap) { in btrfs_find_space_for_alloc()
3113 bitmap_clear_bits(ctl, entry, offset, bytes, true); in btrfs_find_space_for_alloc()
3116 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_find_space_for_alloc()
3118 if (!entry->bytes) in btrfs_find_space_for_alloc()
3122 align_gap_len = offset - entry->offset; in btrfs_find_space_for_alloc()
3123 align_gap = entry->offset; in btrfs_find_space_for_alloc()
3124 align_gap_trim_state = entry->trim_state; in btrfs_find_space_for_alloc()
3127 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_find_space_for_alloc()
3129 entry->offset = offset + bytes; in btrfs_find_space_for_alloc()
3130 WARN_ON(entry->bytes < bytes + align_gap_len); in btrfs_find_space_for_alloc()
3132 entry->bytes -= bytes + align_gap_len; in btrfs_find_space_for_alloc()
3133 if (!entry->bytes) in btrfs_find_space_for_alloc()
3140 spin_unlock(&ctl->tree_lock); in btrfs_find_space_for_alloc()
3163 spin_lock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3165 block_group = cluster->block_group; in btrfs_return_cluster_to_free_space()
3167 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3170 } else if (cluster->block_group != block_group) { in btrfs_return_cluster_to_free_space()
3172 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3176 spin_unlock(&cluster->lock); in btrfs_return_cluster_to_free_space()
3178 ctl = block_group->free_space_ctl; in btrfs_return_cluster_to_free_space()
3181 spin_lock(&ctl->tree_lock); in btrfs_return_cluster_to_free_space()
3183 spin_unlock(&ctl->tree_lock); in btrfs_return_cluster_to_free_space()
3185 btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group); in btrfs_return_cluster_to_free_space()
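/*
 * Sketch, not from this file: the locking discipline visible above.
 * When both locks are needed, ctl->tree_lock is the outer lock and
 * cluster->lock the inner one, so returning a cluster first drops
 * cluster->lock before taking tree_lock.  A pthread model of that
 * ordering; names are illustrative:
 */
#include <pthread.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cluster_lock = PTHREAD_MUTEX_INITIALIZER;

static void return_cluster(void)
{
	pthread_mutex_lock(&cluster_lock);
	/* ... read and validate cluster->block_group ... */
	pthread_mutex_unlock(&cluster_lock);	/* drop before outer lock */

	pthread_mutex_lock(&tree_lock);		/* outer */
	pthread_mutex_lock(&cluster_lock);	/* inner */
	/* ... move entries back into the free space tree ... */
	pthread_mutex_unlock(&cluster_lock);
	pthread_mutex_unlock(&tree_lock);
}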
3197 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_alloc_from_bitmap()
3199 u64 search_start = cluster->window_start; in btrfs_alloc_from_bitmap()
3221  * Given a cluster, try to allocate 'bytes' from it: returns 0 if it couldn't find anything suitably large, or a logical disk offset if things worked out.
3228 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_alloc_from_cluster()
3230 &block_group->fs_info->discard_ctl; in btrfs_alloc_from_cluster()
3235 ASSERT(!btrfs_is_zoned(block_group->fs_info)); in btrfs_alloc_from_cluster()
3237 spin_lock(&cluster->lock); in btrfs_alloc_from_cluster()
3238 if (bytes > cluster->max_size) in btrfs_alloc_from_cluster()
3241 if (cluster->block_group != block_group) in btrfs_alloc_from_cluster()
3244 node = rb_first(&cluster->root); in btrfs_alloc_from_cluster()
3250 if (entry->bytes < bytes) in btrfs_alloc_from_cluster()
3254 if (entry->bytes < bytes || in btrfs_alloc_from_cluster()
3255 (!entry->bitmap && entry->offset < min_start)) { in btrfs_alloc_from_cluster()
3256 node = rb_next(&entry->offset_index); in btrfs_alloc_from_cluster()
3264 if (entry->bitmap) { in btrfs_alloc_from_cluster()
3267 cluster->window_start, in btrfs_alloc_from_cluster()
3270 node = rb_next(&entry->offset_index); in btrfs_alloc_from_cluster()
3277 cluster->window_start += bytes; in btrfs_alloc_from_cluster()
3279 ret = entry->offset; in btrfs_alloc_from_cluster()
3281 entry->offset += bytes; in btrfs_alloc_from_cluster()
3282 entry->bytes -= bytes; in btrfs_alloc_from_cluster()
3288 spin_unlock(&cluster->lock); in btrfs_alloc_from_cluster()
3293 spin_lock(&ctl->tree_lock); in btrfs_alloc_from_cluster()
3296 atomic64_add(bytes, &discard_ctl->discard_bytes_saved); in btrfs_alloc_from_cluster()
3298 ctl->free_space -= bytes; in btrfs_alloc_from_cluster()
3299 if (!entry->bitmap && !btrfs_free_space_trimmed(entry)) in btrfs_alloc_from_cluster()
3300 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes; in btrfs_alloc_from_cluster()
3302 spin_lock(&cluster->lock); in btrfs_alloc_from_cluster()
3303 if (entry->bytes == 0) { in btrfs_alloc_from_cluster()
3304 rb_erase(&entry->offset_index, &cluster->root); in btrfs_alloc_from_cluster()
3305 ctl->free_extents--; in btrfs_alloc_from_cluster()
3306 if (entry->bitmap) { in btrfs_alloc_from_cluster()
3308 entry->bitmap); in btrfs_alloc_from_cluster()
3309 ctl->total_bitmaps--; in btrfs_alloc_from_cluster()
3312 ctl->discardable_extents[BTRFS_STAT_CURR]--; in btrfs_alloc_from_cluster()
3317 spin_unlock(&cluster->lock); in btrfs_alloc_from_cluster()
3318 spin_unlock(&ctl->tree_lock); in btrfs_alloc_from_cluster()
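/*
 * Sketch, not from this file: the allocation step above.  A bitmap entry
 * hands out space at the cluster's window_start, which then advances; a
 * plain extent entry is consumed from its front.  Either way the entry
 * shrinks by @bytes.  Illustrative model:
 */
#include <stdbool.h>
#include <stdint.h>

struct centry {
	bool is_bitmap;
	uint64_t offset, bytes;
};

static uint64_t cluster_take(struct centry *e, uint64_t *window_start,
			     uint64_t bytes)
{
	uint64_t ret;

	if (e->is_bitmap) {
		ret = *window_start;	/* bitmap: allocate at the window */
		*window_start += bytes;
	} else {
		ret = e->offset;	/* extent: consume from the front */
		e->offset += bytes;
	}
	e->bytes -= bytes;
	return ret;
}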
3326 u64 offset, u64 bytes, in btrfs_bitmap_cluster() argument
3329 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_bitmap_cluster()
3340 lockdep_assert_held(&ctl->tree_lock); in btrfs_bitmap_cluster()
3342 i = offset_to_bit(entry->offset, ctl->unit, in btrfs_bitmap_cluster()
3343 max_t(u64, offset, entry->offset)); in btrfs_bitmap_cluster()
3344 want_bits = bytes_to_bits(bytes, ctl->unit); in btrfs_bitmap_cluster()
3345 min_bits = bytes_to_bits(min_bytes, ctl->unit); in btrfs_bitmap_cluster()
3351 if (entry->max_extent_size && in btrfs_bitmap_cluster()
3352 entry->max_extent_size < cont1_bytes) in btrfs_bitmap_cluster()
3353 return -ENOSPC; in btrfs_bitmap_cluster()
3356 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) { in btrfs_bitmap_cluster()
3357 next_zero = find_next_zero_bit(entry->bitmap, in btrfs_bitmap_cluster()
3359 if (next_zero - i >= min_bits) { in btrfs_bitmap_cluster()
3360 found_bits = next_zero - i; in btrfs_bitmap_cluster()
3365 if (next_zero - i > max_bits) in btrfs_bitmap_cluster()
3366 max_bits = next_zero - i; in btrfs_bitmap_cluster()
3371 entry->max_extent_size = (u64)max_bits * ctl->unit; in btrfs_bitmap_cluster()
3372 return -ENOSPC; in btrfs_bitmap_cluster()
3377 cluster->max_size = 0; in btrfs_bitmap_cluster()
3382 if (cluster->max_size < found_bits * ctl->unit) in btrfs_bitmap_cluster()
3383 cluster->max_size = found_bits * ctl->unit; in btrfs_bitmap_cluster()
3385 if (total_found < want_bits || cluster->max_size < cont1_bytes) { in btrfs_bitmap_cluster()
3390 cluster->window_start = start * ctl->unit + entry->offset; in btrfs_bitmap_cluster()
3391 rb_erase(&entry->offset_index, &ctl->free_space_offset); in btrfs_bitmap_cluster()
3392 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes); in btrfs_bitmap_cluster()
3396  * We need to know if we're currently on the normal space index when we manipulate the bitmap, so that we know we need to remove and re-insert it into the space_index tree. in btrfs_bitmap_cluster()
3401 RB_CLEAR_NODE(&entry->bytes_index); in btrfs_bitmap_cluster()
3404 ASSERT(!ret); /* -EEXIST; Logic error */ in btrfs_bitmap_cluster()
3407 total_found * ctl->unit, 1); in btrfs_bitmap_cluster()
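/*
 * Sketch, not from this file: the bit scan at the heart of
 * btrfs_bitmap_cluster().  Starting from the first set bit, measure each
 * run of consecutive set bits; accept the first run of at least @min_run
 * bits, otherwise remember the longest run seen (the max_extent_size
 * cache above) so the next caller can fail fast.  Standalone model using
 * a bool array instead of bitmap words:
 */
#include <stdbool.h>
#include <stddef.h>

static bool find_bit_run(const bool *map, size_t n, size_t min_run,
			 size_t *start, size_t *len, size_t *longest)
{
	size_t i = 0;

	*longest = 0;
	while (i < n) {
		if (!map[i]) {
			i++;
			continue;
		}
		size_t j = i;
		while (j < n && map[j])	/* find_next_zero_bit() analogue */
			j++;
		if (j - i >= min_run) {
			*start = i;
			*len = j - i;
			return true;
		}
		if (j - i > *longest)
			*longest = j - i;	/* best run, still too small */
		i = j;
	}
	return false;
}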
3419 struct list_head *bitmaps, u64 offset, u64 bytes, in setup_cluster_no_bitmap() argument
3422 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in setup_cluster_no_bitmap()
3431 lockdep_assert_held(&ctl->tree_lock); in setup_cluster_no_bitmap()
3433 entry = tree_search_offset(ctl, offset, 0, 1); in setup_cluster_no_bitmap()
3435 return -ENOSPC; in setup_cluster_no_bitmap()
3441 while (entry->bitmap || entry->bytes < min_bytes) { in setup_cluster_no_bitmap()
3442 if (entry->bitmap && list_empty(&entry->list)) in setup_cluster_no_bitmap()
3443 list_add_tail(&entry->list, bitmaps); in setup_cluster_no_bitmap()
3444 node = rb_next(&entry->offset_index); in setup_cluster_no_bitmap()
3446 return -ENOSPC; in setup_cluster_no_bitmap()
3450 window_free = entry->bytes; in setup_cluster_no_bitmap()
3451 max_extent = entry->bytes; in setup_cluster_no_bitmap()
3455 for (node = rb_next(&entry->offset_index); node; in setup_cluster_no_bitmap()
3456 node = rb_next(&entry->offset_index)) { in setup_cluster_no_bitmap()
3459 if (entry->bitmap) { in setup_cluster_no_bitmap()
3460 if (list_empty(&entry->list)) in setup_cluster_no_bitmap()
3461 list_add_tail(&entry->list, bitmaps); in setup_cluster_no_bitmap()
3465 if (entry->bytes < min_bytes) in setup_cluster_no_bitmap()
3469 window_free += entry->bytes; in setup_cluster_no_bitmap()
3470 if (entry->bytes > max_extent) in setup_cluster_no_bitmap()
3471 max_extent = entry->bytes; in setup_cluster_no_bitmap()
3475 return -ENOSPC; in setup_cluster_no_bitmap()
3477 cluster->window_start = first->offset; in setup_cluster_no_bitmap()
3479 node = &first->offset_index; in setup_cluster_no_bitmap()
3489 node = rb_next(&entry->offset_index); in setup_cluster_no_bitmap()
3490 if (entry->bitmap || entry->bytes < min_bytes) in setup_cluster_no_bitmap()
3493 rb_erase(&entry->offset_index, &ctl->free_space_offset); in setup_cluster_no_bitmap()
3494 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes); in setup_cluster_no_bitmap()
3496 total_size += entry->bytes; in setup_cluster_no_bitmap()
3497 ASSERT(!ret); /* -EEXIST; Logic error */ in setup_cluster_no_bitmap()
3500 cluster->max_size = max_extent; in setup_cluster_no_bitmap()
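/*
 * Sketch, not from this file: the window accumulation above.  Extent
 * entries are scanned in offset order; bitmaps and fragments below
 * min_bytes are skipped, the rest are summed, and the window succeeds
 * only if it holds enough total bytes and one extent of at least @cont1
 * contiguous bytes.  Illustrative model:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct wext { uint64_t bytes; bool is_bitmap; };

static bool window_has_space(const struct wext *e, size_t n,
			     uint64_t want, uint64_t cont1,
			     uint64_t min_bytes)
{
	uint64_t window_free = 0, max_extent = 0;

	for (size_t i = 0; i < n; i++) {
		if (e[i].is_bitmap || e[i].bytes < min_bytes)
			continue;	/* bitmaps get their own pass */
		window_free += e[i].bytes;
		if (e[i].bytes > max_extent)
			max_extent = e[i].bytes;
	}
	return window_free >= want && max_extent >= cont1;
}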
3512 struct list_head *bitmaps, u64 offset, u64 bytes, in setup_cluster_bitmap() argument
3515 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in setup_cluster_bitmap()
3517 int ret = -ENOSPC; in setup_cluster_bitmap()
3518 u64 bitmap_offset = offset_to_bitmap(ctl, offset); in setup_cluster_bitmap()
3520 if (ctl->total_bitmaps == 0) in setup_cluster_bitmap()
3521 return -ENOSPC; in setup_cluster_bitmap()
3524 * The bitmap that covers offset won't be in the list unless offset in setup_cluster_bitmap()
3525 * is just its start offset. in setup_cluster_bitmap()
3530 if (!entry || entry->offset != bitmap_offset) { in setup_cluster_bitmap()
3532 if (entry && list_empty(&entry->list)) in setup_cluster_bitmap()
3533 list_add(&entry->list, bitmaps); in setup_cluster_bitmap()
3537 if (entry->bytes < bytes) in setup_cluster_bitmap()
3539 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, in setup_cluster_bitmap()
3547  * The bitmaps list has all the bitmaps that record free space starting after offset, so no more search is required. in setup_cluster_bitmap()
3549 return -ENOSPC; in setup_cluster_bitmap()
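/*
 * Sketch, not from this file: offset_to_bitmap() as used above.  Each
 * bitmap entry covers a fixed, aligned window of the block group, so the
 * covering bitmap's start offset is the search offset rounded down to a
 * window boundary relative to the block group start:
 */
#include <stdint.h>

static uint64_t bitmap_start_for(uint64_t offset, uint64_t bg_start,
				 uint64_t bits_per_bitmap, uint64_t unit)
{
	uint64_t window = bits_per_bitmap * unit;

	return bg_start + ((offset - bg_start) / window) * window;
}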
3558  * Returns zero and sets up the cluster if things worked out, otherwise it returns -ENOSPC.
3562 u64 offset, u64 bytes, u64 empty_size) in btrfs_find_space_cluster() argument
3564 struct btrfs_fs_info *fs_info = block_group->fs_info; in btrfs_find_space_cluster()
3565 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_find_space_cluster()
3581 } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { in btrfs_find_space_cluster()
3583 min_bytes = fs_info->sectorsize; in btrfs_find_space_cluster()
3586 min_bytes = fs_info->sectorsize; in btrfs_find_space_cluster()
3589 spin_lock(&ctl->tree_lock); in btrfs_find_space_cluster()
3595 if (ctl->free_space < bytes) { in btrfs_find_space_cluster()
3596 spin_unlock(&ctl->tree_lock); in btrfs_find_space_cluster()
3597 return -ENOSPC; in btrfs_find_space_cluster()
3600 spin_lock(&cluster->lock); in btrfs_find_space_cluster()
3603 if (cluster->block_group) { in btrfs_find_space_cluster()
3608 trace_btrfs_find_cluster(block_group, offset, bytes, empty_size, in btrfs_find_space_cluster()
3611 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, in btrfs_find_space_cluster()
3616 offset, bytes + empty_size, in btrfs_find_space_cluster()
3621 list_del_init(&entry->list); in btrfs_find_space_cluster()
3625 list_add_tail(&cluster->block_group_list, in btrfs_find_space_cluster()
3626 &block_group->cluster_list); in btrfs_find_space_cluster()
3627 cluster->block_group = block_group; in btrfs_find_space_cluster()
3632 spin_unlock(&cluster->lock); in btrfs_find_space_cluster()
3633 spin_unlock(&ctl->tree_lock); in btrfs_find_space_cluster()
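/*
 * Sketch, not from this file: the sizing policy chosen before taking
 * tree_lock above.  cont1_bytes is the largest contiguous run the
 * cluster must contain and min_bytes the smallest fragment worth adding:
 * SSD_SPREAD tolerates no fragmentation, metadata accepts sector-sized
 * pieces, and data keeps the largest run at a quarter of the request or
 * more.  Illustrative model:
 */
#include <stdbool.h>
#include <stdint.h>

static inline uint64_t max_u64(uint64_t a, uint64_t b)
{
	return a > b ? a : b;
}

static void cluster_limits(bool ssd_spread, bool metadata,
			   uint64_t bytes, uint64_t empty_size,
			   uint64_t sectorsize,
			   uint64_t *cont1, uint64_t *min_bytes)
{
	if (ssd_spread) {
		*cont1 = bytes + empty_size;	/* one contiguous run */
		*min_bytes = *cont1;
	} else if (metadata) {
		*cont1 = bytes;
		*min_bytes = sectorsize;	/* small pieces are fine */
	} else {
		*cont1 = max_u64(bytes, (bytes + empty_size) >> 2);
		*min_bytes = sectorsize;
	}
}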
3643 spin_lock_init(&cluster->lock); in btrfs_init_free_cluster()
3644 spin_lock_init(&cluster->refill_lock); in btrfs_init_free_cluster()
3645 cluster->root = RB_ROOT; in btrfs_init_free_cluster()
3646 cluster->max_size = 0; in btrfs_init_free_cluster()
3647 cluster->fragmented = false; in btrfs_init_free_cluster()
3648 INIT_LIST_HEAD(&cluster->block_group_list); in btrfs_init_free_cluster()
3649 cluster->block_group = NULL; in btrfs_init_free_cluster()
3658 struct btrfs_space_info *space_info = block_group->space_info; in do_trimming()
3659 struct btrfs_fs_info *fs_info = block_group->fs_info; in do_trimming()
3660 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in do_trimming()
3668 spin_lock(&space_info->lock); in do_trimming()
3669 spin_lock(&block_group->lock); in do_trimming()
3670 if (!block_group->ro) { in do_trimming()
3671 block_group->reserved += reserved_bytes; in do_trimming()
3672 space_info->bytes_reserved += reserved_bytes; in do_trimming()
3675 spin_unlock(&block_group->lock); in do_trimming()
3676 spin_unlock(&space_info->lock); in do_trimming()
3684 mutex_lock(&ctl->cache_writeout_mutex); in do_trimming()
3687 start - reserved_start, in do_trimming()
3690 __btrfs_add_free_space(block_group, end, reserved_end - end, in do_trimming()
3693 list_del(&trim_entry->list); in do_trimming()
3694 mutex_unlock(&ctl->cache_writeout_mutex); in do_trimming()
3697 spin_lock(&space_info->lock); in do_trimming()
3698 spin_lock(&block_group->lock); in do_trimming()
3699 if (block_group->ro) in do_trimming()
3700 space_info->bytes_readonly += reserved_bytes; in do_trimming()
3701 block_group->reserved -= reserved_bytes; in do_trimming()
3702 space_info->bytes_reserved -= reserved_bytes; in do_trimming()
3703 spin_unlock(&block_group->lock); in do_trimming()
3704 spin_unlock(&space_info->lock); in do_trimming()
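/*
 * Sketch, not from this file: the cleanup in do_trimming().  The
 * reserved region [rstart, rend) was pulled out of the free space tree,
 * but only [start, end) was actually discarded; the untouched head and
 * tail must go back, which is what the two conditional
 * __btrfs_add_free_space() calls above do.  A helper that computes those
 * leftovers:
 */
#include <stddef.h>
#include <stdint.h>

struct range { uint64_t start, len; };

static size_t trim_leftovers(uint64_t rstart, uint64_t rend,
			     uint64_t start, uint64_t end,
			     struct range out[2])
{
	size_t n = 0;

	if (rstart < start)	/* head before the trimmed region */
		out[n++] = (struct range){ rstart, start - rstart };
	if (end < rend)		/* tail after the trimmed region */
		out[n++] = (struct range){ end, rend - end };
	return n;
}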
3718 &block_group->fs_info->discard_ctl; in trim_no_bitmap()
3719 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in trim_no_bitmap()
3727 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); in trim_no_bitmap()
3732 mutex_lock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3733 spin_lock(&ctl->tree_lock); in trim_no_bitmap()
3735 if (ctl->free_space < minlen) in trim_no_bitmap()
3743 while (entry->bitmap || (async && btrfs_free_space_trimmed(entry))) { in trim_no_bitmap()
3745 node = rb_next(&entry->offset_index); in trim_no_bitmap()
3752 if (entry->offset >= end) in trim_no_bitmap()
3755 extent_start = entry->offset; in trim_no_bitmap()
3756 extent_bytes = entry->bytes; in trim_no_bitmap()
3757 extent_trim_state = entry->trim_state; in trim_no_bitmap()
3759 start = entry->offset; in trim_no_bitmap()
3760 bytes = entry->bytes; in trim_no_bitmap()
3762 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3763 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3777 entry->offset += max_discard_size; in trim_no_bitmap()
3778 entry->bytes -= max_discard_size; in trim_no_bitmap()
3785 bytes = min(extent_start + extent_bytes, end) - start; in trim_no_bitmap()
3787 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3788 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3796 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3799 list_add_tail(&trim_entry.list, &ctl->trimming_ranges); in trim_no_bitmap()
3800 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
3806 block_group->discard_cursor = start + bytes; in trim_no_bitmap()
3811 block_group->discard_cursor = start; in trim_no_bitmap()
3816 ret = -ERESTARTSYS; in trim_no_bitmap()
3826 block_group->discard_cursor = btrfs_block_group_end(block_group); in trim_no_bitmap()
3827 spin_unlock(&ctl->tree_lock); in trim_no_bitmap()
3828 mutex_unlock(&ctl->cache_writeout_mutex); in trim_no_bitmap()
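/*
 * Sketch, not from this file: the max_discard_size clamp above.  In the
 * async path a large extent is not discarded in one go; only the leading
 * max_discard_size bytes are taken and the remainder stays in the tree
 * for a later pass.  A userspace loop showing the resulting split
 * (printf stands in for the actual discard):
 */
#include <stdint.h>
#include <stdio.h>

static void issue_bounded_discards(uint64_t offset, uint64_t bytes,
				   uint64_t max_discard)
{
	while (bytes) {
		uint64_t chunk = bytes;

		if (max_discard && chunk > max_discard)
			chunk = max_discard;	/* clamp, keep the rest */
		printf("discard [%llu, +%llu)\n",
		       (unsigned long long)offset,
		       (unsigned long long)chunk);
		offset += chunk;
		bytes -= chunk;
	}
}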
3847 static void reset_trimming_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset) in reset_trimming_bitmap() argument
3851 spin_lock(&ctl->tree_lock); in reset_trimming_bitmap()
3852 entry = tree_search_offset(ctl, offset, 1, 0); in reset_trimming_bitmap()
3855 ctl->discardable_extents[BTRFS_STAT_CURR] += in reset_trimming_bitmap()
3856 entry->bitmap_extents; in reset_trimming_bitmap()
3857 ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes; in reset_trimming_bitmap()
3859 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in reset_trimming_bitmap()
3862 spin_unlock(&ctl->tree_lock); in reset_trimming_bitmap()
3869 entry->trim_state = BTRFS_TRIM_STATE_TRIMMED; in end_trimming_bitmap()
3870 ctl->discardable_extents[BTRFS_STAT_CURR] -= in end_trimming_bitmap()
3871 entry->bitmap_extents; in end_trimming_bitmap()
3872 ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes; in end_trimming_bitmap()
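/*
 * Sketch, not from this file: reset_trimming_bitmap() and
 * end_trimming_bitmap() above are a symmetric pair.  A bitmap
 * contributes to the discardable counters only while untrimmed, so
 * flipping its trim state adds or removes its whole contribution.
 * Illustrative model:
 */
#include <stdbool.h>
#include <stdint.h>

struct disc_ctr { long extents; uint64_t bytes; };
struct bmap { long extents; uint64_t bytes; bool trimmed; };

static void set_bitmap_trimmed(struct disc_ctr *c, struct bmap *b,
			       bool trimmed)
{
	if (b->trimmed == trimmed)
		return;
	if (trimmed) {			/* nothing left to discard */
		c->extents -= b->extents;
		c->bytes -= b->bytes;
	} else {			/* discardable again */
		c->extents += b->extents;
		c->bytes += b->bytes;
	}
	b->trimmed = trimmed;
}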
3884 &block_group->fs_info->discard_ctl; in trim_bitmaps()
3885 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in trim_bitmaps()
3890 u64 offset = offset_to_bitmap(ctl, start); in trim_bitmaps() local
3891 const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size); in trim_bitmaps()
3893 while (offset < end) { in trim_bitmaps()
3897 mutex_lock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3898 spin_lock(&ctl->tree_lock); in trim_bitmaps()
3900 if (ctl->free_space < minlen) { in trim_bitmaps()
3901 block_group->discard_cursor = in trim_bitmaps()
3903 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3904 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3908 entry = tree_search_offset(ctl, offset, 1, 0); in trim_bitmaps()
3917 if (!entry || (async && minlen && start == offset && btrfs_free_space_trimmed(entry))) { in trim_bitmaps()
3919 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3920 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3931 if (start == offset) in trim_bitmaps()
3932 entry->trim_state = BTRFS_TRIM_STATE_TRIMMING; in trim_bitmaps()
3944 entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED; in trim_bitmaps()
3945 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3946 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3956 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3957 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3961 bytes = min(bytes, end - start); in trim_bitmaps()
3963 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3964 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3980 if (entry->bytes == 0) in trim_bitmaps()
3983 spin_unlock(&ctl->tree_lock); in trim_bitmaps()
3986 list_add_tail(&trim_entry.list, &ctl->trimming_ranges); in trim_bitmaps()
3987 mutex_unlock(&ctl->cache_writeout_mutex); in trim_bitmaps()
3992 reset_trimming_bitmap(ctl, offset); in trim_bitmaps()
3993 block_group->discard_cursor = in trim_bitmaps()
3999 offset += BITS_PER_BITMAP * ctl->unit; in trim_bitmaps()
4000 start = offset; in trim_bitmaps()
4004 block_group->discard_cursor = start; in trim_bitmaps()
4007 if (start != offset) in trim_bitmaps()
4008 reset_trimming_bitmap(ctl, offset); in trim_bitmaps()
4009 ret = -ERESTARTSYS; in trim_bitmaps()
4016 if (offset >= end) in trim_bitmaps()
4017 block_group->discard_cursor = end; in trim_bitmaps()
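/*
 * Sketch, not from this file: the outer cursor movement of
 * trim_bitmaps().  One bitmap window is handled per iteration and the
 * cursor advances in BITS_PER_BITMAP * unit strides until it passes
 * @end.  Simplified model that assumes the block group starts at
 * offset 0:
 */
#include <stdint.h>

static void walk_bitmap_windows(uint64_t start, uint64_t end,
				uint64_t window,
				void (*visit)(uint64_t off, uint64_t lim))
{
	uint64_t offset = (start / window) * window;	/* covering window */

	while (offset < end) {
		visit(offset, end);
		offset += window;	/* next bitmap window */
	}
}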
4026 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; in btrfs_trim_block_group()
4030 ASSERT(!btrfs_is_zoned(block_group->fs_info)); in btrfs_trim_block_group()
4034 spin_lock(&block_group->lock); in btrfs_trim_block_group()
4035 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { in btrfs_trim_block_group()
4036 spin_unlock(&block_group->lock); in btrfs_trim_block_group()
4040 spin_unlock(&block_group->lock); in btrfs_trim_block_group()
4047 div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem); in btrfs_trim_block_group()
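/*
 * Sketch, not from this file: the div64_u64_rem() check above.  If @end
 * cuts through the middle of a bitmap window, that bitmap was only
 * partially trimmed and must not keep a trimmed state; the test is
 * simply whether @end lands on a window boundary:
 */
#include <stdbool.h>
#include <stdint.h>

static bool ends_mid_bitmap(uint64_t end, uint64_t bits_per_bitmap,
			    uint64_t unit)
{
	return (end % (bits_per_bitmap * unit)) != 0;
}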
4064 spin_lock(&block_group->lock); in btrfs_trim_block_group_extents()
4065 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { in btrfs_trim_block_group_extents()
4066 spin_unlock(&block_group->lock); in btrfs_trim_block_group_extents()
4070 spin_unlock(&block_group->lock); in btrfs_trim_block_group_extents()
4086 spin_lock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
4087 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) { in btrfs_trim_block_group_bitmaps()
4088 spin_unlock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
4092 spin_unlock(&block_group->lock); in btrfs_trim_block_group_bitmaps()
4104 return btrfs_super_cache_generation(fs_info->super_copy); in btrfs_free_space_cache_v1_active()
4116 node = rb_first_cached(&fs_info->block_group_cache_tree); in cleanup_free_space_cache_v1()
4135  * update_super_roots() will set or unset super_copy->cache_generation based on SPACE_CACHE and BTRFS_FS_CLEANUP_SPACE_CACHE_V1, so a transaction commit is needed for the cache generation to be updated. in btrfs_set_free_space_cache_v1_active()
4141 trans = btrfs_start_transaction(fs_info->tree_root, 0); in btrfs_set_free_space_cache_v1_active()
4146 set_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags); in btrfs_set_free_space_cache_v1_active()
4157 clear_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags); in btrfs_set_free_space_cache_v1_active()
4166 return -ENOMEM; in btrfs_free_space_init()
4173 return -ENOMEM; in btrfs_free_space_init()
4193 u64 offset, u64 bytes, bool bitmap) in test_add_free_space_entry() argument
4195 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; in test_add_free_space_entry()
4206 return -ENOMEM; in test_add_free_space_entry()
4210 spin_lock(&ctl->tree_lock); in test_add_free_space_entry()
4211 info->offset = offset; in test_add_free_space_entry()
4212 info->bytes = bytes; in test_add_free_space_entry()
4213 info->max_extent_size = 0; in test_add_free_space_entry()
4215 spin_unlock(&ctl->tree_lock); in test_add_free_space_entry()
4225 return -ENOMEM; in test_add_free_space_entry()
4229 spin_lock(&ctl->tree_lock); in test_add_free_space_entry()
4230 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), in test_add_free_space_entry()
4233 info->bitmap = map; in test_add_free_space_entry()
4235 add_new_bitmap(ctl, info, offset); in test_add_free_space_entry()
4240 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes, in test_add_free_space_entry()
4243 bytes -= bytes_added; in test_add_free_space_entry()
4244 offset += bytes_added; in test_add_free_space_entry()
4245 spin_unlock(&ctl->tree_lock); in test_add_free_space_entry()
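/*
 * Sketch, not from this file: the carry loop at the end of
 * test_add_free_space_entry().  Each add_bytes_to_bitmap() call can only
 * fill up to the end of the current bitmap window, so the caller
 * advances offset by what was added and loops with the remainder.  A
 * model of the per-call cap, assuming a fixed window size:
 */
#include <stdint.h>

static uint64_t add_capped(uint64_t offset, uint64_t bytes,
			   uint64_t window)
{
	uint64_t window_end = (offset / window + 1) * window;

	if (offset + bytes > window_end)
		return window_end - offset;	/* stop at the window edge */
	return bytes;	/* caller: offset += ret; bytes -= ret; */
}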
4263 u64 offset, u64 bytes) in test_check_exists() argument
4265 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; in test_check_exists()
4269 spin_lock(&ctl->tree_lock); in test_check_exists()
4270 info = tree_search_offset(ctl, offset, 0, 0); in test_check_exists()
4272 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), in test_check_exists()
4279 if (info->bitmap) { in test_check_exists()
4284 bit_off = offset; in test_check_exists()
4285 bit_bytes = ctl->unit; in test_check_exists()
4288 if (bit_off == offset) { in test_check_exists()
4291 } else if (bit_off > offset && in test_check_exists()
4292 offset + bytes > bit_off) { in test_check_exists()
4298 n = rb_prev(&info->offset_index); in test_check_exists()
4302 if (tmp->offset + tmp->bytes < offset) in test_check_exists()
4304 if (offset + bytes < tmp->offset) { in test_check_exists()
4305 n = rb_prev(&tmp->offset_index); in test_check_exists()
4312 n = rb_next(&info->offset_index); in test_check_exists()
4316 if (offset + bytes < tmp->offset) in test_check_exists()
4318 if (tmp->offset + tmp->bytes < offset) { in test_check_exists()
4319 n = rb_next(&tmp->offset_index); in test_check_exists()
4330 if (info->offset == offset) { in test_check_exists()
4335 if (offset > info->offset && offset < info->offset + info->bytes) in test_check_exists()
4338 spin_unlock(&ctl->tree_lock); in test_check_exists()
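/*
 * Sketch, not from this file: the neighbour walk in test_check_exists()
 * ultimately answers whether [offset, offset + bytes) intersects any
 * existing entry.  The predicate underneath is the usual half-open
 * interval overlap test:
 */
#include <stdbool.h>
#include <stdint.h>

static bool ranges_overlap(uint64_t a_start, uint64_t a_len,
			   uint64_t b_start, uint64_t b_len)
{
	return a_start < b_start + b_len && b_start < a_start + a_len;
}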