Lines matching +full:cache +full:-block +full:-size (the tokenized form of the query "cache-block-size")
1 /* SPDX-License-Identifier: GPL-2.0 */
16 #include "free-space-cache.h"
33 /* 0 < size <= 128K */
35 /* 128K < size <= 8M */
37 /* 8M < size < BG_LENGTH */
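The three thresholds above define the block group size classes used when matching an allocation to a block group (see the btrfs_calc_block_group_size_class() declaration near the end of this listing). A minimal stand-alone sketch of bucketing a request size against those thresholds; the enum and function names below are illustrative, not the kernel's:

#include <stdint.h>

#define SZ_128K (128ULL * 1024)
#define SZ_8M   (8ULL * 1024 * 1024)

enum bg_size_class { BG_SZ_NONE, BG_SZ_SMALL, BG_SZ_MEDIUM, BG_SZ_LARGE };

/* Bucket an allocation size using the thresholds from the comments above. */
static enum bg_size_class calc_size_class(uint64_t size)
{
        if (size == 0)
                return BG_SZ_NONE;
        if (size <= SZ_128K)
                return BG_SZ_SMALL;   /* 0 < size <= 128K */
        if (size <= SZ_8M)
                return BG_SZ_MEDIUM;  /* 128K < size <= 8M */
        return BG_SZ_LARGE;           /* 8M < size < BG_LENGTH */
}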
75 /* Block group flags set at runtime */
84 /* Does the block group need to be added to the free space tree? */
86 /* Indicate that the block group is placed on a sequential zone */
89 * Indicate that block group is in the list of new block groups of a
132 * The last committed used bytes of this block group, if the above @used
133 * is still the same as @commit_used, we don't need to update block
134 * group item of this block group.
138 * If the free space extent count exceeds this number, convert the block
145 * block group back to extents.
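The fragments above describe two pieces of bookkeeping: a cached @commit_used value that lets the block group item update be skipped when @used has not changed since the last commit, and an extent-count threshold that controls converting free space tracking between extents and bitmaps. A small stand-alone sketch of the first idea, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

struct bg_usage {
        uint64_t used;         /* current bytes used in the block group */
        uint64_t commit_used;  /* value written out at the last commit */
};

/* Skip rewriting the on-disk item when nothing changed since the last commit. */
static bool block_group_item_needs_update(struct bg_usage *bg)
{
        if (bg->used == bg->commit_used)
                return false;
        bg->commit_used = bg->used;
        return true;
}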
164 /* Cache tracking stuff */
170 /* Free space cache stuff */
173 /* Block group cache stuff */
176 /* For block groups in the same raid type */
182 * List of struct btrfs_free_clusters for this block group.
197 /* For read-only block groups */
201 * When non-zero it means the block group's logical address and its
202 * device extents can not be reused for future block group allocations
204 * reused while some task is still using the block group after it was
205 * deleted - we want to make sure they can only be reused for new block
206 * groups after that task is done with the deleted block group.
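This comment describes a counter that keeps a deleted block group's logical address and device extents from being reused while any task is still using it; the btrfs_freeze_block_group()/btrfs_unfreeze_block_group() declarations appear further down. A simplified sketch of that reuse guard (names are illustrative; the kernel additionally takes the appropriate locks and performs cleanup when the count drops to zero):

#include <stdbool.h>
#include <stdint.h>

struct bg_guard {
        uint32_t frozen;  /* while non-zero, the range must not be reused */
};

static void bg_freeze(struct bg_guard *g)   { g->frozen++; }
static void bg_unfreeze(struct bg_guard *g) { g->frozen--; }

/* A removed block group's space may only be reused once no task holds it frozen. */
static bool bg_range_reusable(const struct bg_guard *g)
{
        return g->frozen == 0;
}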
217 /* For dirty block groups */
227 * block group's range is created (after it's added to its inode's
239 * This is to prevent races between block group relocation and nocow
248 * Number of extents in this block group used for swap files.
254 * Allocation offset for the block group to implement sequential
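The last fragment refers to the allocation offset a block group on a sequential zone uses: space is handed out strictly in order from a cursor rather than searched for in a free space map. A minimal sketch of such a cursor-style allocator under that assumption (struct and function names are illustrative):

#include <stdint.h>

struct seq_bg {
        uint64_t start;         /* logical start of the block group */
        uint64_t length;        /* total size of the block group */
        uint64_t alloc_offset;  /* next free byte, relative to start */
};

/* Sequentially allocate @num_bytes; store the logical start in @ret, or return -1 if full. */
static int seq_alloc(struct seq_bg *bg, uint64_t num_bytes, uint64_t *ret)
{
        if (bg->alloc_offset + num_bytes > bg->length)
                return -1;
        *ret = bg->start + bg->alloc_offset;
        bg->alloc_offset += num_bytes;
        return 0;
}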
271 return (block_group->start + block_group->length); in btrfs_block_group_end()
276 lockdep_assert_held(&bg->lock); in btrfs_is_block_group_used()
278 return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0); in btrfs_is_block_group_used()
285 * efficiency, so only proper data block groups are considered. in btrfs_is_block_group_data_only()
287 return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) && in btrfs_is_block_group_data_only()
288 !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA); in btrfs_is_block_group_data_only()
300 struct btrfs_block_group *cache);
301 void btrfs_get_block_group(struct btrfs_block_group *cache);
302 void btrfs_put_block_group(struct btrfs_block_group *cache);
310 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
312 int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
314 struct btrfs_block_group *cache);
330 u64 chunk_offset, u64 size);
332 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
334 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
340 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
343 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
372 static inline int btrfs_block_group_done(const struct btrfs_block_group *cache) in btrfs_block_group_done()
375 return cache->cached == BTRFS_CACHE_FINISHED || in btrfs_block_group_done()
376 cache->cached == BTRFS_CACHE_ERROR; in btrfs_block_group_done()
379 void btrfs_freeze_block_group(struct btrfs_block_group *cache);
380 void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);
385 enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size);