/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_SPACE_INFO_H
#define BTRFS_SPACE_INFO_H

#include <trace/events/btrfs.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/lockdep.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include "volumes.h"

struct btrfs_fs_info;
struct btrfs_block_group;

/*
 * Different levels to flush space when doing space reservations.
 *
 * The higher the level, the more methods we try to reclaim space.
 */
enum btrfs_reserve_flush_enum {
	/* If we are in the transaction, we can't flush anything. */
	BTRFS_RESERVE_NO_FLUSH,

	/*
	 * Flush space by:
	 * - Running delayed inode items
	 * - Allocating a new chunk
	 */
	BTRFS_RESERVE_FLUSH_LIMIT,

	/*
	 * Flush space by:
	 * - Running delayed inode items
	 * - Running delayed refs
	 * - Running delalloc and waiting for ordered extents
	 * - Allocating a new chunk
	 * - Committing transaction
	 */
	BTRFS_RESERVE_FLUSH_EVICT,

	/*
	 * Flush space by the above mentioned methods and by:
	 * - Running delayed iputs
	 * - Committing transaction
	 *
	 * Can be interrupted by a fatal signal.
	 */
	BTRFS_RESERVE_FLUSH_DATA,
	BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE,
	BTRFS_RESERVE_FLUSH_ALL,

	/*
	 * Pretty much the same as FLUSH_ALL, but can also steal space from
	 * the global rsv.
	 *
	 * Can be interrupted by a fatal signal.
	 */
	BTRFS_RESERVE_FLUSH_ALL_STEAL,

	/*
	 * This is for btrfs_use_block_rsv only.  We have exhausted our block
	 * rsv and our global block rsv.  This can happen for things like
	 * delalloc where we are overwriting a lot of extents with a single
	 * extent and didn't reserve enough space.  Alternatively it can happen
	 * with delalloc where we reserve one extent's worth for a large extent
	 * but fragmentation leads to multiple extents being created.  This
	 * will give us the reservation in the case of
	 *
	 * if (num_bytes < (space_info->total_bytes -
	 *		    btrfs_space_info_used(space_info, false))
	 *
	 * which ignores bytes_may_use.  This is potentially dangerous, but our
	 * reservation system is generally pessimistic so it is able to absorb
	 * this style of mistake.
	 */
	BTRFS_RESERVE_FLUSH_EMERGENCY,
};
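
/*
 * A minimal usage sketch (illustrative only, not a complete caller): a caller
 * that may block on the full set of reclaim methods passes
 * BTRFS_RESERVE_FLUSH_ALL to btrfs_reserve_metadata_bytes(), declared below:
 *
 *	ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes,
 *					   BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;
 *
 * Callers that are already inside a transaction cannot flush and use
 * BTRFS_RESERVE_NO_FLUSH (or another restricted level) instead.
 */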

enum btrfs_flush_state {
	FLUSH_DELAYED_ITEMS_NR	= 1,
	FLUSH_DELAYED_ITEMS	= 2,
	FLUSH_DELAYED_REFS_NR	= 3,
	FLUSH_DELAYED_REFS	= 4,
	FLUSH_DELALLOC		= 5,
	FLUSH_DELALLOC_WAIT	= 6,
	FLUSH_DELALLOC_FULL	= 7,
	ALLOC_CHUNK		= 8,
	ALLOC_CHUNK_FORCE	= 9,
	RUN_DELAYED_IPUTS	= 10,
	COMMIT_TRANS		= 11,
};

struct btrfs_space_info {
	spinlock_t lock;

	u64 total_bytes;	/* total bytes in the space,
				   this doesn't take mirrors into account */
	u64 bytes_used;		/* total bytes used,
				   this doesn't take mirrors into account */
	u64 bytes_pinned;	/* total bytes pinned, will be freed when the
				   transaction finishes */
	u64 bytes_reserved;	/* total bytes the allocator has reserved for
				   current allocations */
	u64 bytes_may_use;	/* number of bytes that may be used for
				   delalloc/allocations */
	u64 bytes_readonly;	/* total bytes that are read only */
	u64 bytes_zone_unusable;	/* total bytes that are unusable until
					   resetting the device zone */

	u64 max_extent_size;	/* This will hold the maximum extent size of
				   the space info if we had an ENOSPC in the
				   allocator. */

	/* Chunk size in bytes */
	u64 chunk_size;

	/*
	 * Once a block group drops below this threshold (percent) we'll
	 * schedule it for reclaim.
	 */
	int bg_reclaim_threshold;

	int clamp;		/* Used to scale our threshold for preemptive
				   flushing. The value is >> clamp, so it turns
				   out to be a 2^clamp divisor. */

	unsigned int full:1;	/* indicates that we cannot allocate any more
				   chunks for this space */
	unsigned int chunk_alloc:1;	/* set if we are allocating a chunk */

	unsigned int flush:1;		/* set if we are trying to make space */

	unsigned int force_alloc;	/* set if we need to force a chunk
					   alloc for this space */

	u64 disk_used;		/* total bytes used on disk */
	u64 disk_total;		/* total bytes on disk, takes mirrors into
				   account */

	u64 flags;

	struct list_head list;
	/* Protected by the spinlock 'lock'. */
	struct list_head ro_bgs;
	struct list_head priority_tickets;
	struct list_head tickets;

	/*
	 * Size of space that needs to be reclaimed in order to satisfy pending
	 * tickets.
	 */
	u64 reclaim_size;

	/*
	 * tickets_id just indicates the next ticket that will be handled, so
	 * note it's not stored per ticket.
	 */
	u64 tickets_id;

	struct rw_semaphore groups_sem;
	/* for block groups in our same type */
	struct list_head block_groups[BTRFS_NR_RAID_TYPES];

	struct kobject kobj;
	struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
};

struct reserve_ticket {
	u64 bytes;
	int error;
	bool steal;
	struct list_head list;
	wait_queue_head_t wait;
};

static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
{
	return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) &&
		(space_info->flags & BTRFS_BLOCK_GROUP_DATA));
}

/*
 * Declare a helper function to detect underflow of various space info members.
 */
#define DECLARE_SPACE_INFO_UPDATE(name, trace_name)			\
static inline void							\
btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info,		\
			       struct btrfs_space_info *sinfo,		\
			       s64 bytes)				\
{									\
	const u64 abs_bytes = (bytes < 0) ? -bytes : bytes;		\
	lockdep_assert_held(&sinfo->lock);				\
	trace_update_##name(fs_info, sinfo, sinfo->name, bytes);	\
	trace_btrfs_space_reservation(fs_info, trace_name,		\
				      sinfo->flags, abs_bytes,		\
				      bytes > 0);			\
	if (bytes < 0 && sinfo->name < -bytes) {			\
		WARN_ON(1);						\
		sinfo->name = 0;					\
		return;							\
	}								\
	sinfo->name += bytes;						\
}

DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
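
/*
 * A minimal usage sketch (illustrative only): the two invocations above
 * generate btrfs_space_info_update_bytes_may_use() and
 * btrfs_space_info_update_bytes_pinned().  Callers must hold
 * space_info->lock and pass a signed delta, e.g.:
 *
 *	spin_lock(&space_info->lock);
 *	btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes);
 *	spin_unlock(&space_info->lock);
 *
 * A negative delta larger than the member's current value trips the WARN_ON()
 * in the macro body and clamps the member to zero instead of underflowing.
 */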

int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
				struct btrfs_block_group *block_group);
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
					u64 chunk_size);
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags);
u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups);
int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush);
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info);
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush);

static inline void btrfs_space_info_free_bytes_may_use(
				struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				u64 num_bytes)
{
	spin_lock(&space_info->lock);
	btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes);
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush);
void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info);
void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);

#endif /* BTRFS_SPACE_INFO_H */