/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_SPACE_INFO_H
#define BTRFS_SPACE_INFO_H

#include <trace/events/btrfs.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/lockdep.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include "volumes.h"

struct btrfs_fs_info;
struct btrfs_block_group;

/*
 * Different levels to flush space when doing space reservations.
 *
 * The higher the level, the more methods we try to reclaim space.
 */
enum btrfs_reserve_flush_enum {
	/* If we are in the transaction, we can't flush anything. */
	BTRFS_RESERVE_NO_FLUSH,

	/*
	 * Flush space by:
	 * - Running delayed inode items
	 * - Allocating a new chunk
	 */
	BTRFS_RESERVE_FLUSH_LIMIT,

	/*
	 * Flush space by:
	 * - Running delayed inode items
	 * - Running delayed refs
	 * - Running delalloc and waiting for ordered extents
	 * - Allocating a new chunk
	 * - Committing transaction
	 */
	BTRFS_RESERVE_FLUSH_EVICT,

	/*
	 * Flush space by the above mentioned methods and by:
	 * - Running delayed iputs
	 * - Committing transaction
	 *
	 * Can be interrupted by a fatal signal.
	 */
	BTRFS_RESERVE_FLUSH_DATA,
	BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE,
	BTRFS_RESERVE_FLUSH_ALL,

	/*
	 * Pretty much the same as FLUSH_ALL, but can also steal space from
	 * the global rsv.
	 *
	 * Can be interrupted by a fatal signal.
	 */
	BTRFS_RESERVE_FLUSH_ALL_STEAL,

	/*
	 * This is for btrfs_use_block_rsv only.  We have exhausted our block
	 * rsv and our global block rsv.  This can happen for things like
	 * delalloc where we are overwriting a lot of extents with a single
	 * extent and didn't reserve enough space.  Alternatively it can happen
	 * with delalloc where we reserve one extent's worth for a large extent
	 * but fragmentation leads to multiple extents being created.  This
	 * will give us the reservation in the case of
	 *
	 *	if (num_bytes < (space_info->total_bytes -
	 *			 btrfs_space_info_used(space_info, false)))
	 *
	 * which ignores bytes_may_use.  This is potentially dangerous, but our
	 * reservation system is generally pessimistic so it is able to absorb
	 * this style of mistake.
	 */
	BTRFS_RESERVE_FLUSH_EMERGENCY,
};
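/*
 * Example (an illustrative sketch, not part of this header):
 * reserve_for_update() below is a hypothetical helper showing how a caller
 * might pick a flush level; the real entry point is
 * btrfs_reserve_metadata_bytes(), declared later in this file.  A caller
 * already inside a transaction must not flush at all, since flushing can
 * itself join or commit a transaction:
 *
 *	static int reserve_for_update(struct btrfs_fs_info *fs_info,
 *				      struct btrfs_space_info *space_info,
 *				      u64 num_bytes, bool in_transaction)
 *	{
 *		enum btrfs_reserve_flush_enum flush;
 *
 *		flush = in_transaction ? BTRFS_RESERVE_NO_FLUSH :
 *					 BTRFS_RESERVE_FLUSH_ALL;
 *		return btrfs_reserve_metadata_bytes(fs_info, space_info,
 *						    num_bytes, flush);
 *	}
 */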
/*
 * Please be aware that the order of the enum values is the order in which
 * reclaim is attempted in btrfs_async_reclaim_metadata_space().
 */
enum btrfs_flush_state {
	FLUSH_DELAYED_ITEMS_NR	= 1,
	FLUSH_DELAYED_ITEMS	= 2,
	FLUSH_DELAYED_REFS_NR	= 3,
	FLUSH_DELAYED_REFS	= 4,
	FLUSH_DELALLOC		= 5,
	FLUSH_DELALLOC_WAIT	= 6,
	FLUSH_DELALLOC_FULL	= 7,
	ALLOC_CHUNK		= 8,
	ALLOC_CHUNK_FORCE	= 9,
	RUN_DELAYED_IPUTS	= 10,
	COMMIT_TRANS		= 11,
	RESET_ZONES		= 12,
};

struct btrfs_space_info {
	struct btrfs_fs_info *fs_info;
	spinlock_t lock;

	u64 total_bytes;	/* total bytes in the space,
				   this doesn't take mirrors into account */
	u64 bytes_used;		/* total bytes used,
				   this doesn't take mirrors into account */
	u64 bytes_pinned;	/* total bytes pinned, will be freed when the
				   transaction finishes */
	u64 bytes_reserved;	/* total bytes the allocator has reserved for
				   current allocations */
	u64 bytes_may_use;	/* number of bytes that may be used for
				   delalloc/allocations */
	u64 bytes_readonly;	/* total bytes that are read only */
	u64 bytes_zone_unusable;	/* total bytes that are unusable until
					   resetting the device zone */

	u64 max_extent_size;	/* This will hold the maximum extent size of
				   the space info if we had an ENOSPC in the
				   allocator. */

	/* Chunk size in bytes */
	u64 chunk_size;

	/*
	 * Once a block group drops below this threshold (in percent) we'll
	 * schedule it for reclaim.
	 */
	int bg_reclaim_threshold;

	int clamp;		/* Used to scale our threshold for preemptive
				   flushing. The value is >> clamp, so it
				   turns out to be a 2^clamp divisor. */

	unsigned int full:1;	/* indicates that we cannot allocate any more
				   chunks for this space */
	unsigned int chunk_alloc:1;	/* set if we are allocating a chunk */

	unsigned int flush:1;		/* set if we are trying to make space */

	unsigned int force_alloc;	/* set if we need to force a chunk
					   alloc for this space */

	u64 disk_used;		/* total bytes used on disk */
	u64 disk_total;		/* total bytes on disk, takes mirrors into
				   account */

	u64 flags;

	struct list_head list;
	/* Protected by the spinlock 'lock'. */
	struct list_head ro_bgs;
	struct list_head priority_tickets;
	struct list_head tickets;

	/*
	 * Size of space that needs to be reclaimed in order to satisfy
	 * pending tickets.
	 */
	u64 reclaim_size;

	/*
	 * tickets_id just indicates the id of the next ticket to be handled;
	 * note that it's not stored per ticket.
	 */
	u64 tickets_id;

	struct rw_semaphore groups_sem;
	/* for block groups in our same type */
	struct list_head block_groups[BTRFS_NR_RAID_TYPES];

	struct kobject kobj;
	struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];

	/*
	 * Monotonically increasing counter of block group reclaim attempts.
	 * Exposed in /sys/fs/btrfs/<uuid>/allocation/<type>/reclaim_count.
	 */
	u64 reclaim_count;

	/*
	 * Monotonically increasing counter of reclaimed bytes.
	 * Exposed in /sys/fs/btrfs/<uuid>/allocation/<type>/reclaim_bytes.
	 */
	u64 reclaim_bytes;

	/*
	 * Monotonically increasing counter of reclaim errors.
	 * Exposed in /sys/fs/btrfs/<uuid>/allocation/<type>/reclaim_errors.
	 */
	u64 reclaim_errors;

	/*
	 * If true, use the dynamic reclaim threshold instead of the fixed
	 * bg_reclaim_threshold.
	 */
	bool dynamic_reclaim;

	/*
	 * Periodically check all block groups against the reclaim threshold
	 * in the cleaner thread.
	 */
	bool periodic_reclaim;

	/*
	 * Periodic reclaim should be a no-op if a space_info hasn't freed
	 * any space since the last time we tried.
	 */
	bool periodic_reclaim_ready;

	/* Net bytes freed or allocated since the last reclaim pass. */
	s64 reclaimable_bytes;
};
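/*
 * A rough sketch of how the counters above combine (this mirrors what
 * btrfs_space_info_used(), declared below, computes; see space-info.c for
 * the authoritative version):
 *
 *	used = bytes_used + bytes_reserved + bytes_pinned +
 *	       bytes_readonly + bytes_zone_unusable +
 *	       (may_use_included ? bytes_may_use : 0);
 *
 * Free space available for new reservations is then approximately
 * total_bytes - used, which is the headroom the
 * BTRFS_RESERVE_FLUSH_EMERGENCY comment above refers to.
 */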
struct reserve_ticket {
	u64 bytes;
	int error;
	bool steal;
	struct list_head list;
	wait_queue_head_t wait;
};

static inline bool btrfs_mixed_space_info(const struct btrfs_space_info *space_info)
{
	return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) &&
		(space_info->flags & BTRFS_BLOCK_GROUP_DATA));
}

/*
 * Declare a helper function to detect underflow of various space info
 * members.
 */
#define DECLARE_SPACE_INFO_UPDATE(name, trace_name)			\
static inline void							\
btrfs_space_info_update_##name(struct btrfs_space_info *sinfo,		\
			       s64 bytes)				\
{									\
	struct btrfs_fs_info *fs_info = sinfo->fs_info;			\
	const u64 abs_bytes = (bytes < 0) ? -bytes : bytes;		\
	lockdep_assert_held(&sinfo->lock);				\
	trace_update_##name(fs_info, sinfo, sinfo->name, bytes);	\
	trace_btrfs_space_reservation(fs_info, trace_name,		\
				      sinfo->flags, abs_bytes,		\
				      bytes > 0);			\
	if (bytes < 0 && sinfo->name < -bytes) {			\
		WARN_ON(1);						\
		sinfo->name = 0;					\
		return;							\
	}								\
	sinfo->name += bytes;						\
}

DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable");

int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
				struct btrfs_block_group *block_group);
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
					u64 chunk_size);
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags);
u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info,
				 bool may_use_included);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups);
int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush);
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info);
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 const struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush);

static inline void btrfs_space_info_free_bytes_may_use(
					struct btrfs_space_info *space_info,
					u64 num_bytes)
{
	spin_lock(&space_info->lock);
	btrfs_space_info_update_bytes_may_use(space_info, -num_bytes);
	btrfs_try_granting_tickets(space_info->fs_info, space_info);
	spin_unlock(&space_info->lock);
}
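/*
 * Usage sketch (illustrative): the DECLARE_SPACE_INFO_UPDATE() instances
 * above generate btrfs_space_info_update_bytes_may_use(),
 * btrfs_space_info_update_bytes_pinned() and
 * btrfs_space_info_update_bytes_zone_unusable().  Each must be called with
 * space_info->lock held (enforced by lockdep_assert_held()); a negative
 * delta releases space, and a delta that would underflow the counter is
 * clamped to zero with a WARN_ON() instead:
 *
 *	spin_lock(&space_info->lock);
 *	btrfs_space_info_update_bytes_pinned(space_info, num_bytes);
 *	spin_unlock(&space_info->lock);
 *
 * btrfs_space_info_free_bytes_may_use() above wraps exactly this pattern
 * for bytes_may_use, and additionally retries pending tickets, because
 * releasing bytes_may_use may allow a queued reservation to succeed.
 */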
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush);
void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info);
void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);

void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes);
void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready);
bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info);
int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info);
void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info);
void btrfs_return_free_space(struct btrfs_space_info *space_info, u64 len);

#endif /* BTRFS_SPACE_INFO_H */