/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_SPACE_INFO_H
#define BTRFS_SPACE_INFO_H

#include <trace/events/btrfs.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/lockdep.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include "volumes.h"

struct btrfs_fs_info;
struct btrfs_block_group;

/*
 * Different levels of flushing to try when doing space reservations.
 *
 * The higher the level, the more methods we try to reclaim space.
 */
enum btrfs_reserve_flush_enum {
	/* If we are in the transaction, we can't flush anything. */
	BTRFS_RESERVE_NO_FLUSH,

	/*
	 * Flush space by:
	 * - Running delayed inode items
	 * - Allocating a new chunk
	 */
	BTRFS_RESERVE_FLUSH_LIMIT,

	/*
	 * Flush space by:
	 * - Running delayed inode items
	 * - Running delayed refs
	 * - Running delalloc and waiting for ordered extents
	 * - Allocating a new chunk
	 * - Committing transaction
	 */
	BTRFS_RESERVE_FLUSH_EVICT,

	/*
	 * Flush space by the above mentioned methods and by:
	 * - Running delayed iputs
	 * - Committing transaction
	 *
	 * Can be interrupted by a fatal signal.
	 */
	BTRFS_RESERVE_FLUSH_DATA,
	BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE,
	BTRFS_RESERVE_FLUSH_ALL,

	/*
	 * Pretty much the same as FLUSH_ALL, but can also steal space from
	 * global rsv.
	 *
	 * Can be interrupted by a fatal signal.
	 */
	BTRFS_RESERVE_FLUSH_ALL_STEAL,

	/*
	 * This is for btrfs_use_block_rsv only.  We have exhausted our block
	 * rsv and our global block rsv.  This can happen for things like
	 * delalloc where we are overwriting a lot of extents with a single
	 * extent and didn't reserve enough space.  Alternatively it can happen
	 * with delalloc where we reserve 1 extent's worth for a large extent
	 * but fragmentation leads to multiple extents being created.  This
	 * will give us the reservation in the case of
	 *
	 * if (num_bytes < (space_info->total_bytes -
	 *		    btrfs_space_info_used(space_info, false)))
	 *
	 * which ignores bytes_may_use.  This is potentially dangerous, but our
	 * reservation system is generally pessimistic so it is able to absorb
	 * this style of mistake.
	 */
	BTRFS_RESERVE_FLUSH_EMERGENCY,
};
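
/*
 * Example (illustrative only, not part of this header): a blocking metadata
 * reservation that may use every reclaim method short of stealing from the
 * global rsv passes the FLUSH_ALL level to the reservation API declared
 * below:
 *
 *	ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes,
 *					   BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;
 */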

enum btrfs_flush_state {
	FLUSH_DELAYED_ITEMS_NR	= 1,
	FLUSH_DELAYED_ITEMS	= 2,
	FLUSH_DELAYED_REFS_NR	= 3,
	FLUSH_DELAYED_REFS	= 4,
	FLUSH_DELALLOC		= 5,
	FLUSH_DELALLOC_WAIT	= 6,
	FLUSH_DELALLOC_FULL	= 7,
	ALLOC_CHUNK		= 8,
	ALLOC_CHUNK_FORCE	= 9,
	RUN_DELAYED_IPUTS	= 10,
	COMMIT_TRANS		= 11,
};
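
/*
 * Sketch (illustrative; flush_space() stands in for the internal helper in
 * space-info.c): the async reclaim machinery walks these states in ascending
 * order, trying progressively more expensive ways to free space, roughly:
 *
 *	for (state = FLUSH_DELAYED_ITEMS_NR; state <= COMMIT_TRANS; state++)
 *		flush_space(fs_info, space_info, to_reclaim, state, false);
 *
 * The real loop has more conditions and may stop early once the pending
 * reservations are satisfied.
 */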

struct btrfs_space_info {
	struct btrfs_fs_info *fs_info;
	spinlock_t lock;

	u64 total_bytes;	/* total bytes in the space,
				   this doesn't take mirrors into account */
	u64 bytes_used;		/* total bytes used,
				   this doesn't take mirrors into account */
	u64 bytes_pinned;	/* total bytes pinned, will be freed when the
				   transaction finishes */
	u64 bytes_reserved;	/* total bytes the allocator has reserved for
				   current allocations */
	u64 bytes_may_use;	/* number of bytes that may be used for
				   delalloc/allocations */
	u64 bytes_readonly;	/* total bytes that are read only */
	u64 bytes_zone_unusable;	/* total bytes that are unusable until
					   resetting the device zone */

	u64 max_extent_size;	/* This will hold the maximum extent size of
				   the space info if we had an ENOSPC in the
				   allocator. */
	/* Chunk size in bytes */
	u64 chunk_size;

	/*
	 * Once a block group drops below this threshold (a percentage), we'll
	 * schedule it for reclaim.
	 */
	int bg_reclaim_threshold;

	int clamp;		/* Used to scale our threshold for preemptive
				   flushing. The threshold is shifted right by
				   clamp, i.e. divided by 2^clamp; e.g.
				   clamp == 3 divides it by 8. */

	unsigned int full:1;	/* indicates that we cannot allocate any more
				   chunks for this space */
	unsigned int chunk_alloc:1;	/* set if we are allocating a chunk */

	unsigned int flush:1;		/* set if we are trying to make space */

	unsigned int force_alloc;	/* set if we need to force a chunk
					   alloc for this space */

	u64 disk_used;		/* total bytes used on disk */
	u64 disk_total;		/* total bytes on disk, takes mirrors into
				   account */

	u64 flags;

	struct list_head list;
	/* Protected by the spinlock 'lock'. */
	struct list_head ro_bgs;
	struct list_head priority_tickets;
	struct list_head tickets;

	/*
	 * Size of space that needs to be reclaimed in order to satisfy pending
	 * tickets.
	 */
	u64 reclaim_size;

	/*
	 * tickets_id indicates which ticket will be handled next; note that
	 * it is not stored per ticket.
	 */
	u64 tickets_id;

	struct rw_semaphore groups_sem;
	/* For block groups of the same type. */
	struct list_head block_groups[BTRFS_NR_RAID_TYPES];

	struct kobject kobj;
	struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];

	/*
	 * Monotonically increasing counter of block group reclaim attempts.
	 * Exposed in /sys/fs/btrfs/<uuid>/allocation/<type>/reclaim_count.
	 */
	u64 reclaim_count;

	/*
	 * Monotonically increasing counter of reclaimed bytes.
	 * Exposed in /sys/fs/btrfs/<uuid>/allocation/<type>/reclaim_bytes.
	 */
	u64 reclaim_bytes;

	/*
	 * Monotonically increasing counter of reclaim errors.
	 * Exposed in /sys/fs/btrfs/<uuid>/allocation/<type>/reclaim_errors.
	 */
	u64 reclaim_errors;

	/*
	 * If true, use the dynamic reclaim threshold instead of the fixed
	 * bg_reclaim_threshold.
	 */
	bool dynamic_reclaim;

	/*
	 * Periodically check all block groups against the reclaim
	 * threshold in the cleaner thread.
	 */
	bool periodic_reclaim;

	/*
	 * Periodic reclaim should be a no-op if a space_info hasn't
	 * freed any space since the last time we tried.
	 */
	bool periodic_reclaim_ready;

	/*
	 * Net bytes freed or allocated since the last reclaim pass.
	 */
	s64 reclaimable_bytes;
};

struct reserve_ticket {
	u64 bytes;
	int error;
	bool steal;
	struct list_head list;
	wait_queue_head_t wait;
};
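
/*
 * Sketch (illustrative): a reservation that cannot be satisfied right away
 * is queued as a ticket on space_info->tickets (or ->priority_tickets) and
 * the task sleeps on ticket.wait; as space is freed,
 * btrfs_try_granting_tickets() reduces ticket.bytes and wakes the waiter
 * once it reaches zero or ticket.error is set.
 */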

static inline bool btrfs_mixed_space_info(const struct btrfs_space_info *space_info)
{
	return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) &&
		(space_info->flags & BTRFS_BLOCK_GROUP_DATA));
}

/*
 * Declare a helper function to detect underflow of various space info members.
 */
#define DECLARE_SPACE_INFO_UPDATE(name, trace_name)			\
static inline void							\
btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info,		\
			       struct btrfs_space_info *sinfo,		\
			       s64 bytes)				\
{									\
	const u64 abs_bytes = (bytes < 0) ? -bytes : bytes;		\
	lockdep_assert_held(&sinfo->lock);				\
	trace_update_##name(fs_info, sinfo, sinfo->name, bytes);	\
	trace_btrfs_space_reservation(fs_info, trace_name,		\
				      sinfo->flags, abs_bytes,		\
				      bytes > 0);			\
	if (bytes < 0 && sinfo->name < -bytes) {			\
		WARN_ON(1);						\
		sinfo->name = 0;					\
		return;							\
	}								\
	sinfo->name += bytes;						\
}

DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable");
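
/*
 * Example (illustrative): each invocation above expands into a helper such
 * as btrfs_space_info_update_bytes_pinned(), which must be called with the
 * space_info lock held:
 *
 *	spin_lock(&sinfo->lock);
 *	btrfs_space_info_update_bytes_pinned(fs_info, sinfo, num_bytes);
 *	spin_unlock(&sinfo->lock);
 */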

int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
				struct btrfs_block_group *block_group);
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
					u64 chunk_size);
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags);
u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info,
				 bool may_use_included);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups);
int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush);
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info);
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 const struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush);

static inline void btrfs_space_info_free_bytes_may_use(
				struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				u64 num_bytes)
{
	spin_lock(&space_info->lock);
	btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes);
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush);
void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info);
void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);

void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes);
void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready);
bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info);
int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info);
void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info);

#endif /* BTRFS_SPACE_INFO_H */