/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_ALLOC_BACKGROUND_H
#define _BCACHEFS_ALLOC_BACKGROUND_H

#include "bcachefs.h"
#include "alloc_types.h"
#include "buckets.h"
#include "debug.h"
#include "super.h"

/* How out of date a pointer gen is allowed to be: */
#define BUCKET_GC_GEN_MAX	96U

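/*
 * Does @pos refer to a bucket that actually exists, i.e. a valid bucket number
 * on a member device that is currently present? (checked under rcu)
 */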
static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
{
	guard(rcu)();
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, pos.inode);
	return ca && bucket_valid(ca, pos.offset);
}

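/*
 * Pack/unpack a bucket position into a single u64: the device index
 * (bucket.inode) goes in the high 16 bits, the bucket offset in the low 48.
 */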
static inline u64 bucket_to_u64(struct bpos bucket)
{
	return (bucket.inode << 48) | bucket.offset;
}

static inline struct bpos u64_to_bucket(u64 bucket)
{
	return POS(bucket >> 48, bucket & ~(~0ULL << 48));
}

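/*
 * How far this bucket's gen has advanced past its oldest_gen, i.e. how out of
 * date the stalest pointers to this bucket may be; compared against
 * BUCKET_GC_GEN_MAX above.
 */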
static inline u8 alloc_gc_gen(struct bch_alloc_v4 a)
{
	return a.gen - a.oldest_gen;
}

static inline void alloc_to_bucket(struct bucket *dst, struct bch_alloc_v4 src)
{
	dst->gen		= src.gen;
	dst->data_type		= src.data_type;
	dst->stripe_sectors	= src.stripe_sectors;
	dst->dirty_sectors	= src.dirty_sectors;
	dst->cached_sectors	= src.cached_sectors;
	dst->stripe		= src.stripe;
}

static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst, struct bucket src)
{
	dst->gen		= src.gen;
	dst->data_type		= src.data_type;
	dst->stripe_sectors	= src.stripe_sectors;
	dst->dirty_sectors	= src.dirty_sectors;
	dst->cached_sectors	= src.cached_sectors;
	dst->stripe		= src.stripe;
}

static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
{
	struct bch_alloc_v4 ret = {};
	__bucket_m_to_alloc(&ret, b);
	return ret;
}

static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type)
{
	switch (data_type) {
	case BCH_DATA_cached:
	case BCH_DATA_stripe:
		return BCH_DATA_user;
	default:
		return data_type;
	}
}

static inline bool bucket_data_type_mismatch(enum bch_data_type bucket,
					     enum bch_data_type ptr)
{
	return !data_type_is_empty(bucket) &&
		bucket_data_type(bucket) != bucket_data_type(ptr);
}

/*
 * It is my general preference to use unsigned types for unsigned quantities -
 * however, these helpers are used in disk accounting calculations run by
 * triggers where the output will be negated and added to an s64. unsigned is
 * right out even though all these quantities will fit in 32 bits, since it
 * won't be sign extended correctly; u64 will negate "correctly", but s64 is the
 * simpler option here.
 */
static inline s64 bch2_bucket_sectors_total(struct bch_alloc_v4 a)
{
	return a.stripe_sectors + a.dirty_sectors + a.cached_sectors;
}

static inline s64 bch2_bucket_sectors_dirty(struct bch_alloc_v4 a)
{
	return a.stripe_sectors + a.dirty_sectors;
}

static inline s64 bch2_bucket_sectors(struct bch_alloc_v4 a)
{
	return a.data_type == BCH_DATA_cached
		? a.cached_sectors
		: bch2_bucket_sectors_dirty(a);
}

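/*
 * Sectors lost to fragmentation: the unused remainder of a bucket that
 * already holds some data (zero for completely empty buckets).
 */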
static inline s64 bch2_bucket_sectors_fragmented(struct bch_dev *ca,
						 struct bch_alloc_v4 a)
{
	int d = bch2_bucket_sectors(a);

	return d ? max(0, ca->mi.bucket_size - d) : 0;
}

static inline s64 bch2_gc_bucket_sectors_fragmented(struct bch_dev *ca, struct bucket a)
{
	int d = a.stripe_sectors + a.dirty_sectors;

	return d ? max(0, ca->mi.bucket_size - d) : 0;
}

static inline s64 bch2_bucket_sectors_unstriped(struct bch_alloc_v4 a)
{
	return a.data_type == BCH_DATA_stripe ? a.dirty_sectors : 0;
}

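/*
 * Derive a bucket's data type from its usage, in priority order: stripe
 * membership (parity buckets stay BCH_DATA_parity), then dirty data, cached
 * data, need_discard, need_gc_gens, and finally free.
 */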
static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
						 enum bch_data_type data_type)
{
	if (a.stripe)
		return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe;
	if (bch2_bucket_sectors_dirty(a))
		return bucket_data_type(data_type);
	if (a.cached_sectors)
		return BCH_DATA_cached;
	if (BCH_ALLOC_V4_NEED_DISCARD(&a))
		return BCH_DATA_need_discard;
	if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX)
		return BCH_DATA_need_gc_gens;
	return BCH_DATA_free;
}

static inline void alloc_data_type_set(struct bch_alloc_v4 *a, enum bch_data_type data_type)
{
	a->data_type = alloc_data_type(*a, data_type);
}

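/* LRU index for cached-data buckets: the last read time, clamped to LRU_TIME_MAX */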
static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a)
{
	return a.data_type == BCH_DATA_cached
		? a.io_time[READ] & LRU_TIME_MAX
		: 0;
}

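/*
 * Bucket data types whose contents can be moved; only these buckets get a
 * fragmentation LRU index below.
 */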
#define DATA_TYPES_MOVABLE		\
	((1U << BCH_DATA_btree)|	\
	 (1U << BCH_DATA_user)|		\
	 (1U << BCH_DATA_stripe))

static inline bool data_type_movable(enum bch_data_type type)
{
	return (1U << type) & DATA_TYPES_MOVABLE;
}

static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
					      struct bch_dev *ca)
{
	if (a.data_type >= BCH_DATA_NR)
		return 0;

	if (!data_type_movable(a.data_type) ||
	    !bch2_bucket_sectors_fragmented(ca, a))
		return 0;

	/*
	 * avoid overflowing LRU_TIME_BITS on a corrupted fs, when
	 * bucket_sectors_dirty is (much) bigger than bucket_size
	 */
	u64 d = min_t(s64, bch2_bucket_sectors_dirty(a),
		      ca->mi.bucket_size);

	return div_u64(d * (1ULL << 31), ca->mi.bucket_size);
}

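/*
 * Keys in the freespace btree encode alloc_gc_gen() / 16 in the high bits of
 * the offset; alloc_freespace_pos() builds such a key from a bucket position.
 */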
static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a)
{
	return ((u64) alloc_gc_gen(a) >> 4) << 56;
}

static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_v4 a)
{
	pos.offset |= alloc_freespace_genbits(a);
	return pos;
}

static inline unsigned alloc_v4_u64s_noerror(const struct bch_alloc_v4 *a)
{
	return (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
			BCH_ALLOC_V4_U64s_V0) +
		BCH_ALLOC_V4_NR_BACKPOINTERS(a) *
		(sizeof(struct bch_backpointer) / sizeof(u64));
}

static inline unsigned alloc_v4_u64s(const struct bch_alloc_v4 *a)
{
	unsigned ret = alloc_v4_u64s_noerror(a);
	BUG_ON(ret > U8_MAX - BKEY_U64s);
	return ret;
}

static inline void set_alloc_v4_u64s(struct bkey_i_alloc_v4 *a)
{
	set_bkey_val_u64s(&a->k, alloc_v4_u64s(&a->v));
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update_noupdate(struct btree_trans *, struct btree_iter *, struct bpos);
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *, struct bpos,
			      enum btree_iter_update_trigger_flags);

void __bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);

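/*
 * Fast path: if @k is already an alloc_v4 key in the current layout, return a
 * pointer to its value directly; otherwise convert it into @convert.
 */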
static inline const struct bch_alloc_v4 *bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *convert)
{
	const struct bch_alloc_v4 *ret;

	if (unlikely(k.k->type != KEY_TYPE_alloc_v4))
		goto slowpath;

	ret = bkey_s_c_to_alloc_v4(k).v;
	if (BCH_ALLOC_V4_BACKPOINTERS_START(ret) != BCH_ALLOC_V4_U64s)
		goto slowpath;

	return ret;
slowpath:
	__bch2_alloc_to_v4(k, convert);
	return convert;
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c);

int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);

int bch2_alloc_v1_validate(struct bch_fs *, struct bkey_s_c,
			   struct bkey_validate_context);
int bch2_alloc_v2_validate(struct bch_fs *, struct bkey_s_c,
			   struct bkey_validate_context);
int bch2_alloc_v3_validate(struct bch_fs *, struct bkey_s_c,
			   struct bkey_validate_context);
int bch2_alloc_v4_validate(struct bch_fs *, struct bkey_s_c,
			   struct bkey_validate_context);
void bch2_alloc_v4_swab(struct bkey_s);
void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_alloc_v4_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);

#define bch2_bkey_ops_alloc ((struct bkey_ops) {	\
	.key_validate	= bch2_alloc_v1_validate,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trigger	= bch2_trigger_alloc,		\
	.min_val_size	= 8,				\
})

#define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) {	\
	.key_validate	= bch2_alloc_v2_validate,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trigger	= bch2_trigger_alloc,		\
	.min_val_size	= 8,				\
})

#define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) {	\
	.key_validate	= bch2_alloc_v3_validate,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trigger	= bch2_trigger_alloc,		\
	.min_val_size	= 16,				\
})

#define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) {	\
	.key_validate	= bch2_alloc_v4_validate,	\
	.val_to_text	= bch2_alloc_v4_to_text,	\
	.swab		= bch2_alloc_v4_swab,		\
	.trigger	= bch2_trigger_alloc,		\
	.min_val_size	= 48,				\
})

int bch2_bucket_gens_validate(struct bch_fs *, struct bkey_s_c,
			      struct bkey_validate_context);
void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);

#define bch2_bkey_ops_bucket_gens ((struct bkey_ops) {	\
	.key_validate	= bch2_bucket_gens_validate,	\
	.val_to_text	= bch2_bucket_gens_to_text,	\
})

int bch2_bucket_gens_init(struct bch_fs *);

static inline bool bkey_is_alloc(const struct bkey *k)
{
	return  k->type == KEY_TYPE_alloc ||
		k->type == KEY_TYPE_alloc_v2 ||
		k->type == KEY_TYPE_alloc_v3;
}

int bch2_alloc_read(struct bch_fs *);

int bch2_alloc_key_to_dev_counters(struct btree_trans *, struct bch_dev *,
				   const struct bch_alloc_v4 *,
				   const struct bch_alloc_v4 *, unsigned);
int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned,
		       struct bkey_s_c, struct bkey_s,
		       enum btree_iter_update_trigger_flags);

int bch2_check_discard_freespace_key(struct btree_trans *, struct btree_iter *, u8 *, bool);
int bch2_check_alloc_info(struct bch_fs *);
int bch2_check_alloc_to_lru_refs(struct bch_fs *);
void bch2_dev_do_discards(struct bch_dev *);
void bch2_do_discards(struct bch_fs *);

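/*
 * Aim to keep ~1/128th (nbuckets >> 7) of the device's buckets free (counting
 * buckets awaiting discard, less the stripe watermark reserve); returns how
 * many cached buckets to invalidate to get there, clamped to the number of
 * cached buckets.
 */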
static inline u64 should_invalidate_buckets(struct bch_dev *ca,
					    struct bch_dev_usage u)
{
	u64 want_free = ca->mi.nbuckets >> 7;
	u64 free = max_t(s64, 0,
			   u.buckets[BCH_DATA_free]
			 + u.buckets[BCH_DATA_need_discard]
			 - bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));

	return clamp_t(s64, want_free - free, 0, u.buckets[BCH_DATA_cached]);
}

void bch2_dev_do_invalidates(struct bch_dev *);
void bch2_do_invalidates(struct bch_fs *);

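/*
 * The backpointer region of an alloc_v4 value: BCH_ALLOC_V4_NR_BACKPOINTERS()
 * entries starting BCH_ALLOC_V4_BACKPOINTERS_START() u64s in (falling back to
 * the v0 offset when that field is unset).
 */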
static inline struct bch_backpointer *alloc_v4_backpointers(struct bch_alloc_v4 *a)
{
	return (void *) ((u64 *) &a->v +
			 (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
			  BCH_ALLOC_V4_U64s_V0));
}

static inline const struct bch_backpointer *alloc_v4_backpointers_c(const struct bch_alloc_v4 *a)
{
	return (void *) ((u64 *) &a->v + BCH_ALLOC_V4_BACKPOINTERS_START(a));
}

int bch2_dev_freespace_init(struct bch_fs *, struct bch_dev *, u64, u64);
int bch2_fs_freespace_init(struct bch_fs *);
int bch2_dev_remove_alloc(struct bch_fs *, struct bch_dev *);

void bch2_recalc_capacity(struct bch_fs *);
u64 bch2_min_rw_member_capacity(struct bch_fs *);

void bch2_dev_allocator_set_rw(struct bch_fs *, struct bch_dev *, bool);
void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);

void bch2_dev_allocator_background_exit(struct bch_dev *);
void bch2_dev_allocator_background_init(struct bch_dev *);

void bch2_fs_allocator_background_init(struct bch_fs *);

#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */