/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_ALLOC_BACKGROUND_H
#define _BCACHEFS_ALLOC_BACKGROUND_H

#include "bcachefs.h"
#include "alloc_types.h"
#include "buckets.h"
#include "debug.h"
#include "super.h"

enum bch_validate_flags;

/* How out of date a pointer gen is allowed to be: */
#define BUCKET_GC_GEN_MAX	96U

static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, pos.inode);
	bool ret = ca && bucket_valid(ca, pos.offset);
	rcu_read_unlock();
	return ret;
}
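
/*
 * Usage sketch (editorial, not part of the upstream header): callers
 * typically use this to reject alloc keys whose position doesn't name a
 * real device and bucket before doing anything else with them:
 *
 *	if (!bch2_dev_bucket_exists(c, alloc_k.k->p))
 *		return -EINVAL;	(placeholder errno; real callers
 *				 use bcachefs error codes)
 */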

static inline u64 bucket_to_u64(struct bpos bucket)
{
	return (bucket.inode << 48) | bucket.offset;
}

static inline struct bpos u64_to_bucket(u64 bucket)
{
	return POS(bucket >> 48, bucket & ~(~0ULL << 48));
}
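
/*
 * Editorial note: the pair above packs a bucket position into a single
 * u64, device index (pos.inode) in the high 16 bits and bucket offset
 * in the low 48, so the conversion round-trips any position whose
 * offset fits in 48 bits:
 *
 *	struct bpos pos = POS(2, 1234);
 *	BUG_ON(!bpos_eq(u64_to_bucket(bucket_to_u64(pos)), pos));
 */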

static inline u8 alloc_gc_gen(struct bch_alloc_v4 a)
{
	return a.gen - a.oldest_gen;
}
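
/*
 * Editorial note: gens are u8, so the subtraction above is mod 256 and
 * handles wraparound; e.g. gen == 3 and oldest_gen == 250 gives a gc
 * gen of 9, i.e. the bucket's gen has advanced 9 times past the oldest
 * pointer gen that may still reference it.
 */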

static inline void alloc_to_bucket(struct bucket *dst, struct bch_alloc_v4 src)
{
	dst->gen		= src.gen;
	dst->data_type		= src.data_type;
	dst->stripe_sectors	= src.stripe_sectors;
	dst->dirty_sectors	= src.dirty_sectors;
	dst->cached_sectors	= src.cached_sectors;
	dst->stripe		= src.stripe;
}

static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst, struct bucket src)
{
	dst->gen		= src.gen;
	dst->data_type		= src.data_type;
	dst->stripe_sectors	= src.stripe_sectors;
	dst->dirty_sectors	= src.dirty_sectors;
	dst->cached_sectors	= src.cached_sectors;
	dst->stripe		= src.stripe;
}

static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
{
	struct bch_alloc_v4 ret = {};
	__bucket_m_to_alloc(&ret, b);
	return ret;
}

static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type)
{
	switch (data_type) {
	case BCH_DATA_cached:
	case BCH_DATA_stripe:
		return BCH_DATA_user;
	default:
		return data_type;
	}
}

static inline bool bucket_data_type_mismatch(enum bch_data_type bucket,
					     enum bch_data_type ptr)
{
	return !data_type_is_empty(bucket) &&
		bucket_data_type(bucket) != bucket_data_type(ptr);
}
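
/*
 * Editorial example: bucket_data_type() collapses cached and stripe to
 * user, so a BCH_DATA_user pointer into a BCH_DATA_stripe bucket is not
 * a mismatch, while a BCH_DATA_btree pointer into the same bucket is:
 *
 *	bucket_data_type_mismatch(BCH_DATA_stripe, BCH_DATA_user)  == false
 *	bucket_data_type_mismatch(BCH_DATA_stripe, BCH_DATA_btree) == true
 */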

static inline s64 bch2_bucket_sectors_total(struct bch_alloc_v4 a)
{
	return a.stripe_sectors + a.dirty_sectors + a.cached_sectors;
}

static inline s64 bch2_bucket_sectors_dirty(struct bch_alloc_v4 a)
{
	return a.stripe_sectors + a.dirty_sectors;
}

static inline s64 bch2_bucket_sectors(struct bch_alloc_v4 a)
{
	return a.data_type == BCH_DATA_cached
		? a.cached_sectors
		: bch2_bucket_sectors_dirty(a);
}

static inline s64 bch2_bucket_sectors_fragmented(struct bch_dev *ca,
						 struct bch_alloc_v4 a)
{
	int d = bch2_bucket_sectors(a);

	return d ? max(0, ca->mi.bucket_size - d) : 0;
}

static inline s64 bch2_gc_bucket_sectors_fragmented(struct bch_dev *ca, struct bucket a)
{
	int d = a.stripe_sectors + a.dirty_sectors;

	return d ? max(0, ca->mi.bucket_size - d) : 0;
}

static inline s64 bch2_bucket_sectors_unstriped(struct bch_alloc_v4 a)
{
	return a.data_type == BCH_DATA_stripe ? a.dirty_sectors : 0;
}

static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
						 enum bch_data_type data_type)
{
	if (a.stripe)
		return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe;
	if (bch2_bucket_sectors_dirty(a))
		return data_type;
	if (a.cached_sectors)
		return BCH_DATA_cached;
	if (BCH_ALLOC_V4_NEED_DISCARD(&a))
		return BCH_DATA_need_discard;
	if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX)
		return BCH_DATA_need_gc_gens;
	return BCH_DATA_free;
}
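
/*
 * Editorial sketch of the precedence above, assuming an otherwise
 * zeroed bch_alloc_v4: a bucket holding only cached data reports
 * BCH_DATA_cached, and once those sectors are gone it falls through to
 * need_discard, need_gc_gens or free depending on the discard flag and
 * how stale its gen is:
 *
 *	struct bch_alloc_v4 a = { .gen = 1, .cached_sectors = 8 };
 *	alloc_data_type(a, BCH_DATA_user)	-> BCH_DATA_cached
 *	a.cached_sectors = 0;
 *	alloc_data_type(a, BCH_DATA_user)	-> BCH_DATA_free
 */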

static inline void alloc_data_type_set(struct bch_alloc_v4 *a, enum bch_data_type data_type)
{
	a->data_type = alloc_data_type(*a, data_type);
}

static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a)
{
	return a.data_type == BCH_DATA_cached ? a.io_time[READ] : 0;
}

#define DATA_TYPES_MOVABLE		\
	((1U << BCH_DATA_btree)|	\
	 (1U << BCH_DATA_user)|		\
	 (1U << BCH_DATA_stripe))

static inline bool data_type_movable(enum bch_data_type type)
{
	return (1U << type) & DATA_TYPES_MOVABLE;
}

static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
					      struct bch_dev *ca)
{
	if (!data_type_movable(a.data_type) ||
	    !bch2_bucket_sectors_fragmented(ca, a))
		return 0;

	/*
	 * avoid overflowing LRU_TIME_BITS on a corrupted fs, when
	 * bucket_sectors_dirty is (much) bigger than bucket_size
	 */
	u64 d = min(bch2_bucket_sectors_dirty(a),
		    ca->mi.bucket_size);

	return div_u64(d * (1ULL << 31), ca->mi.bucket_size);
}
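
/*
 * Editorial worked example: with ca->mi.bucket_size == 512 and 128
 * dirty sectors, the index is 128 * 2^31 / 512 == 2^29; it scales
 * linearly with fullness from 0 (empty) to 2^31 (full), and the min()
 * above keeps a corrupt sector count from pushing it past that.
 */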

static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a)
{
	return ((u64) alloc_gc_gen(a) >> 4) << 56;
}

static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_v4 a)
{
	pos.offset |= alloc_freespace_genbits(a);
	return pos;
}
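
/*
 * Editorial note: alloc_freespace_genbits() folds the top four bits of
 * the gc gen (gc gen / 16) into bits 56..59 of the freespace btree
 * offset, e.g. a gc gen of 96 yields 6ULL << 56; the bucket offset
 * itself stays intact in the low bits, so a freespace key encodes both
 * where the bucket is and, coarsely, how stale its gen is.
 */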

static inline unsigned alloc_v4_u64s_noerror(const struct bch_alloc_v4 *a)
{
	return (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
			BCH_ALLOC_V4_U64s_V0) +
		BCH_ALLOC_V4_NR_BACKPOINTERS(a) *
		(sizeof(struct bch_backpointer) / sizeof(u64));
}
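
/*
 * Editorial note: the value size is the backpointers start offset in
 * u64s (falling back to the v0 layout when the field is zero) plus one
 * bch_backpointer's worth of u64s per stored backpointer;
 * alloc_v4_u64s() below is the checked variant, asserting the result
 * still fits in a bkey's u8 u64s field.
 */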

static inline unsigned alloc_v4_u64s(const struct bch_alloc_v4 *a)
{
	unsigned ret = alloc_v4_u64s_noerror(a);
	BUG_ON(ret > U8_MAX - BKEY_U64s);
	return ret;
}

static inline void set_alloc_v4_u64s(struct bkey_i_alloc_v4 *a)
{
	set_bkey_val_u64s(&a->k, alloc_v4_u64s(&a->v));
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update_noupdate(struct btree_trans *, struct btree_iter *, struct bpos);
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *, struct bpos,
			      enum btree_iter_update_trigger_flags);

void __bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);

static inline const struct bch_alloc_v4 *bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *convert)
{
	const struct bch_alloc_v4 *ret;

	if (unlikely(k.k->type != KEY_TYPE_alloc_v4))
		goto slowpath;

	ret = bkey_s_c_to_alloc_v4(k).v;
	if (BCH_ALLOC_V4_BACKPOINTERS_START(ret) != BCH_ALLOC_V4_U64s)
		goto slowpath;

	return ret;
slowpath:
	__bch2_alloc_to_v4(k, convert);
	return convert;
}
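
/*
 * Usage sketch (editorial): callers pass an on-stack bch_alloc_v4 to
 * receive the converted value when the key is an older version or has
 * a nonstandard backpointers start; the returned pointer aliases
 * either the key itself (fast path) or the caller's buffer:
 *
 *	struct bch_alloc_v4 a_convert;
 *	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
 */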

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c);

int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);

int bch2_alloc_v1_invalid(struct bch_fs *, struct bkey_s_c,
			  enum bch_validate_flags, struct printbuf *);
int bch2_alloc_v2_invalid(struct bch_fs *, struct bkey_s_c,
			  enum bch_validate_flags, struct printbuf *);
int bch2_alloc_v3_invalid(struct bch_fs *, struct bkey_s_c,
			  enum bch_validate_flags, struct printbuf *);
int bch2_alloc_v4_invalid(struct bch_fs *, struct bkey_s_c,
			  enum bch_validate_flags, struct printbuf *);
void bch2_alloc_v4_swab(struct bkey_s);
void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);

#define bch2_bkey_ops_alloc ((struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v1_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trigger	= bch2_trigger_alloc,		\
	.min_val_size	= 8,				\
})

#define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v2_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trigger	= bch2_trigger_alloc,		\
	.min_val_size	= 8,				\
})

#define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v3_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trigger	= bch2_trigger_alloc,		\
	.min_val_size	= 16,				\
})

#define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v4_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.swab		= bch2_alloc_v4_swab,		\
	.trigger	= bch2_trigger_alloc,		\
	.min_val_size	= 48,				\
})

int bch2_bucket_gens_invalid(struct bch_fs *, struct bkey_s_c,
			     enum bch_validate_flags, struct printbuf *);
void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);

#define bch2_bkey_ops_bucket_gens ((struct bkey_ops) {	\
	.key_invalid	= bch2_bucket_gens_invalid,	\
	.val_to_text	= bch2_bucket_gens_to_text,	\
})

int bch2_bucket_gens_init(struct bch_fs *);

static inline bool bkey_is_alloc(const struct bkey *k)
{
	return  k->type == KEY_TYPE_alloc ||
		k->type == KEY_TYPE_alloc_v2 ||
		k->type == KEY_TYPE_alloc_v3;
}

int bch2_alloc_read(struct bch_fs *);

int bch2_alloc_key_to_dev_counters(struct btree_trans *, struct bch_dev *,
				   const struct bch_alloc_v4 *,
				   const struct bch_alloc_v4 *, unsigned);
int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned,
		       struct bkey_s_c, struct bkey_s,
		       enum btree_iter_update_trigger_flags);
int bch2_check_alloc_info(struct bch_fs *);
int bch2_check_alloc_to_lru_refs(struct bch_fs *);
void bch2_dev_do_discards(struct bch_dev *);
void bch2_do_discards(struct bch_fs *);

static inline u64 should_invalidate_buckets(struct bch_dev *ca,
					    struct bch_dev_usage u)
{
	u64 want_free = ca->mi.nbuckets >> 7;
	u64 free = max_t(s64, 0,
			   u.d[BCH_DATA_free].buckets
			 + u.d[BCH_DATA_need_discard].buckets
			 - bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));

	return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
}
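
/*
 * Editorial worked example: with ca->mi.nbuckets == 1 << 20 the target
 * is 8192 free buckets (nbuckets / 128); if free + need_discard minus
 * the stripe-watermark reserve leaves only 5000, this returns 3192,
 * clamped to however many cached buckets actually exist.
 */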

void bch2_dev_do_invalidates(struct bch_dev *);
void bch2_do_invalidates(struct bch_fs *);

static inline struct bch_backpointer *alloc_v4_backpointers(struct bch_alloc_v4 *a)
{
	return (void *) ((u64 *) &a->v +
			 (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
			  BCH_ALLOC_V4_U64s_V0));
}

static inline const struct bch_backpointer *alloc_v4_backpointers_c(const struct bch_alloc_v4 *a)
{
	return (void *) ((u64 *) &a->v + BCH_ALLOC_V4_BACKPOINTERS_START(a));
}
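
/*
 * Editorial note: both accessors above locate the backpointers array by
 * indexing u64s from the start of the value; the mutable variant
 * tolerates a zero BCH_ALLOC_V4_BACKPOINTERS_START by falling back to
 * the v0 layout offset, while the const variant assumes the field is
 * already set.
 */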

int bch2_dev_freespace_init(struct bch_fs *, struct bch_dev *, u64, u64);
int bch2_fs_freespace_init(struct bch_fs *);

void bch2_recalc_capacity(struct bch_fs *);
u64 bch2_min_rw_member_capacity(struct bch_fs *);

void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);

void bch2_dev_allocator_background_exit(struct bch_dev *);
void bch2_dev_allocator_background_init(struct bch_dev *);

void bch2_fs_allocator_background_init(struct bch_fs *);

#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */