/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_ALLOC_BACKGROUND_H
#define _BCACHEFS_ALLOC_BACKGROUND_H

#include "bcachefs.h"
#include "alloc_types.h"
#include "buckets.h"
#include "debug.h"
#include "super.h"

enum bch_validate_flags;

/* How out of date a pointer gen is allowed to be: */
#define BUCKET_GC_GEN_MAX	96U

static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, pos.inode);
	bool ret = ca && bucket_valid(ca, pos.offset);
	rcu_read_unlock();
	return ret;
}

static inline u64 bucket_to_u64(struct bpos bucket)
{
	return (bucket.inode << 48) | bucket.offset;
}

static inline struct bpos u64_to_bucket(u64 bucket)
{
	return POS(bucket >> 48, bucket & ~(~0ULL << 48));
}
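
/*
 * Illustrative sketch, not a format guarantee: the device index is packed
 * into the top 16 bits and the bucket offset into the low 48, so for any
 * valid bucket pos the round trip should hold:
 *
 *	struct bpos pos = POS(2, 1000);
 *	u64 packed = bucket_to_u64(pos);	// (2ULL << 48) | 1000
 *	BUG_ON(!bpos_eq(u64_to_bucket(packed), pos));
 */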

static inline u8 alloc_gc_gen(struct bch_alloc_v4 a)
{
	return a.gen - a.oldest_gen;
}
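
/*
 * Worked example (illustrative): gen arithmetic is mod-256 in a u8, so
 * wraparound is handled naturally:
 *
 *	struct bch_alloc_v4 a = { .gen = 2, .oldest_gen = 250 };
 *	alloc_gc_gen(a);	// (u8) (2 - 250) == 8
 *
 * Once this distance reaches BUCKET_GC_GEN_MAX the bucket can't be
 * reused until gens are rewritten (see alloc_data_type() below).
 */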

static inline void alloc_to_bucket(struct bucket *dst, struct bch_alloc_v4 src)
{
	dst->gen		= src.gen;
	dst->data_type		= src.data_type;
	dst->stripe_sectors	= src.stripe_sectors;
	dst->dirty_sectors	= src.dirty_sectors;
	dst->cached_sectors	= src.cached_sectors;
	dst->stripe		= src.stripe;
}

static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst, struct bucket src)
{
	dst->gen		= src.gen;
	dst->data_type		= src.data_type;
	dst->stripe_sectors	= src.stripe_sectors;
	dst->dirty_sectors	= src.dirty_sectors;
	dst->cached_sectors	= src.cached_sectors;
	dst->stripe		= src.stripe;
}

static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
{
	struct bch_alloc_v4 ret = {};
	__bucket_m_to_alloc(&ret, b);
	return ret;
}

static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type)
{
	switch (data_type) {
	case BCH_DATA_cached:
	case BCH_DATA_stripe:
		return BCH_DATA_user;
	default:
		return data_type;
	}
}

static inline bool bucket_data_type_mismatch(enum bch_data_type bucket,
					     enum bch_data_type ptr)
{
	return !data_type_is_empty(bucket) &&
		bucket_data_type(bucket) != bucket_data_type(ptr);
}
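
/*
 * Example (illustrative): cached and stripe pointers both normalize to
 * BCH_DATA_user, and empty buckets never mismatch:
 *
 *	bucket_data_type_mismatch(BCH_DATA_user, BCH_DATA_cached);	// false
 *	bucket_data_type_mismatch(BCH_DATA_btree, BCH_DATA_user);	// true
 *	bucket_data_type_mismatch(BCH_DATA_free, BCH_DATA_user);	// false
 */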

/*
 * It is my general preference to use unsigned types for unsigned quantities -
 * however, these helpers are used in disk accounting calculations run by
 * triggers where the output will be negated and added to an s64. unsigned is
 * right out even though all these quantities will fit in 32 bits, since it
 * won't be sign extended correctly; u64 will negate "correctly", but s64 is
 * the simpler option here.
 */
static inline s64 bch2_bucket_sectors_total(struct bch_alloc_v4 a)
{
	return a.stripe_sectors + a.dirty_sectors + a.cached_sectors;
}

static inline s64 bch2_bucket_sectors_dirty(struct bch_alloc_v4 a)
{
	return a.stripe_sectors + a.dirty_sectors;
}

static inline s64 bch2_bucket_sectors(struct bch_alloc_v4 a)
{
	return a.data_type == BCH_DATA_cached
		? a.cached_sectors
		: bch2_bucket_sectors_dirty(a);
}

static inline s64 bch2_bucket_sectors_fragmented(struct bch_dev *ca,
						 struct bch_alloc_v4 a)
{
	int d = bch2_bucket_sectors(a);

	return d ? max(0, ca->mi.bucket_size - d) : 0;
}
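
/*
 * Worked example with illustrative numbers: a 512 sector bucket holding
 * 200 sectors of data has 512 - 200 == 312 fragmented sectors that
 * copygc could reclaim; a completely empty bucket counts as 0, not
 * bucket_size, since there's nothing to move out of it.
 */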

static inline s64 bch2_gc_bucket_sectors_fragmented(struct bch_dev *ca, struct bucket a)
{
	int d = a.stripe_sectors + a.dirty_sectors;

	return d ? max(0, ca->mi.bucket_size - d) : 0;
}

static inline s64 bch2_bucket_sectors_unstriped(struct bch_alloc_v4 a)
{
	return a.data_type == BCH_DATA_stripe ? a.dirty_sectors : 0;
}

static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
						 enum bch_data_type data_type)
{
	if (a.stripe)
		return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe;
	if (bch2_bucket_sectors_dirty(a))
		return data_type;
	if (a.cached_sectors)
		return BCH_DATA_cached;
	if (BCH_ALLOC_V4_NEED_DISCARD(&a))
		return BCH_DATA_need_discard;
	if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX)
		return BCH_DATA_need_gc_gens;
	return BCH_DATA_free;
}
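
/*
 * Sketch of the precedence above (illustrative): stripe membership wins
 * over everything, then dirty data, then cached data; an empty bucket
 * that hasn't been discarded yet reports need_discard rather than free:
 *
 *	struct bch_alloc_v4 a = {};
 *	SET_BCH_ALLOC_V4_NEED_DISCARD(&a, true);
 *	alloc_data_type(a, BCH_DATA_user);	// == BCH_DATA_need_discard
 */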

static inline void alloc_data_type_set(struct bch_alloc_v4 *a, enum bch_data_type data_type)
{
	a->data_type = alloc_data_type(*a, data_type);
}

static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a)
{
	return a.data_type == BCH_DATA_cached
		? a.io_time[READ] & LRU_TIME_MAX
		: 0;
}

#define DATA_TYPES_MOVABLE		\
	((1U << BCH_DATA_btree)|	\
	 (1U << BCH_DATA_user)|		\
	 (1U << BCH_DATA_stripe))

static inline bool data_type_movable(enum bch_data_type type)
{
	return (1U << type) & DATA_TYPES_MOVABLE;
}

static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
					      struct bch_dev *ca)
{
	if (a.data_type >= BCH_DATA_NR)
		return 0;

	if (!data_type_movable(a.data_type) ||
	    !bch2_bucket_sectors_fragmented(ca, a))
		return 0;

	/*
	 * avoid overflowing LRU_TIME_BITS on a corrupted fs, when
	 * bucket_sectors_dirty is (much) bigger than bucket_size
	 */
	u64 d = min_t(s64, bch2_bucket_sectors_dirty(a),
		      ca->mi.bucket_size);

	return div_u64(d * (1ULL << 31), ca->mi.bucket_size);
}
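
/*
 * Illustrative arithmetic: with bucket_size 512 and 128 dirty sectors,
 *
 *	div_u64(128 * (1ULL << 31), 512) == 1ULL << 29
 *
 * i.e. a fixed-point fraction of 2^31 proportional to how full the
 * bucket is, used to order buckets in the fragmentation LRU.
 */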

static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a)
{
	return ((u64) alloc_gc_gen(a) >> 4) << 56;
}

static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_v4 a)
{
	pos.offset |= alloc_freespace_genbits(a);
	return pos;
}
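
/*
 * Sketch (illustrative): the high bits of a freespace btree key encode
 * the gc gen, coarsened to 16 steps, so buckets approaching
 * BUCKET_GC_GEN_MAX sort apart from fresh ones. With
 * alloc_gc_gen(a) == 32:
 *
 *	alloc_freespace_genbits(a);	// (32 >> 4) << 56 == 2ULL << 56
 */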

static inline unsigned alloc_v4_u64s_noerror(const struct bch_alloc_v4 *a)
{
	return (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
			BCH_ALLOC_V4_U64s_V0) +
		BCH_ALLOC_V4_NR_BACKPOINTERS(a) *
		(sizeof(struct bch_backpointer) / sizeof(u64));
}
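
/*
 * Worked example (illustrative): a value whose backpointers start at
 * BCH_ALLOC_V4_U64s and which carries two backpointers occupies
 *
 *	BCH_ALLOC_V4_U64s + 2 * (sizeof(struct bch_backpointer) / sizeof(u64))
 *
 * u64s; values from before backpointers existed have a zero
 * BACKPOINTERS_START and fall back to BCH_ALLOC_V4_U64s_V0.
 */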

static inline unsigned alloc_v4_u64s(const struct bch_alloc_v4 *a)
{
	unsigned ret = alloc_v4_u64s_noerror(a);

	BUG_ON(ret > U8_MAX - BKEY_U64s);
	return ret;
}

static inline void set_alloc_v4_u64s(struct bkey_i_alloc_v4 *a)
{
	set_bkey_val_u64s(&a->k, alloc_v4_u64s(&a->v));
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update_noupdate(struct btree_trans *, struct btree_iter *, struct bpos);
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *, struct bpos,
			      enum btree_iter_update_trigger_flags);

void __bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);

static inline const struct bch_alloc_v4 *bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *convert)
{
	const struct bch_alloc_v4 *ret;

	if (unlikely(k.k->type != KEY_TYPE_alloc_v4))
		goto slowpath;

	ret = bkey_s_c_to_alloc_v4(k).v;
	if (BCH_ALLOC_V4_BACKPOINTERS_START(ret) != BCH_ALLOC_V4_U64s)
		goto slowpath;

	return ret;
slowpath:
	__bch2_alloc_to_v4(k, convert);
	return convert;
}
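
/*
 * Typical usage, a minimal sketch with error handling omitted: callers
 * keep a struct bch_alloc_v4 on the stack for the conversion slowpath;
 * the returned pointer may reference either the original key or the
 * stack copy, so it is only valid as long as both are:
 *
 *	struct bch_alloc_v4 a_convert;
 *	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
 */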

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c);

int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);

int bch2_alloc_v1_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
int bch2_alloc_v2_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
int bch2_alloc_v3_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
int bch2_alloc_v4_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
void bch2_alloc_v4_swab(struct bkey_s);
void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);

#define bch2_bkey_ops_alloc ((struct bkey_ops) {	\
	.key_validate	= bch2_alloc_v1_validate,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trigger	= bch2_trigger_alloc,		\
	.min_val_size	= 8,				\
})

#define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) {	\
	.key_validate	= bch2_alloc_v2_validate,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trigger	= bch2_trigger_alloc,		\
	.min_val_size	= 8,				\
})

#define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) {	\
	.key_validate	= bch2_alloc_v3_validate,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trigger	= bch2_trigger_alloc,		\
	.min_val_size	= 16,				\
})

#define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) {	\
	.key_validate	= bch2_alloc_v4_validate,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.swab		= bch2_alloc_v4_swab,		\
	.trigger	= bch2_trigger_alloc,		\
	.min_val_size	= 48,				\
})

int bch2_bucket_gens_validate(struct bch_fs *, struct bkey_s_c,
			      enum bch_validate_flags);
void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);

#define bch2_bkey_ops_bucket_gens ((struct bkey_ops) {	\
	.key_validate	= bch2_bucket_gens_validate,	\
	.val_to_text	= bch2_bucket_gens_to_text,	\
})

int bch2_bucket_gens_init(struct bch_fs *);

static inline bool bkey_is_alloc(const struct bkey *k)
{
	return k->type == KEY_TYPE_alloc ||
	       k->type == KEY_TYPE_alloc_v2 ||
	       k->type == KEY_TYPE_alloc_v3;
}

int bch2_alloc_read(struct bch_fs *);

int bch2_alloc_key_to_dev_counters(struct btree_trans *, struct bch_dev *,
				   const struct bch_alloc_v4 *,
				   const struct bch_alloc_v4 *, unsigned);
int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned,
		       struct bkey_s_c, struct bkey_s,
		       enum btree_iter_update_trigger_flags);
int bch2_check_alloc_info(struct bch_fs *);
int bch2_check_alloc_to_lru_refs(struct bch_fs *);
void bch2_dev_do_discards(struct bch_dev *);
void bch2_do_discards(struct bch_fs *);

static inline u64 should_invalidate_buckets(struct bch_dev *ca,
					    struct bch_dev_usage u)
{
	u64 want_free = ca->mi.nbuckets >> 7;
	u64 free = max_t(s64, 0,
			 u.d[BCH_DATA_free].buckets
			 + u.d[BCH_DATA_need_discard].buckets
			 - bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));

	return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
}
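
/*
 * Worked example (illustrative numbers): the target is ~1/128th of the
 * device's buckets free. On a 1048576 bucket device with 4096 free,
 * 2048 awaiting discard, and no stripe reserve:
 *
 *	want_free = 1048576 >> 7	== 8192
 *	free	  = 4096 + 2048		== 6144
 *	return	    8192 - 6144		== 2048
 *
 * clamped to the number of cached buckets actually available to
 * invalidate.
 */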

void bch2_dev_do_invalidates(struct bch_dev *);
void bch2_do_invalidates(struct bch_fs *);

static inline struct bch_backpointer *alloc_v4_backpointers(struct bch_alloc_v4 *a)
{
	return (void *) ((u64 *) &a->v +
			 (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
			  BCH_ALLOC_V4_U64s_V0));
}

static inline const struct bch_backpointer *alloc_v4_backpointers_c(const struct bch_alloc_v4 *a)
{
	return (void *) ((u64 *) &a->v + BCH_ALLOC_V4_BACKPOINTERS_START(a));
}

int bch2_dev_freespace_init(struct bch_fs *, struct bch_dev *, u64, u64);
int bch2_fs_freespace_init(struct bch_fs *);
int bch2_dev_remove_alloc(struct bch_fs *, struct bch_dev *);

void bch2_recalc_capacity(struct bch_fs *);
u64 bch2_min_rw_member_capacity(struct bch_fs *);

void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);

void bch2_dev_allocator_background_exit(struct bch_dev *);
void bch2_dev_allocator_background_init(struct bch_dev *);

void bch2_fs_allocator_background_init(struct bch_fs *);

#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */