xref: /linux/fs/bcachefs/alloc_background.h (revision 4eca0ef49af9b2b0c52ef2b58e045ab34629796b)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_ALLOC_BACKGROUND_H
#define _BCACHEFS_ALLOC_BACKGROUND_H

#include "bcachefs.h"
#include "alloc_types.h"
#include "buckets.h"
#include "debug.h"
#include "super.h"

enum bkey_invalid_flags;

/* How out of date a pointer gen is allowed to be: */
#define BUCKET_GC_GEN_MAX	96U

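/*
 * Alloc btree keys are indexed by device (pos.inode) and bucket number
 * (pos.offset); check that a key position refers to a bucket that actually
 * exists on a member device:
 */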
static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
{
	struct bch_dev *ca;

	if (!bch2_dev_exists2(c, pos.inode))
		return false;

	ca = bch_dev_bkey_exists(c, pos.inode);
	return pos.offset >= ca->mi.first_bucket &&
		pos.offset < ca->mi.nbuckets;
}

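/*
 * Pack a device:bucket position into a single u64 - device index in the high
 * 16 bits, bucket number in the low 48 - and unpack it again; e.g. bucket
 * 0x1234 on device 2 packs to (2ULL << 48) | 0x1234:
 */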
static inline u64 bucket_to_u64(struct bpos bucket)
{
	return (bucket.inode << 48) | bucket.offset;
}

static inline struct bpos u64_to_bucket(u64 bucket)
{
	return POS(bucket >> 48, bucket & ~(~0ULL << 48));
}

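/*
 * Gap between a bucket's current generation and the oldest gen a pointer to
 * it may still carry; once this reaches BUCKET_GC_GEN_MAX the bucket is
 * reported as BCH_DATA_need_gc_gens below:
 */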
static inline u8 alloc_gc_gen(struct bch_alloc_v4 a)
{
	return a.gen - a.oldest_gen;
}

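/*
 * Classify a bucket for accounting and allocation, in priority order:
 * stripe/parity, dirty data, cached data, awaiting discard, gc gen gap too
 * large, and finally free:
 */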
static inline enum bch_data_type __alloc_data_type(u32 dirty_sectors,
						   u32 cached_sectors,
						   u32 stripe,
						   struct bch_alloc_v4 a,
						   enum bch_data_type data_type)
{
	if (stripe)
		return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe;
	if (dirty_sectors)
		return data_type;
	if (cached_sectors)
		return BCH_DATA_cached;
	if (BCH_ALLOC_V4_NEED_DISCARD(&a))
		return BCH_DATA_need_discard;
	if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX)
		return BCH_DATA_need_gc_gens;
	return BCH_DATA_free;
}

static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
						 enum bch_data_type data_type)
{
	return __alloc_data_type(a.dirty_sectors, a.cached_sectors,
				 a.stripe, a, data_type);
}

static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type)
{
	return data_type == BCH_DATA_stripe ? BCH_DATA_user : data_type;
}

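/*
 * Index into the cached-data LRU: buckets holding only cached data are keyed
 * by their last read time (so the least recently read can be invalidated
 * first); other buckets return 0 and aren't on this LRU:
 */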
static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a)
{
	return a.data_type == BCH_DATA_cached ? a.io_time[READ] : 0;
}

#define DATA_TYPES_MOVABLE		\
	((1U << BCH_DATA_btree)|	\
	 (1U << BCH_DATA_user)|		\
	 (1U << BCH_DATA_stripe))

static inline bool data_type_movable(enum bch_data_type type)
{
	return (1U << type) & DATA_TYPES_MOVABLE;
}

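/*
 * Index into the fragmentation LRU: for movable, partially full buckets this
 * is the dirty fraction of the bucket scaled to 31 bits; full or unmovable
 * buckets return 0 and stay off the fragmentation LRU:
 */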
static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
					      struct bch_dev *ca)
{
	if (!data_type_movable(a.data_type) ||
	    a.dirty_sectors >= ca->mi.bucket_size)
		return 0;

	return div_u64((u64) a.dirty_sectors * (1ULL << 31), ca->mi.bucket_size);
}

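/*
 * Freespace btree keys fold the bucket's gc gen (divided by 16) into the high
 * bits of the offset, so buckets whose pointer gens have drifted further sort
 * after fresher ones:
 */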
static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a)
{
	return ((u64) alloc_gc_gen(a) >> 4) << 56;
}

static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_v4 a)
{
	pos.offset |= alloc_freespace_genbits(a);
	return pos;
}

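/*
 * Size of an alloc_v4 value in u64s: the fixed fields (or the older v0 layout
 * when BACKPOINTERS_START is unset) plus any backpointers stored inline in
 * the value:
 */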
static inline unsigned alloc_v4_u64s(const struct bch_alloc_v4 *a)
{
	unsigned ret = (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
			BCH_ALLOC_V4_U64s_V0) +
		BCH_ALLOC_V4_NR_BACKPOINTERS(a) *
		(sizeof(struct bch_backpointer) / sizeof(u64));

	BUG_ON(ret > U8_MAX - BKEY_U64s);
	return ret;
}

static inline void set_alloc_v4_u64s(struct bkey_i_alloc_v4 *a)
{
	set_bkey_val_u64s(&a->k, alloc_v4_u64s(&a->v));
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *, struct btree_iter *, struct bpos);

void __bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);

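/*
 * Get a key's alloc information in the current (v4) layout: if the key is
 * already alloc_v4 with backpointers in the current position we return a
 * pointer directly into it, otherwise it's converted into the buffer the
 * caller provides:
 */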
static inline const struct bch_alloc_v4 *bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *convert)
{
	const struct bch_alloc_v4 *ret;

	if (unlikely(k.k->type != KEY_TYPE_alloc_v4))
		goto slowpath;

	ret = bkey_s_c_to_alloc_v4(k).v;
	if (BCH_ALLOC_V4_BACKPOINTERS_START(ret) != BCH_ALLOC_V4_U64s)
		goto slowpath;

	return ret;
slowpath:
	__bch2_alloc_to_v4(k, convert);
	return convert;
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c);

int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);

int bch2_alloc_v1_invalid(struct bch_fs *, struct bkey_s_c,
			  enum bkey_invalid_flags, struct printbuf *);
int bch2_alloc_v2_invalid(struct bch_fs *, struct bkey_s_c,
			  enum bkey_invalid_flags, struct printbuf *);
int bch2_alloc_v3_invalid(struct bch_fs *, struct bkey_s_c,
			  enum bkey_invalid_flags, struct printbuf *);
int bch2_alloc_v4_invalid(struct bch_fs *, struct bkey_s_c,
			  enum bkey_invalid_flags, struct printbuf *);
void bch2_alloc_v4_swab(struct bkey_s);
void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);

#define bch2_bkey_ops_alloc ((struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v1_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trans_trigger	= bch2_trans_mark_alloc,	\
	.atomic_trigger	= bch2_mark_alloc,		\
	.min_val_size	= 8,				\
})

#define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v2_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trans_trigger	= bch2_trans_mark_alloc,	\
	.atomic_trigger	= bch2_mark_alloc,		\
	.min_val_size	= 8,				\
})

#define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v3_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trans_trigger	= bch2_trans_mark_alloc,	\
	.atomic_trigger	= bch2_mark_alloc,		\
	.min_val_size	= 16,				\
})

#define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v4_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.swab		= bch2_alloc_v4_swab,		\
	.trans_trigger	= bch2_trans_mark_alloc,	\
	.atomic_trigger	= bch2_mark_alloc,		\
	.min_val_size	= 48,				\
})

int bch2_bucket_gens_invalid(struct bch_fs *, struct bkey_s_c,
			     enum bkey_invalid_flags, struct printbuf *);
void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);

#define bch2_bkey_ops_bucket_gens ((struct bkey_ops) {	\
	.key_invalid	= bch2_bucket_gens_invalid,	\
	.val_to_text	= bch2_bucket_gens_to_text,	\
})

int bch2_bucket_gens_init(struct bch_fs *);

static inline bool bkey_is_alloc(const struct bkey *k)
{
	return  k->type == KEY_TYPE_alloc ||
		k->type == KEY_TYPE_alloc_v2 ||
		k->type == KEY_TYPE_alloc_v3;
}

int bch2_alloc_read(struct bch_fs *);

int bch2_trans_mark_alloc(struct btree_trans *, enum btree_id, unsigned,
			  struct bkey_s_c, struct bkey_i *, unsigned);
int bch2_check_alloc_info(struct bch_fs *);
int bch2_check_alloc_to_lru_refs(struct bch_fs *);
void bch2_do_discards(struct bch_fs *);

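/*
 * We aim to keep roughly 1/128th of each device's buckets free (counting
 * buckets awaiting discard, minus the stripe reservation); returns how many
 * cached buckets should be invalidated to get back to that target:
 */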
static inline u64 should_invalidate_buckets(struct bch_dev *ca,
					    struct bch_dev_usage u)
{
	u64 want_free = ca->mi.nbuckets >> 7;
	u64 free = max_t(s64, 0,
			   u.d[BCH_DATA_free].buckets
			 + u.d[BCH_DATA_need_discard].buckets
			 - bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));

	return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
}

void bch2_do_invalidates(struct bch_fs *);

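/*
 * Backpointers stored inline in an alloc_v4 value start
 * BCH_ALLOC_V4_BACKPOINTERS_START u64s into the value (falling back to the v0
 * layout offset in the mutable variant when that field is unset):
 */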
static inline struct bch_backpointer *alloc_v4_backpointers(struct bch_alloc_v4 *a)
{
	return (void *) ((u64 *) &a->v +
			 (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
			  BCH_ALLOC_V4_U64s_V0));
}

static inline const struct bch_backpointer *alloc_v4_backpointers_c(const struct bch_alloc_v4 *a)
{
	return (void *) ((u64 *) &a->v + BCH_ALLOC_V4_BACKPOINTERS_START(a));
}

int bch2_dev_freespace_init(struct bch_fs *, struct bch_dev *, u64, u64);
int bch2_fs_freespace_init(struct bch_fs *);

void bch2_recalc_capacity(struct bch_fs *);
u64 bch2_min_rw_member_capacity(struct bch_fs *);

void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);

void bch2_fs_allocator_background_init(struct bch_fs *);

#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */