/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_ALLOC_FOREGROUND_H
#define _BCACHEFS_ALLOC_FOREGROUND_H

#include "bcachefs.h"
#include "buckets.h"
#include "alloc_types.h"
#include "extents.h"
#include "io_write_types.h"
#include "sb-members.h"

#include <linux/hash.h>

struct bkey;
struct bch_dev;
struct bch_fs;
struct bch_devs_list;

extern const char * const bch2_watermarks[];

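/*
 * Reset the per-device cursors that remember where the previous bucket scan
 * left off, so the next allocation scans each device from the start again:
 */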
void bch2_reset_alloc_cursors(struct bch_fs *);

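/* Device indices to try allocating from, in order of preference: */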
struct dev_alloc_list {
	unsigned	nr;
	u8		data[BCH_SB_MEMBERS_MAX];
};

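/*
 * State for a single foreground allocation, threaded through the allocation
 * paths below: the caller fills in the parameters at the top, the remaining
 * fields are working state for the allocator:
 */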
struct alloc_request {
	unsigned		nr_replicas;
	unsigned		target;
	bool			ec;
	enum bch_watermark	watermark;
	enum bch_write_flags	flags;
	enum bch_data_type	data_type;
	struct bch_devs_list	*devs_have;
	struct write_point	*wp;

	/* These fields are used primarily by open_bucket_add_buckets */
	struct open_buckets	ptrs;
	unsigned		nr_effective;	/* sum of @ptrs durability */
	bool			have_cache;	/* have we allocated from a 0 durability dev */
	struct bch_devs_mask	devs_may_alloc;

	/* bch2_bucket_alloc_set_trans(): */
	struct dev_alloc_list	devs_sorted;
	struct bch_dev_usage	usage;

	/* bch2_bucket_alloc_trans(): */
	struct bch_dev		*ca;

	enum {
				BTREE_BITMAP_NO,
				BTREE_BITMAP_YES,
				BTREE_BITMAP_ANY,
	}			btree_bitmap;

	struct {
		u64		buckets_seen;
		u64		skipped_open;
		u64		skipped_need_journal_commit;
		u64		need_journal_commit;
		u64		skipped_nocow;
		u64		skipped_nouse;
		u64		skipped_mi_btree_bitmap;
	} counters;

	unsigned		scratch_nr_replicas;
	unsigned		scratch_nr_effective;
	bool			scratch_have_cache;
	enum bch_data_type	scratch_data_type;
	struct open_buckets	scratch_ptrs;
	struct bch_devs_mask	scratch_devs_may_alloc;
};

void bch2_dev_alloc_list(struct bch_fs *,
			 struct dev_stripe_state *,
			 struct bch_devs_mask *,
			 struct dev_alloc_list *);
void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);

static inline struct bch_dev *ob_dev(struct bch_fs *c, struct open_bucket *ob)
{
	return bch2_dev_have_ref(c, ob->dev);
}

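/*
 * Portion of the open bucket table that allocations at @watermark must leave
 * in reserve: more critical watermarks (interior updates, reclaim) may dig
 * deeper into the table, so that copygc and journal reclaim can't be starved
 * of open buckets by ordinary foreground writes:
 */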
static inline unsigned bch2_open_buckets_reserved(enum bch_watermark watermark)
{
	switch (watermark) {
	case BCH_WATERMARK_interior_updates:
		return 0;
	case BCH_WATERMARK_reclaim:
		return OPEN_BUCKETS_COUNT / 6;
	case BCH_WATERMARK_btree:
	case BCH_WATERMARK_btree_copygc:
		return OPEN_BUCKETS_COUNT / 4;
	case BCH_WATERMARK_copygc:
		return OPEN_BUCKETS_COUNT / 3;
	default:
		return OPEN_BUCKETS_COUNT / 2;
	}
}

struct open_bucket *bch2_bucket_alloc(struct bch_fs *, struct bch_dev *,
				      enum bch_watermark, enum bch_data_type,
				      struct closure *);

static inline void ob_push(struct bch_fs *c, struct open_buckets *obs,
			   struct open_bucket *ob)
{
	BUG_ON(obs->nr >= ARRAY_SIZE(obs->v));

	obs->v[obs->nr++] = ob - c->open_buckets;
}

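/*
 * Iterate over @_obs, resolving each stored index to a struct open_bucket in
 * @_c's open bucket table:
 */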
#define open_bucket_for_each(_c, _obs, _ob, _i)				\
	for ((_i) = 0;							\
	     (_i) < (_obs)->nr &&					\
	     ((_ob) = (_c)->open_buckets + (_obs)->v[_i], true);	\
	     (_i)++)

static inline struct open_bucket *ec_open_bucket(struct bch_fs *c,
						 struct open_buckets *obs)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->ec)
			return ob;

	return NULL;
}

void bch2_open_bucket_write_error(struct bch_fs *,
			struct open_buckets *, unsigned, int);

void __bch2_open_bucket_put(struct bch_fs *, struct open_bucket *);

static inline void bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	if (atomic_dec_and_test(&ob->pin))
		__bch2_open_bucket_put(c, ob);
}

static inline void bch2_open_buckets_put(struct bch_fs *c,
					 struct open_buckets *ptrs)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, ptrs, ob, i)
		bch2_open_bucket_put(c, ob);
	ptrs->nr = 0;
}

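/*
 * Finished writing to @wp: keep the open buckets that still have at least a
 * full block of free space, and drop our references to the rest (outside
 * wp->lock):
 */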
static inline void bch2_alloc_sectors_done_inlined(struct bch_fs *c, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, ob->sectors_free < block_sectors(c)
			? &ptrs
			: &keep, ob);
	wp->ptrs = keep;

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
}

static inline void bch2_open_bucket_get(struct bch_fs *c,
					struct write_point *wp,
					struct open_buckets *ptrs)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		ob->data_type = wp->data_type;
		atomic_inc(&ob->pin);
		ob_push(c, ptrs, ob);
	}
}

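/*
 * Open buckets live in a hash table keyed on (device, bucket) and chained via
 * open_bucket->hash, so code elsewhere can cheaply check whether a given
 * bucket is currently being written to:
 */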
static inline open_bucket_idx_t *open_bucket_hashslot(struct bch_fs *c,
						  unsigned dev, u64 bucket)
{
	return c->open_buckets_hash +
		(jhash_3words(dev, bucket, bucket >> 32, 0) &
		 (OPEN_BUCKETS_COUNT - 1));
}

static inline bool bch2_bucket_is_open(struct bch_fs *c, unsigned dev, u64 bucket)
{
	open_bucket_idx_t slot = *open_bucket_hashslot(c, dev, bucket);

	while (slot) {
		struct open_bucket *ob = &c->open_buckets[slot];

		if (ob->dev == dev && ob->bucket == bucket)
			return true;

		slot = ob->hash;
	}

	return false;
}

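/*
 * Double-checked version: a lockless hit is trustworthy, but on a miss we
 * recheck under freelist_lock, which serializes insertion into the hash,
 * before believing the bucket really isn't open:
 */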
static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64 bucket)
{
	bool ret;

	if (bch2_bucket_is_open(c, dev, bucket))
		return true;

	spin_lock(&c->freelist_lock);
	ret = bch2_bucket_is_open(c, dev, bucket);
	spin_unlock(&c->freelist_lock);

	return ret;
}

enum bch_write_flags;
int bch2_bucket_alloc_set_trans(struct btree_trans *, struct alloc_request *,
				struct dev_stripe_state *, struct closure *);

int bch2_alloc_sectors_start_trans(struct btree_trans *,
				   unsigned, unsigned,
				   struct write_point_specifier,
				   struct bch_devs_list *,
				   unsigned, unsigned,
				   enum bch_watermark,
				   enum bch_write_flags,
				   struct closure *,
				   struct write_point **);
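/*
 * Typical calling sequence - a sketch only; argument order follows the
 * declaration above, and the names here (trans, target, devs_have, cl, ...)
 * are illustrative rather than taken from any particular caller:
 *
 *	struct write_point *wp;
 *	int ret = bch2_alloc_sectors_start_trans(trans, target, 0,
 *			writepoint_hashed((unsigned long) current),
 *			&devs_have, nr_replicas, nr_replicas_required,
 *			BCH_WATERMARK_normal, 0, cl, &wp);
 *	if (!ret) {
 *		bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, false);
 *		bch2_alloc_sectors_done(c, wp);
 *	}
 */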
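/*
 * Construct an extent pointer to the first unwritten sector of @ob - the
 * bucket's remaining free space sits at its end:
 */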
static inline struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = ob_dev(c, ob);

	return (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= ob->gen,
		.dev	= ob->dev,
		.offset	= bucket_to_sector(ca, ob->bucket) +
			ca->mi.bucket_size -
			ob->sectors_free,
	};
}

/*
 * Append pointers to the space we just allocated to @k, and mark @sectors of
 * that space as allocated out of @ob:
 */
static inline void
bch2_alloc_sectors_append_ptrs_inlined(struct bch_fs *c, struct write_point *wp,
				       struct bkey_i *k, unsigned sectors,
				       bool cached)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free	-= sectors;
	wp->sectors_allocated	+= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = ob_dev(c, ob);
		struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);

		ptr.cached = cached ||
			(!ca->mi.durability &&
			 wp->data_type == BCH_DATA_user);

		bch2_bkey_append_ptr(k, ptr);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}

void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
				    struct bkey_i *, unsigned, bool);
void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *);

void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *, bool);

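/*
 * A write_point_specifier is either a pointer to a struct write_point, or a
 * hash value tagged by setting the low bit; since real pointers are at least
 * word aligned, the two can never collide:
 */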
static inline struct write_point_specifier writepoint_hashed(unsigned long v)
{
	return (struct write_point_specifier) { .v = v | 1 };
}

static inline struct write_point_specifier writepoint_ptr(struct write_point *wp)
{
	return (struct write_point_specifier) { .v = (unsigned long) wp };
}

void bch2_fs_allocator_foreground_init(struct bch_fs *);

void bch2_open_bucket_to_text(struct printbuf *, struct bch_fs *, struct open_bucket *);
void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *, struct bch_dev *);
void bch2_open_buckets_partial_to_text(struct printbuf *, struct bch_fs *);

void bch2_write_points_to_text(struct printbuf *, struct bch_fs *);

void bch2_fs_alloc_debug_to_text(struct printbuf *, struct bch_fs *);
void bch2_dev_alloc_debug_to_text(struct printbuf *, struct bch_dev *);

void __bch2_wait_on_allocator(struct bch_fs *, struct closure *);
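/*
 * Fast path: only take the slow path if the allocator actually took a ref on
 * @cl, i.e. we were queued on a waitlist and may be woken later:
 */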
static inline void bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
{
	if (cl->closure_get_happened)
		__bch2_wait_on_allocator(c, cl);
}

#endif /* _BCACHEFS_ALLOC_FOREGROUND_H */