/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SB_MEMBERS_H
#define _BCACHEFS_SB_MEMBERS_H

#include "darray.h"
#include "bkey_types.h"

extern char * const bch2_member_error_strs[];

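/*
 * mi->member_bytes is the on-disk stride of a member entry, so indexing
 * stays correct even when the superblock was written with a different
 * struct bch_member size:
 */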
static inline struct bch_member *
__bch2_members_v2_get_mut(struct bch_sb_field_members_v2 *mi, unsigned i)
{
	return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
}

int bch2_sb_members_v2_init(struct bch_fs *c);
int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);

static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
	return !percpu_ref_is_zero(&ca->io_ref[READ]);
}

static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *, unsigned);

static inline bool bch2_dev_idx_is_online(struct bch_fs *c, unsigned dev)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, dev);
	bool ret = ca && bch2_dev_is_online(ca);
	rcu_read_unlock();

	return ret;
}

static inline bool bch2_dev_is_healthy(struct bch_dev *ca)
{
	return bch2_dev_is_online(ca) &&
		ca->mi.state != BCH_MEMBER_STATE_failed;
}

static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
	return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}

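/*
 * bch_devs_list is a small fixed-capacity list of device indices; these
 * helpers all do linear scans, which is fine at this size:
 */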
static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
					 unsigned dev)
{
	darray_for_each(devs, i)
		if (*i == dev)
			return true;
	return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	darray_for_each(*devs, i)
		if (*i == dev) {
			darray_remove_item(devs, i);
			return;
		}
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
					 unsigned dev)
{
	if (!bch2_dev_list_has_dev(*devs, dev)) {
		BUG_ON(devs->nr >= ARRAY_SIZE(devs->data));
		devs->data[devs->nr++] = dev;
	}
}

static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
	return (struct bch_devs_list) { .nr = 1, .data[0] = dev };
}

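/*
 * Walk c->devs[] for the next present device at or after idx, optionally
 * restricted to mask; caller must hold rcu_read_lock() or state_lock:
 */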
static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
						  const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	while ((idx = mask
		? find_next_bit(mask->d, c->sb.nr_devices, idx)
		: idx) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[idx],
					    lockdep_is_held(&c->state_lock))))
		idx++;

	return ca;
}

static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca,
					      const struct bch_devs_mask *mask)
{
	return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask);
}

#define for_each_member_device_rcu(_c, _ca, _mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = __bch2_next_dev((_c), _ca, (_mask)));)

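/*
 * Device refs: in debug builds a bare atomic_long, so that an underflow
 * can panic and report the last put's caller; otherwise a percpu_ref:
 */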
static inline void bch2_dev_get(struct bch_dev *ca)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	BUG_ON(atomic_long_inc_return(&ca->ref) <= 1L);
#else
	percpu_ref_get(&ca->ref);
#endif
}

static inline void __bch2_dev_put(struct bch_dev *ca)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	long r = atomic_long_dec_return(&ca->ref);
	if (r < (long) !ca->dying)
		panic("bch_dev->ref underflow, last put: %pS\n", (void *) ca->last_put);
	ca->last_put = _THIS_IP_;
	if (!r)
		complete(&ca->ref_completion);
#else
	percpu_ref_put(&ca->ref);
#endif
}

static inline void bch2_dev_put(struct bch_dev *ca)
{
	if (ca)
		__bch2_dev_put(ca);
}

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
{
	rcu_read_lock();
	bch2_dev_put(ca);
	if ((ca = __bch2_next_dev(c, ca, NULL)))
		bch2_dev_get(ca);
	rcu_read_unlock();

	return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define __for_each_member_device(_c, _ca)				\
	for (;	(_ca = bch2_get_next_dev(_c, _ca));)

#define for_each_member_device(_c, _ca)					\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_dev(_c, _ca));)
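
/*
 * Example (sketch):
 *
 *	for_each_member_device(c, ca)
 *		pr_debug("dev %u %s\n", ca->dev_idx,
 *			 bch2_dev_is_online(ca) ? "online" : "offline");
 */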

static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       struct bch_dev *ca,
						       unsigned state_mask,
						       int rw)
{
	rcu_read_lock();
	if (ca)
		percpu_ref_put(&ca->io_ref[rw]);

	while ((ca = __bch2_next_dev(c, ca, NULL)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref[rw])))
		;
	rcu_read_unlock();

	return ca;
}

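/*
 * Iterate over online members whose state is in state_mask, holding
 * io_ref[rw] across each loop body; the ref is dropped when the iterator
 * advances (break early and you must drop it yourself):
 */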
#define __for_each_online_member(_c, _ca, state_mask, rw)		\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_online_dev(_c, _ca, state_mask, rw));)

#define for_each_online_member(c, ca)					\
	__for_each_online_member(c, ca, ~0, READ)

#define for_each_rw_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), WRITE)

#define for_each_readable_member(c, ca)				\
	__for_each_online_member(c, ca,	BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro), READ)

static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
{
	return dev < c->sb.nr_devices && c->devs[dev];
}

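/*
 * One unsigned comparison checks both bounds: b below first_bucket wraps
 * around and fails the test as well:
 */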
static inline bool bucket_valid(const struct bch_dev *ca, u64 b)
{
	return b - ca->mi.first_bucket < ca->mi.nbuckets_minus_first;
}

static inline struct bch_dev *bch2_dev_have_ref(const struct bch_fs *c, unsigned dev)
{
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_check(c->devs[dev], 1);
}

static inline struct bch_dev *bch2_dev_locked(struct bch_fs *c, unsigned dev)
{
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_protected(c->devs[dev],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}

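/*
 * Lookup by device index: the _rcu variants require rcu_read_lock(), and
 * the _noerror variants skip logging when the device is missing:
 */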
static inline struct bch_dev *bch2_dev_rcu_noerror(struct bch_fs *c, unsigned dev)
{
	return c && dev < c->sb.nr_devices
		? rcu_dereference(c->devs[dev])
		: NULL;
}

void bch2_dev_missing(struct bch_fs *, unsigned);

static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
	if (unlikely(!ca))
		bch2_dev_missing(c, dev);
	return ca;
}

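/*
 * The tryget variants also take a ref, released with bch2_dev_put(); the
 * bucket versions additionally validate bucket.offset against the
 * device's bucket range:
 */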
static inline struct bch_dev *bch2_dev_tryget_noerror(struct bch_fs *c, unsigned dev)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
	if (ca)
		bch2_dev_get(ca);
	rcu_read_unlock();
	return ca;
}

static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
	if (unlikely(!ca))
		bch2_dev_missing(c, dev);
	return ca;
}

static inline struct bch_dev *bch2_dev_bucket_tryget_noerror(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, bucket.inode);
	if (ca && !bucket_valid(ca, bucket.offset)) {
		bch2_dev_put(ca);
		ca = NULL;
	}
	return ca;
}

void bch2_dev_bucket_missing(struct bch_fs *, struct bpos);

static inline struct bch_dev *bch2_dev_bucket_tryget(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, bucket);
	if (!ca)
		bch2_dev_bucket_missing(c, bucket);
	return ca;
}

static inline struct bch_dev *bch2_dev_iterate_noerror(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
{
	if (ca && ca->dev_idx == dev_idx)
		return ca;
	bch2_dev_put(ca);
	return bch2_dev_tryget_noerror(c, dev_idx);
}

static inline struct bch_dev *bch2_dev_iterate(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
{
	if (ca && ca->dev_idx == dev_idx)
		return ca;
	bch2_dev_put(ca);
	return bch2_dev_tryget(c, dev_idx);
}

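/*
 * Take io_ref[rw] on a device by index, but only if the member state
 * allows that kind of IO: writes require rw, reads allow rw or ro:
 */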
static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, int rw)
{
	might_sleep();

	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, dev);
	if (ca && !percpu_ref_tryget(&ca->io_ref[rw]))
		ca = NULL;
	rcu_read_unlock();

	if (ca &&
	    (ca->mi.state == BCH_MEMBER_STATE_rw ||
	    (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ)))
		return ca;

	if (ca)
		percpu_ref_put(&ca->io_ref[rw]);
	return NULL;
}

/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(c, ca)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}

extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;

static inline bool bch2_member_alive(struct bch_member *m)
{
	return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}

static inline bool bch2_member_exists(struct bch_sb *sb, unsigned dev)
{
	if (dev < sb->nr_devices) {
		struct bch_member m = bch2_sb_member_get(sb, dev);
		return bch2_member_alive(&m);
	}
	return false;
}

unsigned bch2_sb_nr_devices(const struct bch_sb *);

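/*
 * Unpack an on-disk (little-endian) member into native form; durability
 * is stored biased by one, with zero meaning unset (default 1):
 */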
static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
	return (struct bch_member_cpu) {
		.nbuckets	= le64_to_cpu(mi->nbuckets),
		.nbuckets_minus_first = le64_to_cpu(mi->nbuckets) -
			le16_to_cpu(mi->first_bucket),
		.first_bucket	= le16_to_cpu(mi->first_bucket),
		.bucket_size	= le16_to_cpu(mi->bucket_size),
		.group		= BCH_MEMBER_GROUP(mi),
		.state		= BCH_MEMBER_STATE(mi),
		.discard	= BCH_MEMBER_DISCARD(mi),
		.data_allowed	= BCH_MEMBER_DATA_ALLOWED(mi),
		.durability	= BCH_MEMBER_DURABILITY(mi)
			? BCH_MEMBER_DURABILITY(mi) - 1
			: 1,
		.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
		.valid		= bch2_member_alive(mi),
		.btree_bitmap_shift	= mi->btree_bitmap_shift,
		.btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap),
	};
}

void bch2_sb_members_from_cpu(struct bch_fs *);

void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
void bch2_dev_errors_reset(struct bch_dev *);

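/*
 * The allocated bitmap is a single u64, each bit covering
 * 1 << btree_bitmap_shift sectors of the device; a range only counts as
 * marked if every bit it overlaps is set:
 */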
static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64 start, unsigned sectors)
{
	u64 end = start + sectors;

	if (end > 64ULL << ca->mi.btree_bitmap_shift)
		return false;

	for (unsigned bit = start >> ca->mi.btree_bitmap_shift;
	     (u64) bit << ca->mi.btree_bitmap_shift < end;
	     bit++)
		if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit)))
			return false;
	return true;
}

bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c);
void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c);

int bch2_sb_member_alloc(struct bch_fs *);

#endif /* _BCACHEFS_SB_MEMBERS_H */