/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

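/*
 * Sector <-> bucket arithmetic. Worked example (illustrative numbers only):
 * with ca->mi.bucket_size == 1024 sectors, sector 3000 lives in bucket 2, at
 * offset 952 within that bucket:
 *
 *	u32 offset;
 *	u64 b = sector_to_bucket_and_offset(ca, 3000, &offset);
 *	(now b == 2 and offset == 952, since 3000 = 2 * 1024 + 952)
 */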
static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
{
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
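
/*
 * Example usage (sketch): iterate every bucket in an array-like object with
 * ->b, ->first_bucket and ->nbuckets members, as the macro expects:
 *
 *	struct bucket *b;
 *
 *	for_each_bucket(b, buckets)
 *		frob(b);	(frob() being some hypothetical per-bucket helper)
 */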

/*
 * Ugly hack alert:
 *
 * We need to cram a spinlock in a single byte, because that's what we have left
 * in struct bucket, and we care about the size of these - during fsck, we need
 * in-memory state for every single bucket on every device.
 *
 * We used to do
 *   while (xchg(&b->lock, 1))
 *	cpu_relax();
 * but it turns out not all architectures support xchg on a single byte.
 *
 * So now we use bit_spin_lock(), with fun games since we can't burn a whole
 * ulong for this - we just need to make sure the lock bit always ends up in the
 * first byte.
 */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR	0
#else
#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
#endif

union ulong_byte_assert {
	ulong	ulong;
	u8	byte;
};

static inline void bucket_unlock(struct bucket *b)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}

static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}
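
/*
 * Minimal usage sketch for the byte-sized bucket lock above (the helper and
 * the field it touches are illustrative, not part of this header):
 *
 *	static void update_bucket(struct bucket *b, u32 sectors)
 *	{
 *		bucket_lock(b);
 *		b->dirty_sectors += sectors;
 *		bucket_unlock(b);
 *	}
 */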

static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	return genradix_ptr(&ca->buckets_gc, b);
}

static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->state_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

/* Returns NULL if @b is out of range: */
static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	if (b - gens->first_bucket >= gens->nbuckets_minus_first)
		return NULL;
	return gens->b + b;
}

/* Returns the gen, or -1 if @b is out of range: */
static inline int bucket_gen_get_rcu(struct bch_dev *ca, size_t b)
{
	u8 *gen = bucket_gen(ca, b);
	return gen ? *gen : -1;
}

static inline int bucket_gen_get(struct bch_dev *ca, size_t b)
{
	rcu_read_lock();
	int ret = bucket_gen_get_rcu(ca, b);
	rcu_read_unlock();
	return ret;
}

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
					 const struct bch_extent_ptr *ptr)
{
	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (bkey_is_btree_ptr(k))
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}
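
/*
 * Worked example for ptr_disk_sectors() above (illustrative numbers): 8 live
 * sectors of an extent whose crc has compressed_size == 4 and
 * uncompressed_size == 16 take up DIV_ROUND_UP(8 * 4, 16) == 2 sectors on
 * disk; uncompressed extents take up @sectors unchanged.
 */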

static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}
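
/*
 * Bucket gens are 8 bit and wrap around, so the comparisons above are done
 * with signed 8 bit arithmetic: e.g. gen_cmp(1, 250) == (s8) (1 - 250) == 7,
 * i.e. gen 1 is 7 generations newer than gen 250 despite being numerically
 * smaller, and gen_after(1, 250) == 7.
 */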

static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	int gen = bucket_gen_get_rcu(ca, PTR_BUCKET_NR(ca, ptr));
	return gen < 0 ? gen : gen_after(gen, ptr->gen);
}

/**
 * dev_ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 *
 * Returns: positive if the bucket has since been invalidated, 0 if not,
 * negative if the bucket was out of range.
 */
static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	rcu_read_lock();
	int ret = dev_ptr_stale_rcu(ca, ptr);
	rcu_read_unlock();
	return ret;
}

/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	bch2_dev_usage_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);

static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	s64 reserved = 0;

	switch (watermark) {
	case BCH_WATERMARK_NR:
		BUG();
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree_copygc:
	case BCH_WATERMARK_reclaim:
	case BCH_WATERMARK_interior_updates:
		break;
	}

	return reserved;
}
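
/*
 * The fallthroughs above mean each watermark also reserves everything the
 * stricter watermarks below it do. Worked example (illustrative numbers):
 * with ca->mi.nbuckets == 65536 and ca->nr_btree_reserve == 512,
 * BCH_WATERMARK_normal reserves (65536 >> 6) + 512 + 512 == 2048 buckets,
 * while BCH_WATERMARK_btree reserves only 512.
 */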

static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets -
		     ca->nr_open_buckets -
		     bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{
	return max_t(s64, 0,
		       usage.d[BCH_DATA_free].buckets
		     + usage.d[BCH_DATA_cached].buckets
		     + usage.d[BCH_DATA_need_gc_gens].buckets
		     + usage.d[BCH_DATA_need_discard].buckets
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */

static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
			   struct bkey_s_c, const struct bch_extent_ptr *,
			   s64, enum bch_data_type, u8, u8, u32 *);

int bch2_check_fix_ptrs(struct btree_trans *,
			enum btree_id, unsigned, struct bkey_s_c,
			enum btree_iter_update_trigger_flags);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			     struct bkey_s_c, struct bkey_s,
			     enum btree_iter_update_trigger_flags);

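/*
 * Helper for trigger implementations that handle overwrite and insert the
 * same way: runs _fn first on the key being overwritten (with
 * BTREE_TRIGGER_insert masked out), then on the new key (with
 * BTREE_TRIGGER_overwrite masked out), stopping on the first error.
 */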
#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({												\
	int ret = 0;										\
												\
	if (_old.k->type)									\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert);	\
	if (!ret && _new.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
	ret;											\
})

void bch2_trans_account_disk_usage_change(struct btree_trans *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
				    enum bch_data_type, unsigned,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
			   enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
				  enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	/* Bucket 0 is always treated as a superblock bucket: */
	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		/* true if [offset, end) overlaps [b_offset, b_end): */
		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	return false;
}

static inline const char *bch2_data_type_str(enum bch_data_type type)
{
	return type < BCH_DATA_NR
		? __bch2_data_types[type]
		: "(invalid data type)";
}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors) {
		this_cpu_sub(*c->online_reserved, res->sectors);
		res->sectors = 0;
	}
}

enum bch_reservation_flags {
	BCH_DISK_RESERVATION_NOFAIL	= 1 << 0,
	BCH_DISK_RESERVATION_PARTIAL	= 1 << 1,
};

int __bch2_disk_reservation_add(struct bch_fs *, struct disk_reservation *,
				u64, enum bch_reservation_flags);

static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, enum bch_reservation_flags flags)
{
#ifdef __KERNEL__
	u64 old, new;

	old = this_cpu_read(c->pcpu->sectors_available);
	do {
		/* Not enough in this CPU's pool - take the slow path: */
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}
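
/*
 * Typical reservation lifecycle (sketch; the write step is illustrative, not
 * part of this header):
 *
 *	struct disk_reservation res;
 *	int ret = bch2_disk_reservation_get(c, &res, sectors, nr_replicas, 0);
 *	if (ret)
 *		return ret;
 *	... do the write that consumes the reserved space ...
 *	bch2_disk_reservation_put(c, &res);
 */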

#define RESERVE_FACTOR	6

static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}
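
/*
 * avail_factor() scales capacity by 64/65 (with RESERVE_FACTOR == 6), holding
 * back ~1.5% as reserve: e.g. avail_factor(650) == (650 * 64) / 65 == 640.
 */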

void bch2_buckets_nouse_free(struct bch_fs *);
int bch2_buckets_nouse_alloc(struct bch_fs *);

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */