xref: /linux/fs/bcachefs/buckets.h (revision ff9fbcafbaf13346c742c0d672a22f5ac20b9d92)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
{
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}

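/*
 * Worked example (illustrative; assumes a hypothetical bucket_size of 128
 * sectors): sector 1000 maps to bucket 7 at offset 104, since 7 * 128 = 896
 * and 1000 - 896 = 104. sector_to_bucket_and_offset() returns both values
 * from a single division.
 */
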
#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

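/*
 * Usage sketch (hypothetical caller, not from this file):
 *
 *	struct bucket_array *buckets = gc_bucket_array(ca);
 *	struct bucket *b;
 *
 *	for_each_bucket(b, buckets)
 *		pr_info("gen %u\n", b->gen);
 */
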
/*
 * Ugly hack alert:
 *
 * We need to cram a spinlock into a single byte, because that's what we have
 * left in struct bucket, and we care about the size of these - during fsck, we
 * need in-memory state for every single bucket on every device.
 *
 * We used to do
 *   while (xchg(&b->lock, 1)) cpu_relax();
 * but it turns out not all architectures support xchg on a single byte.
 *
 * So now we use bit_spin_lock(), with fun games since we can't burn a whole
 * ulong for this - we just need to make sure the lock bit always ends up in
 * the first byte.
 */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR	0
#else
#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
#endif

union ulong_byte_assert {
	ulong	ulong;
	u8	byte;
};

static inline void bucket_unlock(struct bucket *b)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}

static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}

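/*
 * Usage sketch (illustrative): hold the bit spinlock while mutating a
 * struct bucket's fields:
 *
 *	bucket_lock(b);
 *	... update *b ...
 *	bucket_unlock(b);
 */
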
static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->buckets_gc,
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	struct bucket_array *buckets = gc_bucket_array(ca);

	if (b - buckets->first_bucket >= buckets->nbuckets_minus_first)
		return NULL;
	return buckets->b + b;
}

static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->gc_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	if (b - gens->first_bucket >= gens->nbuckets_minus_first)
		return NULL;
	return gens->b + b;
}

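/*
 * Note: no NULL check on bucket_gen()'s return value here - the caller is
 * responsible for passing a valid bucket index.
 */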
static inline u8 bucket_gen_get(struct bch_dev *ca, size_t b)
{
	rcu_read_lock();
	u8 gen = *bucket_gen(ca, b);
	rcu_read_unlock();
	return gen;
}

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
					 const struct bch_extent_ptr *ptr)
{
	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (bkey_is_btree_ptr(k))
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}

static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}

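/*
 * Worked example: bucket generations are 8 bits and wrap, and the s8 cast
 * in gen_cmp() keeps the comparison correct across wraparound:
 * gen_cmp(1, 250) == (s8) (u8) -249 == 7, so gen 1 counts as 7 generations
 * newer than gen 250.
 */
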
static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	u8 *gen = bucket_gen(ca, PTR_BUCKET_NR(ca, ptr));
	if (!gen)
		return -1;
	return gen_after(*gen, ptr->gen);
}

/**
 * dev_ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	rcu_read_lock();
	int ret = dev_ptr_stale_rcu(ca, ptr);
	rcu_read_unlock();

	return ret;
}

/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	bch2_dev_usage_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_init(struct bch_dev *);
void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev_usage *);

static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	s64 reserved = 0;

	switch (watermark) {
	case BCH_WATERMARK_NR:
		BUG();
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree_copygc:
	case BCH_WATERMARK_reclaim:
	case BCH_WATERMARK_interior_updates:
		break;
	}

	return reserved;
}

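/*
 * Worked example (illustrative numbers): with mi.nbuckets == 1 << 20 and
 * nr_btree_reserve == 512, BCH_WATERMARK_stripe reserves
 * 16384 + 16384 + 512 + 512 == 33792 buckets - each case falls through,
 * so stricter watermarks accumulate the reserves of everything below them.
 */
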
static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets -
		     ca->nr_open_buckets -
		     bch2_dev_buckets_reserved(ca, watermark));
}

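/*
 * Unlike dev_buckets_free(), this also counts buckets that can be reclaimed
 * without moving data: cached buckets, and buckets awaiting a discard or a
 * gen update.
 */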
static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{
	return max_t(s64, 0,
		       usage.d[BCH_DATA_free].buckets
		     + usage.d[BCH_DATA_cached].buckets
		     + usage.d[BCH_DATA_need_gc_gens].buckets
		     + usage.d[BCH_DATA_need_discard].buckets
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */

static inline unsigned __fs_usage_u64s(unsigned nr_replicas)
{
	return sizeof(struct bch_fs_usage) / sizeof(u64) + nr_replicas;
}

static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
	return __fs_usage_u64s(READ_ONCE(c->replicas.nr));
}

static inline unsigned __fs_usage_online_u64s(unsigned nr_replicas)
{
	return sizeof(struct bch_fs_usage_online) / sizeof(u64) + nr_replicas;
}

static inline unsigned fs_usage_online_u64s(struct bch_fs *c)
{
	return __fs_usage_online_u64s(READ_ONCE(c->replicas.nr));
}

static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}

u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);

struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *);

void bch2_fs_usage_acc_to_base(struct bch_fs *, unsigned);

void bch2_fs_usage_to_text(struct printbuf *,
			   struct bch_fs *, struct bch_fs_usage_online *);

u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage_online *);

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

void bch2_dev_usage_update(struct bch_fs *, struct bch_dev *,
			   const struct bch_alloc_v4 *,
			   const struct bch_alloc_v4 *, u64, bool);

/* key/bucket marking: */

static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
						unsigned journal_seq,
						bool gc)
{
	percpu_rwsem_assert_held(&c->mark_lock);
	BUG_ON(!gc && !journal_seq);

	return this_cpu_ptr(gc
			    ? c->usage_gc
			    : c->usage[journal_seq & JOURNAL_BUF_MASK]);
}

int bch2_update_replicas(struct bch_fs *, struct bkey_s_c,
			 struct bch_replicas_entry_v1 *, s64,
			 unsigned, bool);
int bch2_update_replicas_list(struct btree_trans *,
			 struct bch_replicas_entry_v1 *, s64);
int bch2_update_cached_sectors_list(struct btree_trans *, unsigned, s64);
int bch2_replicas_deltas_realloc(struct btree_trans *, unsigned);

void bch2_fs_usage_initialize(struct bch_fs *);

int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
			   struct bkey_s_c, const struct bch_extent_ptr *,
			   s64, enum bch_data_type, u8, u8, u32 *);

int bch2_check_fix_ptrs(struct btree_trans *,
			enum btree_id, unsigned, struct bkey_s_c,
			enum btree_iter_update_trigger_flags);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			  struct bkey_s_c, struct bkey_s,
			  enum btree_iter_update_trigger_flags);

#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({												\
	int ret = 0;										\
												\
	if (_old.k->type)									\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert);	\
	if (!ret && _new.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
	ret;											\
})

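/*
 * Usage sketch (hypothetical trigger, not from this file): the macro runs
 * one helper twice, first for the key being overwritten, then for the key
 * being inserted, masking off the flag that doesn't apply to each half:
 *
 *	static int my_trigger(struct btree_trans *trans, enum btree_id btree,
 *			      unsigned level, struct bkey_s_c old,
 *			      struct bkey_s new,
 *			      enum btree_iter_update_trigger_flags flags)
 *	{
 *		return trigger_run_overwrite_then_insert(__my_trigger, trans,
 *					btree, level, old, new, flags);
 *	}
 */
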
void bch2_trans_account_disk_usage_change(struct btree_trans *);

void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
				    enum bch_data_type, unsigned,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

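/*
 * Bucket 0 is always treated as a superblock bucket; otherwise, a bucket
 * counts as one if its sector range [b_offset, b_end) overlaps any
 * superblock copy [offset, end) described by the superblock layout.
 */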
static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	return false;
}

static inline const char *bch2_data_type_str(enum bch_data_type type)
{
	return type < BCH_DATA_NR
		? __bch2_data_types[type]
		: "(invalid data type)";
}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors) {
		this_cpu_sub(*c->online_reserved, res->sectors);
		res->sectors = 0;
	}
}

#define BCH_DISK_RESERVATION_NOFAIL		(1 << 0)

int __bch2_disk_reservation_add(struct bch_fs *,
				struct disk_reservation *,
				u64, int);

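/*
 * Fast path: grab sectors from this CPU's sectors_available pool with a
 * cmpxchg loop; fall back to __bch2_disk_reservation_add() when the percpu
 * pool can't cover the request (and in userspace builds, which always take
 * the slow path).
 */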
static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, int flags)
{
#ifdef __KERNEL__
	u64 old, new;

	do {
		old = this_cpu_read(c->pcpu->sectors_available);
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (this_cpu_cmpxchg(c->pcpu->sectors_available, old, new) != old);

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

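/*
 * Usage sketch (illustrative; error handling elided):
 *
 *	struct disk_reservation res;
 *	int ret = bch2_disk_reservation_get(c, &res, sectors, nr_replicas, 0);
 *	if (ret)
 *		return ret;
 *	... perform the write the reservation covers ...
 *	bch2_disk_reservation_put(c, &res);
 */
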
#define RESERVE_FACTOR	6

static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}

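/*
 * avail_factor() scales capacity by 64/65, holding back roughly 1.5% as
 * slop: e.g. avail_factor(65) == (65 << 6) / 65 == 64.
 */
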
void bch2_buckets_nouse_free(struct bch_fs *);
int bch2_buckets_nouse_alloc(struct bch_fs *);

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */