/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
{
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}
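
/*
 * Example (illustrative, assuming a bucket size of 512 sectors): a device is
 * divided into fixed-size buckets of ca->mi.bucket_size sectors, so the
 * conversions above are plain division/remainder:
 *
 *	u32 offset;
 *	u64 b = sector_to_bucket_and_offset(ca, 1300, &offset);
 *	// b == 2, offset == 276; bucket_to_sector(ca, 2) == 1024
 */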

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

static inline void bucket_unlock(struct bucket *b)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}

static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}
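
/*
 * Note (added commentary): struct bucket is locked with a single lock bit
 * rather than a full spinlock, keeping the struct small. bucket_lock()
 * sleeps on the bit via wait_on_bit_lock(); bucket_unlock() clears it with
 * release semantics and wakes waiters. The BUILD_BUG_ON appears to assert
 * that BUCKET_LOCK_BITNR lands in the first byte of an unsigned long, so
 * the bit operations stay within the smaller b->lock field they are cast
 * from.
 */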

static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	return bucket_valid(ca, b)
		? genradix_ptr(&ca->buckets_gc, b)
		: NULL;
}

static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     lockdep_is_held(&ca->fs->state_lock));
}

static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	if (b - gens->first_bucket >= gens->nbuckets_minus_first)
		return NULL;
	return gens->b + b;
}

static inline int bucket_gen_get_rcu(struct bch_dev *ca, size_t b)
{
	u8 *gen = bucket_gen(ca, b);
	return gen ? *gen : -1;
}

static inline int bucket_gen_get(struct bch_dev *ca, size_t b)
{
	rcu_read_lock();
	int ret = bucket_gen_get_rcu(ca, b);
	rcu_read_unlock();
	return ret;
}
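
/*
 * Note (added commentary): bucket generation numbers live in an
 * RCU-protected array so readers can sample them locklessly; the lockdep
 * check in bucket_gens() indicates updaters hold ca->fs->state_lock.
 * bucket_gen() exploits unsigned wraparound: if b < first_bucket the
 * subtraction wraps to a huge value, so a single compare rejects bucket
 * numbers out of range on either side. bucket_gen_get() returns -1 for an
 * invalid bucket.
 */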

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
					 const struct bch_extent_ptr *ptr)
{
	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (bkey_is_btree_ptr(k))
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}
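
/*
 * Worked example (illustrative): for an extent with compressed_size == 32
 * and uncompressed_size == 128, a pointer referencing 64 live sectors costs
 * DIV_ROUND_UP(64 * 32, 128) == 16 sectors on disk: disk usage is scaled by
 * the extent's compression ratio and rounded up.
 */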

static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	return max(0, gen_cmp(a, b));
}
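
/*
 * Note (added commentary): generation numbers are 8 bits and wrap, so they
 * are compared by casting the difference to s8; this gives the right answer
 * as long as the two gens are within 128 of each other. E.g.
 * gen_cmp(2, 254) == (s8) (2 - 254) == 4, so gen 2 correctly compares as
 * newer than gen 254. gen_after(a, b) returns how far a is ahead of b, or 0
 * if it isn't ahead.
 */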

static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	int gen = bucket_gen_get_rcu(ca, PTR_BUCKET_NR(ca, ptr));
	return gen < 0 ? gen : gen_after(gen, ptr->gen);
}

/**
 * dev_ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated
 * @ca:		device the pointer refers to
 * @ptr:	extent pointer to check
 *
 * Returns: positive if the pointer is stale (its gen is behind the bucket's
 * gen), 0 if it is current, negative if the bucket is invalid.
 */
static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	rcu_read_lock();
	int ret = dev_ptr_stale_rcu(ca, ptr);
	rcu_read_unlock();
	return ret;
}

/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	bch2_dev_usage_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_full_read_fast(struct bch_dev *, struct bch_dev_usage_full *);
static inline struct bch_dev_usage_full bch2_dev_usage_full_read(struct bch_dev *ca)
{
	struct bch_dev_usage_full ret;

	bch2_dev_usage_full_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage_full *);

static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	s64 reserved = 0;

	switch (watermark) {
	case BCH_WATERMARK_NR:
		BUG();
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree_copygc:
	case BCH_WATERMARK_reclaim:
	case BCH_WATERMARK_interior_updates:
		break;
	}

	return reserved;
}
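
/*
 * Note (added commentary): the watermarks form a hierarchy, and the
 * fallthroughs make the reserves cumulative: each watermark must leave
 * intact everything reserved for the higher-priority watermarks below it in
 * the switch. E.g. an allocation at BCH_WATERMARK_stripe reserves
 * nbuckets/64 on top of the normal reserve (another nbuckets/64), plus the
 * copygc and btree reserves; the highest-priority watermarks (reclaim,
 * interior_updates) reserve nothing and may allocate from anything.
 */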

static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.buckets[BCH_DATA_free] -
		     ca->nr_open_buckets -
		     bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{
	return max_t(s64, 0,
		       usage.buckets[BCH_DATA_free]
		     + usage.buckets[BCH_DATA_cached]
		     + usage.buckets[BCH_DATA_need_gc_gens]
		     + usage.buckets[BCH_DATA_need_discard]
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, watermark));
}
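
/*
 * Note (added commentary): unlike dev_buckets_free(), which counts only
 * buckets that are free right now, __dev_buckets_available() also counts
 * buckets that could be reclaimed without moving data (cached data, buckets
 * awaiting discard or gen updates); it estimates how many buckets the
 * allocator could eventually obtain at the given watermark.
 */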

static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
			   struct bkey_s_c, const struct bch_extent_ptr *,
			   s64, enum bch_data_type, u8, u8, u32 *);

int bch2_check_fix_ptrs(struct btree_trans *,
			enum btree_id, unsigned, struct bkey_s_c,
			enum btree_iter_update_trigger_flags);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			  struct bkey_s_c, struct bkey_s,
			  enum btree_iter_update_trigger_flags);

#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({												\
	int ret = 0;										\
												\
	if (_old.k->type)									\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert);	\
	if (!ret && _new.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
	ret;											\
})
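
/*
 * Note (added commentary): the macro above is for triggers that handle the
 * old and new key independently: run _fn on the key being overwritten with
 * BTREE_TRIGGER_insert masked off, then, if that succeeded, on the key
 * being inserted with BTREE_TRIGGER_overwrite masked off. Keys with
 * !k->type (nothing there) are skipped.
 */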

void bch2_trans_account_disk_usage_change(struct btree_trans *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
				    enum bch_data_type, unsigned,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

bool bch2_is_superblock_bucket(struct bch_dev *, u64);

static inline const char *bch2_data_type_str(enum bch_data_type type)
{
	return type < BCH_DATA_NR
		? __bch2_data_types[type]
		: "(invalid data type)";
}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors) {
		this_cpu_sub(*c->online_reserved, res->sectors);
		res->sectors = 0;
	}
}

enum bch_reservation_flags {
	BCH_DISK_RESERVATION_NOFAIL	= 1 << 0,
	BCH_DISK_RESERVATION_PARTIAL	= 1 << 1,
};

int __bch2_disk_reservation_add(struct bch_fs *, struct disk_reservation *,
				u64, enum bch_reservation_flags);

static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, enum bch_reservation_flags flags)
{
#ifdef __KERNEL__
	u64 old, new;

	old = this_cpu_read(c->pcpu->sectors_available);
	do {
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}
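
/*
 * Note (added commentary): the fast path above debits a per-CPU quota of
 * sectors (c->pcpu->sectors_available) with a lockless cmpxchg loop, so the
 * common case touches no shared cachelines; only when the local quota can't
 * cover the request does it fall back to __bch2_disk_reservation_add() and
 * the shared counters. In userspace builds the fast path is compiled out.
 */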

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

#define RESERVE_FACTOR	6

static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}
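
/*
 * Worked example (illustrative): with RESERVE_FACTOR == 6, avail_factor()
 * scales by 64/65, holding back ~1.5% of capacity as slack:
 * avail_factor(650) == div_u64(650 << 6, 65) == 640.
 */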

void bch2_buckets_nouse_free(struct bch_fs *);
int bch2_buckets_nouse_alloc(struct bch_fs *);

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */