/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

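/*
 * Buckets are fixed-size runs of sectors, so converting between the two is
 * just a division or multiplication by the per-device bucket size:
 */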
static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
{
	return div_u64(s, ca->mi.bucket_size, offset);
}

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

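/*
 * Per-bucket lock, implemented as a bit lock on bucket->lock:
 * bucket_lock() sleeps uninterruptibly until the bit is acquired,
 * bucket_unlock() releases it and wakes any waiters.
 */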
static inline void bucket_unlock(struct bucket *b)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}

static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}

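/*
 * GC keeps its own copy of bucket state in ca->buckets_gc; returns NULL if @b
 * is not a valid bucket number for this device.
 */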
static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	return bucket_valid(ca, b)
		? genradix_ptr(&ca->buckets_gc, b)
		: NULL;
}

static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     lockdep_is_held(&ca->fs->state_lock));
}

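/*
 * Returns a pointer to bucket @b's generation number, or NULL if @b is out of
 * range. bucket_gens is RCU protected, so callers must hold the RCU read lock
 * (or ca->fs->state_lock) - hence the _rcu and non-_rcu variants below.
 */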
static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	if (b - gens->first_bucket >= gens->nbuckets_minus_first)
		return NULL;
	return gens->b + b;
}

static inline int bucket_gen_get_rcu(struct bch_dev *ca, size_t b)
{
	u8 *gen = bucket_gen(ca, b);
	return gen ? *gen : -1;
}

static inline int bucket_gen_get(struct bch_dev *ca, size_t b)
{
	rcu_read_lock();
	int ret = bucket_gen_get_rcu(ca, b);
	rcu_read_unlock();
	return ret;
}

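/*
 * Helpers for mapping an extent pointer to the bucket it points into - as a
 * raw bucket number, as a btree key position, or as the in-memory GC bucket:
 */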
static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
					 const struct bch_extent_ptr *ptr)
{
	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (bkey_is_btree_ptr(k))
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

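/*
 * For compressed extents, disk space is accounted in proportion to the
 * compression ratio: scale @sectors by compressed_size/uncompressed_size,
 * rounding up.
 */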
static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}

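/*
 * Bucket generation numbers are 8 bits and wrap around, so they're compared
 * with signed subtraction: gen_cmp() is positive if @a is newer than @b,
 * gen_after() clamps that to how many generations newer @a is (0 if not).
 */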
static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	return max(0, gen_cmp(a, b));
}

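/*
 * Returns how many generations the bucket has advanced since @ptr was created
 * (0 if the pointer is still live), or a negative value if the bucket doesn't
 * exist; caller must hold the RCU read lock.
 */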
static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	int gen = bucket_gen_get_rcu(ca, PTR_BUCKET_NR(ca, ptr));
	return gen < 0 ? gen : gen_after(gen, ptr->gen);
}

/**
 * dev_ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	rcu_read_lock();
	int ret = dev_ptr_stale_rcu(ca, ptr);
	rcu_read_unlock();
	return ret;
}

/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	bch2_dev_usage_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);

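/*
 * Number of buckets held in reserve at a given watermark: the switch falls
 * through, so each case accumulates the reserves of the watermarks below it -
 * less privileged watermarks see a larger reserve, leaving buckets available
 * for more critical internal operations (copygc, btree updates, reclaim).
 */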
static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	s64 reserved = 0;

	switch (watermark) {
	case BCH_WATERMARK_NR:
		BUG();
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree_copygc:
	case BCH_WATERMARK_reclaim:
	case BCH_WATERMARK_interior_updates:
		break;
	}

	return reserved;
}

static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets -
		     ca->nr_open_buckets -
		     bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{
	return max_t(s64, 0,
		       usage.d[BCH_DATA_free].buckets
		     + usage.d[BCH_DATA_cached].buckets
		     + usage.d[BCH_DATA_need_gc_gens].buckets
		     + usage.d[BCH_DATA_need_discard].buckets
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */

static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
			   struct bkey_s_c, const struct bch_extent_ptr *,
			   s64, enum bch_data_type, u8, u8, u32 *);

int bch2_check_fix_ptrs(struct btree_trans *,
			enum btree_id, unsigned, struct bkey_s_c,
			enum btree_iter_update_trigger_flags);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			  struct bkey_s_c, struct bkey_s,
			  enum btree_iter_update_trigger_flags);

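/*
 * Run a trigger in two passes: first on the key being overwritten (with
 * BTREE_TRIGGER_insert masked off), then on the key being inserted (with
 * BTREE_TRIGGER_overwrite masked off), skipping deleted keys and stopping on
 * the first error.
 */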
#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({												\
	int ret = 0;										\
												\
	if (_old.k->type)									\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert);	\
	if (!ret && _new.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
	ret;											\
})

void bch2_trans_account_disk_usage_change(struct btree_trans *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
				    enum bch_data_type, unsigned,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

bool bch2_is_superblock_bucket(struct bch_dev *, u64);

static inline const char *bch2_data_type_str(enum bch_data_type type)
{
	return type < BCH_DATA_NR
		? __bch2_data_types[type]
		: "(invalid data type)";
}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors) {
		this_cpu_sub(*c->online_reserved, res->sectors);
		res->sectors = 0;
	}
}

enum bch_reservation_flags {
	BCH_DISK_RESERVATION_NOFAIL	= 1 << 0,
	BCH_DISK_RESERVATION_PARTIAL	= 1 << 1,
};

int __bch2_disk_reservation_add(struct bch_fs *, struct disk_reservation *,
				u64, enum bch_reservation_flags);

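/*
 * Fast path: try to take the reservation out of this CPU's cached
 * sectors_available count with a cmpxchg loop, falling back to the slow path
 * (__bch2_disk_reservation_add()) when the percpu cache can't cover the
 * request.
 */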
static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, enum bch_reservation_flags flags)
{
#ifdef __KERNEL__
	u64 old, new;

	old = this_cpu_read(c->pcpu->sectors_available);
	do {
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

#define RESERVE_FACTOR	6

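/*
 * Scale available space by 2^RESERVE_FACTOR / (2^RESERVE_FACTOR + 1), i.e.
 * report 64/65ths of the raw value, holding back the rest.
 */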
static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}

void bch2_buckets_nouse_free(struct bch_fs *);
int bch2_buckets_nouse_alloc(struct bch_fs *);

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */