xref: /linux/fs/bcachefs/btree_gc.h (revision 41f7adb676f6c4aef439d78b15c7e1119216bc2b)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_BTREE_GC_H
3 #define _BCACHEFS_BTREE_GC_H
4 
5 #include "bkey.h"
6 #include "btree_types.h"
7 
8 int bch2_check_topology(struct bch_fs *);
9 int bch2_check_allocations(struct bch_fs *);
10 
11 /*
12  * For concurrent mark and sweep (with other index updates), we define a total
13  * ordering of _all_ references GC walks:
14  *
15  * Note that some references will have the same GC position as others - e.g.
16  * everything within the same btree node; in those cases we're relying on
17  * whatever locking exists for where those references live, i.e. the write lock
18  * on a btree node.
19  *
20  * That locking is also required to ensure GC doesn't pass the updater in
21  * between the updater adding/removing the reference and updating the GC marks;
22  * without that, we would at best double count sometimes.
23  *
24  * That part is important - whenever calling bch2_mark_pointers(), a lock _must_
25  * be held that prevents GC from passing the position the updater is at.
26  *
27  * (What about the start of gc, when we're clearing all the marks? GC clears the
28  * mark with the gc pos seqlock held, and bch2_mark_bucket checks against the gc
29  * position inside its cmpxchg loop, so crap magically works).
30  */
31 
32 /* Position of (the start of) a gc phase: */
33 static inline struct gc_pos gc_phase(enum gc_phase phase)
34 {
35 	return (struct gc_pos) {
36 		.phase	= phase,
37 		.level	= 0,
38 		.pos	= POS_MIN,
39 	};
40 }
41 
42 static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
43 {
44 	return   cmp_int(l.phase, r.phase) ?:
45 		-cmp_int(l.level, r.level) ?:
46 		 bpos_cmp(l.pos, r.pos);
47 }
48 
/*
 * Map a btree ID to the gc phase in which that btree is walked: each
 * BTREE_ID_##name declared by the BCH_BTREE_IDS() x-macro has a matching
 * GC_PHASE_BTREE_##name, so the case labels below are generated.
 */
static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
{
	switch (id) {
#define x(name, v, ...) case BTREE_ID_##name: return GC_PHASE_BTREE_##name;
	BCH_BTREE_IDS()
#undef x
	default:
		/* an ID outside the x-macro list is a programming error */
		BUG();
	}
}
59 
60 static inline struct gc_pos gc_pos_btree(enum btree_id btree, unsigned level,
61 					 struct bpos pos)
62 {
63 	return (struct gc_pos) {
64 		.phase	= btree_id_to_gc_phase(btree),
65 		.level	= level,
66 		.pos	= pos,
67 	};
68 }
69 
70 /*
71  * GC position of the pointers within a btree node: note, _not_ for &b->key
72  * itself, that lives in the parent node:
73  */
74 static inline struct gc_pos gc_pos_btree_node(struct btree *b)
75 {
76 	return gc_pos_btree(b->c.btree_id, b->c.level, b->key.k.p);
77 }
78 
/*
 * Returns true if gc's current position is at or past @pos, i.e. gc has
 * already visited @pos in the current pass.
 *
 * c->gc_pos is read under the gc_pos_lock seqcount: if a writer advanced
 * gc_pos while we were reading, read_seqcount_retry() fails and we redo
 * the comparison against a consistent snapshot.
 */
static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
{
	unsigned seq;
	bool ret;

	do {
		seq = read_seqcount_begin(&c->gc_pos_lock);
		ret = gc_pos_cmp(pos, c->gc_pos) <= 0;
	} while (read_seqcount_retry(&c->gc_pos_lock, seq));

	return ret;
}
91 
92 int bch2_gc_gens(struct bch_fs *);
93 void bch2_gc_gens_async(struct bch_fs *);
94 void bch2_fs_gc_init(struct bch_fs *);
95 
96 #endif /* _BCACHEFS_BTREE_GC_H */
97