/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H
#define _BCACHEFS_BTREE_UPDATE_INTERIOR_H

#include "btree_cache.h"
#include "btree_locking.h"
#include "btree_update.h"

#define BTREE_UPDATE_NODES_MAX		((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)

#define BTREE_UPDATE_JOURNAL_RES	(BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
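
/*
 * Worked through for illustration, with the values these constants have at
 * the time of writing (BTREE_MAX_DEPTH == 4, GC_MERGE_NODES == 4):
 * BTREE_UPDATE_NODES_MAX == (4 - 2) * 2 + 4 == 8, and the journal
 * reservation covers one maximum-size btree pointer key, plus one extra
 * u64, for each of those nodes.
 */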

int bch2_btree_node_check_topology(struct btree_trans *, struct btree *);

#define BTREE_UPDATE_MODES()	\
	x(none)			\
	x(node)			\
	x(root)			\
	x(update)

enum btree_update_mode {
#define x(n)	BTREE_UPDATE_##n,
	BTREE_UPDATE_MODES()
#undef x
};
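
/*
 * A minimal sketch of the usual companion to this x-macro - expanding it a
 * second time to produce a name table for debug output (the identifier here
 * is illustrative; the actual table, if any, lives in the .c file):
 *
 *	static const char * const btree_update_mode_strs[] = {
 *	#define x(n)	[BTREE_UPDATE_##n] = #n,
 *		BTREE_UPDATE_MODES()
 *	#undef x
 *		NULL
 *	};
 */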

/*
 * Tracks an in-progress split/rewrite of a btree node and the update to the
 * parent node:
 *
 * When we split/rewrite a node, we do all the updates in memory without
 * waiting for any writes to complete - we allocate the new node(s) and update
 * the parent node, possibly recursively up to the root.
 *
 * The end result is that we have one or more new nodes being written -
 * possibly several, if there were multiple splits - and then a write (updating
 * an interior node) which will make all these new nodes visible.
 *
 * Additionally, as we split/rewrite nodes we free the old nodes - but the old
 * nodes can't be freed (their space on disk can't be reclaimed) until the
 * update to the interior node that makes the new node visible completes -
 * until then, the old nodes are still reachable on disk.
 */
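
/*
 * So, roughly, the ordering for a single split is:
 *
 *  1) Allocate the new node(s), populate them and update the parent in
 *     memory, possibly recursively up to a new root
 *  2) Write out the new node(s)
 *  3) Once those writes complete, write the interior node update that
 *     makes them visible
 *  4) Only then free the old node(s) and reclaim their space on disk
 */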
struct btree_update {
	struct closure			cl;
	struct bch_fs			*c;
	u64				start_time;
	unsigned long			ip_started;

	struct list_head		list;
	struct list_head		unwritten_list;

	enum btree_update_mode		mode;
	enum bch_trans_commit_flags	flags;
	unsigned			nodes_written:1;
	unsigned			took_gc_lock:1;

	enum btree_id			btree_id;
	unsigned			update_level_start;
	unsigned			update_level_end;

	struct disk_reservation		disk_res;

	/*
	 * BTREE_UPDATE_node:
	 * The update that made the new nodes visible was a regular update to an
	 * existing interior node - @b. We can't write out the update to @b
	 * until the new nodes we created are finished writing, so we block @b
	 * from writing by putting this btree_update on the
	 * @b->write_blocked list with @write_blocked_list:
	 */
	struct btree			*b;
	struct list_head		write_blocked_list;

	/*
	 * We may be freeing nodes that were dirty, and thus had journal entries
	 * pinned: we need to transfer the oldest of those pins to the
	 * btree_update operation, and release it when the new node(s)
	 * are all persistent and reachable:
	 */
	struct journal_entry_pin	journal;

	/* Preallocated nodes we reserve when we start the update: */
	struct prealloc_nodes {
		struct btree		*b[BTREE_UPDATE_NODES_MAX];
		unsigned		nr;
	}				prealloc_nodes[2];

	/* Nodes being freed: */
	struct keylist			old_keys;
	u64				_old_keys[BTREE_UPDATE_NODES_MAX *
						  BKEY_BTREE_PTR_U64s_MAX];

	/* Nodes being added: */
	struct keylist			new_keys;
	u64				_new_keys[BTREE_UPDATE_NODES_MAX *
						  BKEY_BTREE_PTR_U64s_MAX];

	/* New nodes, that will be made reachable by this update: */
	struct btree			*new_nodes[BTREE_UPDATE_NODES_MAX];
	unsigned			nr_new_nodes;

	struct btree			*old_nodes[BTREE_UPDATE_NODES_MAX];
	__le64				old_nodes_seq[BTREE_UPDATE_NODES_MAX];
	unsigned			nr_old_nodes;

	open_bucket_idx_t		open_buckets[BTREE_UPDATE_NODES_MAX *
						     BCH_REPLICAS_MAX];
	open_bucket_idx_t		nr_open_buckets;

	unsigned			journal_u64s;
	u64				journal_entries[BTREE_UPDATE_JOURNAL_RES];

	/* Only here to reduce stack usage on recursive splits: */
	struct keylist			parent_keys;
	/*
	 * Enough room for btree_split's keys without realloc - btree node
	 * pointers never have crc/compression info, so we only need to account
	 * for the pointers for three keys
	 */
	u64				inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
};
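
/*
 * The keylists above are backed by the fixed _old_keys/_new_keys arrays,
 * sized for the worst case of BTREE_UPDATE_NODES_MAX maximum-size btree
 * pointers. A sketch of how they would be tied together when the update is
 * initialized (assuming the standard keylist helper):
 *
 *	bch2_keylist_init(&as->old_keys, as->_old_keys);
 *	bch2_keylist_init(&as->new_keys, as->_new_keys);
 */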

struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
						  struct btree_trans *,
						  struct btree *,
						  struct bkey_format);

int bch2_btree_split_leaf(struct btree_trans *, btree_path_idx_t, unsigned);

int bch2_btree_increase_depth(struct btree_trans *, btree_path_idx_t, unsigned);

int __bch2_foreground_maybe_merge(struct btree_trans *, btree_path_idx_t,
				  unsigned, unsigned, enum btree_node_sibling);

static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
					btree_path_idx_t path_idx,
					unsigned level, unsigned flags,
					enum btree_node_sibling sib)
{
	struct btree_path *path = trans->paths + path_idx;
	struct btree *b;

	EBUG_ON(!btree_node_locked(path, level));

	if (bch2_btree_node_merging_disabled)
		return 0;

	b = path->l[level].b;

	/*
	 * sib_u64s[] is an estimate of @b's size were it merged with that
	 * sibling; nothing to do if the result would still be over the
	 * foreground merge threshold:
	 */
	if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
		return 0;

	return __bch2_foreground_maybe_merge(trans, path_idx, level, flags, sib);
}

static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
					      btree_path_idx_t path,
					      unsigned level,
					      unsigned flags)
{
	bch2_trans_verify_not_unlocked_or_in_restart(trans);

	/*
	 * Try merging with the previous sibling first, then the next; the
	 * GNU ?: returns the first nonzero error:
	 */
	return bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
						   btree_prev_sib) ?:
		bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
						    btree_next_sib);
}

int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
			    struct btree *, unsigned);
int bch2_btree_node_rewrite_pos(struct btree_trans *,
				enum btree_id, unsigned,
				struct bpos, unsigned);
int bch2_btree_node_rewrite_key_get_iter(struct btree_trans *,
					 struct btree *, unsigned);

void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);

int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
			       struct btree *, struct bkey_i *,
			       unsigned, bool);
int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
					struct bkey_i *, unsigned, bool);

void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);

int bch2_btree_root_alloc_fake_trans(struct btree_trans *, enum btree_id, unsigned);
void bch2_btree_root_alloc_fake(struct bch_fs *, enum btree_id, unsigned);

static inline unsigned btree_update_reserve_required(struct bch_fs *c,
						     struct btree *b)
{
	unsigned depth = btree_node_root(c, b)->c.level + 1;

	/*
	 * Number of nodes we might have to allocate in a worst case btree
	 * split operation - we split all the way up to the root, then allocate
	 * a new root, unless we're already at max depth:
	 */
	if (depth < BTREE_MAX_DEPTH)
		return (depth - b->c.level) * 2 + 1;
	else
		return (depth - b->c.level) * 2 - 1;
}
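
/*
 * Worked example: with the root at level 2 (depth 3) and @b a leaf, the worst
 * case is two new nodes at each of levels 0-2 plus a brand new root:
 * (3 - 0) * 2 + 1 = 7. At BTREE_MAX_DEPTH the root can be rewritten but not
 * split, so it needs one node rather than three - hence the - 1.
 */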

static inline void btree_node_reset_sib_u64s(struct btree *b)
{
	b->sib_u64s[0] = b->nr.live_u64s;
	b->sib_u64s[1] = b->nr.live_u64s;
}

static inline void *btree_data_end(struct btree *b)
{
	return (void *) b->data + btree_buf_bytes(b);
}

static inline struct bkey_packed *unwritten_whiteouts_start(struct btree *b)
{
	return (void *) ((u64 *) btree_data_end(b) - b->whiteout_u64s);
}

static inline struct bkey_packed *unwritten_whiteouts_end(struct btree *b)
{
	return btree_data_end(b);
}
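
/*
 * Unwritten whiteouts accumulate backwards from the end of the node buffer,
 * so the layout is roughly:
 *
 *	b->data                                     btree_data_end(b)
 *	| btree_node / bsets ... | free | unwritten whiteouts |
 *	                                ^
 *	                 unwritten_whiteouts_start(b)
 */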

static inline void *write_block(struct btree *b)
{
	return (void *) b->data + (b->written << 9);
}
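
/*
 * @b->written counts 512 byte sectors, hence the shift above: e.g. with
 * written == 8, write_block() points 4096 bytes into the node buffer.
 */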

static inline bool __btree_addr_written(struct btree *b, void *p)
{
	return p < write_block(b);
}

static inline bool bset_written(struct btree *b, struct bset *i)
{
	return __btree_addr_written(b, i);
}

static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
{
	return __btree_addr_written(b, k);
}

static inline ssize_t __bch2_btree_u64s_remaining(struct btree *b, void *end)
{
	ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
		b->whiteout_u64s;
	ssize_t total = btree_buf_bytes(b) >> 3;

	/* Always leave one extra u64 for bch2_varint_decode: */
	used++;

	return total - used;
}

static inline size_t bch2_btree_keys_u64s_remaining(struct btree *b)
{
	ssize_t remaining = __bch2_btree_u64s_remaining(b,
				btree_bkey_last(b, bset_tree_last(b)));

	BUG_ON(remaining < 0);

	/* A bset that's already been written out can't be appended to: */
	if (bset_written(b, btree_bset_last(b)))
		return 0;

	return remaining;
}

#define BTREE_WRITE_SET_U64s_BITS	9

static inline unsigned btree_write_set_buffer(struct btree *b)
{
	/*
	 * Could buffer up larger amounts of keys for btrees with larger keys,
	 * pending benchmarking:
	 */
	return 8 << BTREE_WRITE_SET_U64s_BITS;
}
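
/*
 * i.e. 2^9 = 512 u64s; the 8 << converts that to bytes, so keys are buffered
 * up to 4096 bytes per bset before want_new_bset() below asks for a fresh
 * one.
 */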

static inline struct btree_node_entry *want_new_bset(struct bch_fs *c, struct btree *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct btree_node_entry *bne = max(write_block(b),
			(void *) btree_bkey_last(b, t));
	ssize_t remaining_space =
		__bch2_btree_u64s_remaining(b, bne->keys.start);

	if (unlikely(bset_written(b, bset(b, t)))) {
		/* Last bset already written out: start a new one if there's room on disk: */
		if (b->written + block_sectors(c) <= btree_sectors(c))
			return bne;
	} else {
		/*
		 * Start a new bset once the current one has grown past the
		 * write set buffer size - provided there's also room for it:
		 */
		if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) &&
		    remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
			return bne;
	}

	return NULL;
}

static inline void push_whiteout(struct btree *b, struct bpos pos)
{
	struct bkey_packed k;

	BUG_ON(bch2_btree_keys_u64s_remaining(b) < BKEY_U64s);
	EBUG_ON(btree_node_just_written(b));

	if (!bkey_pack_pos(&k, pos, b)) {
		/* Position didn't pack: fall back to an unpacked key: */
		struct bkey *u = (void *) &k;

		bkey_init(u);
		u->p = pos;
	}

	k.needs_whiteout = true;

	b->whiteout_u64s += k.u64s;
	bkey_p_copy(unwritten_whiteouts_start(b), &k);
}

/*
 * write lock must be held on @b (else the dirty bset that we were going to
 * insert into could be written out from under us)
 */
static inline bool bch2_btree_node_insert_fits(struct btree *b, unsigned u64s)
{
	if (unlikely(btree_node_need_rewrite(b)))
		return false;

	return u64s <= bch2_btree_keys_u64s_remaining(b);
}

void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);

bool bch2_btree_interior_updates_flush(struct bch_fs *);

void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
					struct jset_entry *, unsigned long);

void bch2_async_btree_node_rewrites_flush(struct bch_fs *);
void bch2_do_pending_node_rewrites(struct bch_fs *);
void bch2_free_pending_node_rewrites(struct bch_fs *);

void bch2_btree_reserve_cache_to_text(struct printbuf *, struct bch_fs *);

void bch2_fs_btree_interior_update_exit(struct bch_fs *);
void bch2_fs_btree_interior_update_init_early(struct bch_fs *);
int bch2_fs_btree_interior_update_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */