/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H
#define _BCACHEFS_BTREE_UPDATE_INTERIOR_H

#include "btree_cache.h"
#include "btree_locking.h"
#include "btree_update.h"

#define BTREE_UPDATE_NODES_MAX		((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)

#define BTREE_UPDATE_JOURNAL_RES	(BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))

/*
 * Tracks an in-progress split/rewrite of a btree node and the update to the
 * parent node:
 *
 * When we split/rewrite a node, we do all the updates in memory without
 * waiting for any writes to complete - we allocate the new node(s) and update
 * the parent node, possibly recursively up to the root.
 *
 * The end result is that we have one or more new nodes being written -
 * possibly several, if there were multiple splits - and then a write (updating
 * an interior node) which will make all these new nodes visible.
 *
 * Additionally, as we split/rewrite nodes we free the old nodes - but the old
 * nodes can't be freed (their space on disk can't be reclaimed) until the
 * update to the interior node that makes the new node visible completes -
 * until then, the old nodes are still reachable on disk.
 */
struct btree_update {
	struct closure			cl;
	struct bch_fs			*c;
	u64				start_time;
	unsigned long			ip_started;

	struct list_head		list;
	struct list_head		unwritten_list;

	/* What kind of update are we doing? */
	enum {
		BTREE_INTERIOR_NO_UPDATE,
		BTREE_INTERIOR_UPDATING_NODE,
		BTREE_INTERIOR_UPDATING_ROOT,
		BTREE_INTERIOR_UPDATING_AS,
	} mode;

	unsigned			nodes_written:1;
	unsigned			took_gc_lock:1;

	enum btree_id			btree_id;
	unsigned			update_level;

	struct disk_reservation		disk_res;

	/*
	 * BTREE_INTERIOR_UPDATING_NODE:
	 * The update that made the new nodes visible was a regular update to an
	 * existing interior node - @b. We can't write out the update to @b
	 * until the new nodes we created are finished writing, so we block @b
	 * from writing by putting this btree_update on the
	 * @b->write_blocked list with @write_blocked_list:
	 */
	struct btree			*b;
	struct list_head		write_blocked_list;

	/*
	 * We may be freeing nodes that were dirty, and thus had journal entries
	 * pinned: we need to transfer the oldest of those pins to the
	 * btree_update operation, and release it when the new node(s)
	 * are all persistent and reachable:
	 */
	struct journal_entry_pin	journal;

	/* Preallocated nodes we reserve when we start the update: */
	struct prealloc_nodes {
		struct btree		*b[BTREE_UPDATE_NODES_MAX];
		unsigned		nr;
	}				prealloc_nodes[2];

	/* Nodes being freed: */
	struct keylist			old_keys;
	u64				_old_keys[BTREE_UPDATE_NODES_MAX *
						  BKEY_BTREE_PTR_U64s_MAX];

	/* Nodes being added: */
	struct keylist			new_keys;
	u64				_new_keys[BTREE_UPDATE_NODES_MAX *
						  BKEY_BTREE_PTR_U64s_MAX];

	/* New nodes that will be made reachable by this update: */
	struct btree			*new_nodes[BTREE_UPDATE_NODES_MAX];
	unsigned			nr_new_nodes;

	struct btree			*old_nodes[BTREE_UPDATE_NODES_MAX];
	__le64				old_nodes_seq[BTREE_UPDATE_NODES_MAX];
	unsigned			nr_old_nodes;

	open_bucket_idx_t		open_buckets[BTREE_UPDATE_NODES_MAX *
						     BCH_REPLICAS_MAX];
	open_bucket_idx_t		nr_open_buckets;

	unsigned			journal_u64s;
	u64				journal_entries[BTREE_UPDATE_JOURNAL_RES];

	/* Only here to reduce stack usage on recursive splits: */
	struct keylist			parent_keys;
	/*
	 * Enough room for btree_split's keys without realloc - btree node
	 * pointers never have crc/compression info, so we only need to account
	 * for the pointers for three keys
	 */
	u64				inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
};
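/*
 * A worked sizing example (illustrative only - BTREE_MAX_DEPTH and
 * GC_MERGE_NODES are defined in other bcachefs headers; 4 each is assumed
 * here): BTREE_UPDATE_NODES_MAX = (4 - 2) * 2 + 4 = 8, so the
 * prealloc_nodes/new_nodes/old_nodes arrays above each hold up to 8 nodes,
 * and BTREE_UPDATE_JOURNAL_RES reserves 8 * (BKEY_BTREE_PTR_U64s_MAX + 1)
 * u64s of journal space - a max-size btree node pointer per node this update
 * might touch, the +1 covering the one-u64 jset_entry header.
 */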
struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
						  struct btree_trans *,
						  struct btree *,
						  struct bkey_format);

int bch2_btree_split_leaf(struct btree_trans *, btree_path_idx_t, unsigned);

int bch2_btree_increase_depth(struct btree_trans *, btree_path_idx_t, unsigned);

int __bch2_foreground_maybe_merge(struct btree_trans *, btree_path_idx_t,
				  unsigned, unsigned, enum btree_node_sibling);

static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
					btree_path_idx_t path_idx,
					unsigned level, unsigned flags,
					enum btree_node_sibling sib)
{
	struct btree_path *path = trans->paths + path_idx;
	struct btree *b;

	EBUG_ON(!btree_node_locked(path, level));

	b = path->l[level].b;
	if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
		return 0;

	return __bch2_foreground_maybe_merge(trans, path_idx, level, flags, sib);
}

static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
					      btree_path_idx_t path,
					      unsigned level,
					      unsigned flags)
{
	return bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
						   btree_prev_sib) ?:
		bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
						   btree_next_sib);
}
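/*
 * The ?: chain above is the GCC "elvis" extension: merging with the previous
 * sibling runs first, and the next sibling is only tried if that returned 0.
 * A sketch of the equivalent long-hand form:
 *
 *	int ret = bch2_foreground_maybe_merge_sibling(..., btree_prev_sib);
 *	if (!ret)
 *		ret = bch2_foreground_maybe_merge_sibling(..., btree_next_sib);
 *	return ret;
 *
 * so a transaction restart or other error from the first merge attempt is
 * returned immediately rather than masked by the second.
 */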
int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
			    struct btree *, unsigned);
void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
			       struct btree *, struct bkey_i *,
			       unsigned, bool);
int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
					struct bkey_i *, unsigned, bool);

void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);

static inline unsigned btree_update_reserve_required(struct bch_fs *c,
						     struct btree *b)
{
	unsigned depth = btree_node_root(c, b)->c.level + 1;

	/*
	 * Number of nodes we might have to allocate in a worst case btree
	 * split operation - we split all the way up to the root, then allocate
	 * a new root, unless we're already at max depth:
	 */
	if (depth < BTREE_MAX_DEPTH)
		return (depth - b->c.level) * 2 + 1;
	else
		return (depth - b->c.level) * 2 - 1;
}

static inline void btree_node_reset_sib_u64s(struct btree *b)
{
	b->sib_u64s[0] = b->nr.live_u64s;
	b->sib_u64s[1] = b->nr.live_u64s;
}

static inline void *btree_data_end(struct btree *b)
{
	return (void *) b->data + btree_buf_bytes(b);
}

static inline struct bkey_packed *unwritten_whiteouts_start(struct btree *b)
{
	return (void *) ((u64 *) btree_data_end(b) - b->whiteout_u64s);
}

static inline struct bkey_packed *unwritten_whiteouts_end(struct btree *b)
{
	return btree_data_end(b);
}

static inline void *write_block(struct btree *b)
{
	return (void *) b->data + (b->written << 9);
}

static inline bool __btree_addr_written(struct btree *b, void *p)
{
	return p < write_block(b);
}

static inline bool bset_written(struct btree *b, struct bset *i)
{
	return __btree_addr_written(b, i);
}

static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
{
	return __btree_addr_written(b, k);
}

static inline ssize_t __bch2_btree_u64s_remaining(struct btree *b, void *end)
{
	ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
		b->whiteout_u64s;
	ssize_t total = btree_buf_bytes(b) >> 3;

	/* Always leave one extra u64 for bch2_varint_decode: */
	used++;

	return total - used;
}

static inline size_t bch2_btree_keys_u64s_remaining(struct btree *b)
{
	ssize_t remaining = __bch2_btree_u64s_remaining(b,
				btree_bkey_last(b, bset_tree_last(b)));

	BUG_ON(remaining < 0);

	if (bset_written(b, btree_bset_last(b)))
		return 0;

	return remaining;
}

#define BTREE_WRITE_SET_U64s_BITS	9

static inline unsigned btree_write_set_buffer(struct btree *b)
{
	/*
	 * Could buffer up larger amounts of keys for btrees with larger keys,
	 * pending benchmarking:
	 */
	return 8 << BTREE_WRITE_SET_U64s_BITS;
}

static inline struct btree_node_entry *want_new_bset(struct bch_fs *c, struct btree *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct btree_node_entry *bne = max(write_block(b),
			(void *) btree_bkey_last(b, bset_tree_last(b)));
	ssize_t remaining_space =
		__bch2_btree_u64s_remaining(b, bne->keys.start);

	if (unlikely(bset_written(b, bset(b, t)))) {
		if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
			return bne;
	} else {
		if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) &&
		    remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
			return bne;
	}

	return NULL;
}
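/*
 * An informal sketch of the node buffer layout the helpers above assume
 * (reconstructed from the accessors, not normative): keys in the open bset
 * grow up from the front of the buffer, while unwritten whiteouts accumulate
 * at the very end, growing downwards as push_whiteout() below bumps
 * b->whiteout_u64s:
 *
 *	b->data      btree_bkey_last()   unwritten_whiteouts_start()   btree_data_end()
 *	|                   |                      |                        |
 *	v                   v                      v                        v
 *	[ written bsets + open bset | free space.. | unwritten whiteouts    ]
 *
 * __bch2_btree_u64s_remaining() is the size of that free-space gap in u64s,
 * less one u64 of slack reserved for bch2_varint_decode().
 */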
static inline void push_whiteout(struct btree *b, struct bpos pos)
{
	struct bkey_packed k;

	BUG_ON(bch2_btree_keys_u64s_remaining(b) < BKEY_U64s);
	EBUG_ON(btree_node_just_written(b));

	if (!bkey_pack_pos(&k, pos, b)) {
		struct bkey *u = (void *) &k;

		bkey_init(u);
		u->p = pos;
	}

	k.needs_whiteout = true;

	b->whiteout_u64s += k.u64s;
	bkey_p_copy(unwritten_whiteouts_start(b), &k);
}

/*
 * write lock must be held on @b (else the dirty bset that we were going to
 * insert into could be written out from under us)
 */
static inline bool bch2_btree_node_insert_fits(struct btree *b, unsigned u64s)
{
	if (unlikely(btree_node_need_rewrite(b)))
		return false;

	return u64s <= bch2_btree_keys_u64s_remaining(b);
}

void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);

bool bch2_btree_interior_updates_flush(struct bch_fs *);

void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
					struct jset_entry *, unsigned long);

void bch2_do_pending_node_rewrites(struct bch_fs *);
void bch2_free_pending_node_rewrites(struct bch_fs *);

void bch2_fs_btree_interior_update_exit(struct bch_fs *);
void bch2_fs_btree_interior_update_init_early(struct bch_fs *);
int bch2_fs_btree_interior_update_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */