/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H
#define _BCACHEFS_BTREE_UPDATE_INTERIOR_H

#include "btree_cache.h"
#include "btree_locking.h"
#include "btree_update.h"

/*
 * A disk reservation plus up to BTREE_RESERVE_MAX pre-allocated btree nodes,
 * so an interior update can't fail partway through for lack of space:
 */
struct btree_reserve {
	struct disk_reservation	disk_res;
	unsigned		nr;
	struct btree		*b[BTREE_RESERVE_MAX];
};

void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
				struct bkey_format *);

/* Btree node freeing/allocation: */

/*
 * Tracks a btree node that has been (or is about to be) freed in memory, but
 * has _not_ yet been freed on disk (because the write that makes the new
 * node(s) visible and frees the old hasn't completed yet)
 */
struct pending_btree_node_free {
	bool			index_update_done;

	__le64			seq;
	enum btree_id		btree_id;
	unsigned		level;
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

/*
 * Tracks an in progress split/rewrite of a btree node and the update to the
 * parent node:
 *
 * When we split/rewrite a node, we do all the updates in memory without
 * waiting for any writes to complete - we allocate the new node(s) and update
 * the parent node, possibly recursively up to the root.
 *
 * The end result is that we have one or more new nodes being written -
 * possibly several, if there were multiple splits - and then a write (updating
 * an interior node) which will make all these new nodes visible.
 *
 * Additionally, as we split/rewrite nodes we free the old nodes - but the old
 * nodes can't be freed (their space on disk can't be reclaimed) until the
 * update to the interior node that makes the new node visible completes -
 * until then, the old nodes are still reachable on disk.
 */
struct btree_update {
	struct closure			cl;
	struct bch_fs			*c;

	struct list_head		list;

	/* What kind of update are we doing? */
	enum {
		BTREE_INTERIOR_NO_UPDATE,
		BTREE_INTERIOR_UPDATING_NODE,
		BTREE_INTERIOR_UPDATING_ROOT,
		BTREE_INTERIOR_UPDATING_AS,
	} mode;

	unsigned			must_rewrite:1;
	unsigned			nodes_written:1;

	enum btree_id			btree_id;

	struct btree_reserve		*reserve;

	/*
	 * BTREE_INTERIOR_UPDATING_NODE:
	 * The update that made the new nodes visible was a regular update to an
	 * existing interior node - @b. We can't write out the update to @b
	 * until the new nodes we created are finished writing, so we block @b
	 * from writing by putting this btree_interior update on the
	 * @b->write_blocked list with @write_blocked_list:
	 */
	struct btree			*b;
	struct list_head		write_blocked_list;

	/*
	 * BTREE_INTERIOR_UPDATING_AS: btree node we updated was freed, so
	 * we're now blocking another btree_update
	 * @parent_as - btree_update that's waiting on our nodes to finish
	 * writing, before it can make new nodes visible on disk
	 * @wait - list of child btree_updates that are waiting on this
	 * btree_update to make all the new nodes visible before they can free
	 * their old btree nodes
	 */
	struct btree_update		*parent_as;
	struct closure_waitlist		wait;

	/*
	 * We may be freeing nodes that were dirty, and thus had journal entries
	 * pinned: we need to transfer the oldest of those pins to the
	 * btree_update operation, and release it when the new node(s)
	 * are all persistent and reachable:
	 */
	struct journal_entry_pin	journal;

	u64				journal_seq;

	/*
	 * Nodes being freed:
	 * Protected by c->btree_node_pending_free_lock
	 */
	struct pending_btree_node_free	pending[BTREE_MAX_DEPTH + GC_MERGE_NODES];
	unsigned			nr_pending;

	/* New nodes, that will be made reachable by this update: */
	struct btree			*new_nodes[BTREE_MAX_DEPTH * 2 + GC_MERGE_NODES];
	unsigned			nr_new_nodes;

	/* Only here to reduce stack usage on recursive splits: */
	struct keylist			parent_keys;
	/*
	 * Enough room for btree_split's keys without realloc - btree node
	 * pointers never have crc/compression info, so we only need to account
	 * for the pointers for three keys
	 */
	u64				inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
};

/*
 * Iterate over every pending_btree_node_free of every in-flight btree_update;
 * presumably requires c->btree_node_pending_free_lock (per the @pending field
 * comment above) - TODO confirm against callers:
 */
#define for_each_pending_btree_node_free(c, as, p)			\
	list_for_each_entry(as, &c->btree_interior_update_list, list)	\
		for (p = as->pending; p < as->pending + as->nr_pending; p++)

void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
				struct btree_iter *);
void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
void bch2_btree_open_bucket_put(struct bch_fs *, struct btree *);

struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
						  struct btree *,
						  struct bkey_format);

void bch2_btree_update_done(struct btree_update *);
struct btree_update *
bch2_btree_update_start(struct bch_fs *, enum btree_id, unsigned,
			unsigned, struct closure *);

void bch2_btree_interior_update_will_free_node(struct btree_update *,
					       struct btree *);

void bch2_btree_insert_node(struct btree_update *, struct btree *,
			    struct btree_iter *, struct keylist *,
			    unsigned);
int bch2_btree_split_leaf(struct bch_fs *, struct btree_iter *, unsigned);

void __bch2_foreground_maybe_merge(struct bch_fs *, struct btree_iter *,
				   unsigned, unsigned, enum btree_node_sibling);

/*
 * If the node at @level is small enough (sib_u64s[sib] at or below
 * c->btree_foreground_merge_threshold), try merging it with its @sib sibling.
 * Bails out early if the iterator isn't in a usable state or the node can't
 * be relocked:
 */
static inline void bch2_foreground_maybe_merge_sibling(struct bch_fs *c,
					struct btree_iter *iter,
					unsigned level, unsigned flags,
					enum btree_node_sibling sib)
{
	struct btree *b;

	/*
	 * iterators are inconsistent when they hit end of leaf, until
	 * traversed again
	 *
	 * XXX inconsistent how?
	 */
	if (iter->flags & BTREE_ITER_AT_END_OF_LEAF)
		return;

	if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
		return;

	if (!bch2_btree_node_relock(iter, level))
		return;

	b = iter->l[level].b;
	if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
		return;

	__bch2_foreground_maybe_merge(c, iter, level, flags, sib);
}

/* Try merging with both the previous and the next sibling: */
static inline void bch2_foreground_maybe_merge(struct bch_fs *c,
					       struct btree_iter *iter,
					       unsigned level,
					       unsigned flags)
{
	bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
					    btree_prev_sib);
	bch2_foreground_maybe_merge_sibling(c, iter, level, flags,
					    btree_next_sib);
}

void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
/*
 * Worst-case number of new nodes a split starting at @b might allocate:
 * two per level from b->level up to the root, plus possibly a new root.
 */
static inline unsigned btree_update_reserve_required(struct bch_fs *c,
						     struct btree *b)
{
	unsigned depth = btree_node_root(c, b)->level + 1;

	/*
	 * Number of nodes we might have to allocate in a worst case btree
	 * split operation - we split all the way up to the root, then allocate
	 * a new root, unless we're already at max depth:
	 */
	if (depth < BTREE_MAX_DEPTH)
		return (depth - b->level) * 2 + 1;
	else
		return (depth - b->level) * 2 - 1;
}

/* Reset both cached sibling sizes to this node's own live u64 count: */
static inline void btree_node_reset_sib_u64s(struct btree *b)
{
	b->sib_u64s[0] = b->nr.live_u64s;
	b->sib_u64s[1] = b->nr.live_u64s;
}

/* One past the end of @b's in-memory data buffer: */
static inline void *btree_data_end(struct bch_fs *c, struct btree *b)
{
	return (void *) b->data + btree_bytes(c);
}

/*
 * Unwritten whiteouts live at the very end of the node buffer, growing
 * downward: the region is [data_end - whiteout_u64s, data_end).
 */
static inline struct bkey_packed *unwritten_whiteouts_start(struct bch_fs *c,
							    struct btree *b)
{
	return (void *) ((u64 *) btree_data_end(c, b) - b->whiteout_u64s);
}

static inline struct bkey_packed *unwritten_whiteouts_end(struct bch_fs *c,
							  struct btree *b)
{
	return btree_data_end(c, b);
}

/*
 * Address within @b's buffer where the next write will start;
 * b->written is in 512-byte sectors (hence << 9):
 */
static inline void *write_block(struct btree *b)
{
	return (void *) b->data + (b->written << 9);
}

/* An address below write_block() has already been written out: */
static inline bool __btree_addr_written(struct btree *b, void *p)
{
	return p < write_block(b);
}

static inline bool bset_written(struct btree *b, struct bset *i)
{
	return __btree_addr_written(b, i);
}

static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
{
	return __btree_addr_written(b, k);
}

/*
 * u64s of space left in @b between @end and the whiteout region at the tail;
 * may be negative. btree_node_size appears to be in 512-byte sectors
 * (<< 6 == * 512 / sizeof(u64)) - TODO confirm against opts definition:
 */
static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
						 struct btree *b,
						 void *end)
{
	ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
		b->whiteout_u64s +
		b->uncompacted_whiteout_u64s;
	ssize_t total = c->opts.btree_node_size << 6;

	return total - used;
}

/*
 * u64s of key space usable in @b's last bset; 0 if that bset has already
 * been written out (it can no longer be appended to):
 */
static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
						   struct btree *b)
{
	ssize_t remaining = __bch_btree_u64s_remaining(c, b,
				btree_bkey_last(b, bset_tree_last(b)));

	BUG_ON(remaining < 0);

	if (bset_written(b, btree_bset_last(b)))
		return 0;

	return remaining;
}

static inline unsigned btree_write_set_buffer(struct btree *b)
{
	/*
	 * Could buffer up larger amounts of keys for btrees with larger keys,
	 * pending benchmarking:
	 */
	return 4 << 10;
}

/*
 * Decide whether to start a new bset in @b: returns the position of the new
 * btree_node_entry if we should, NULL otherwise. We start a new bset when the
 * current one was already written, or has grown past the write buffer size -
 * in either case only if enough space remains for it to be worthwhile:
 */
static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
						     struct btree *b)
{
	struct bset *i = btree_bset_last(b);
	struct btree_node_entry *bne = max(write_block(b),
			(void *) btree_bkey_last(b, bset_tree_last(b)));
	ssize_t remaining_space =
		__bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);

	if (unlikely(bset_written(b, i))) {
		if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
			return bne;
	} else {
		if (unlikely(vstruct_bytes(i) > btree_write_set_buffer(b)) &&
		    remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
			return bne;
	}

	return NULL;
}

/* Release the whiteout space reserved for @k, if it had any (written keys only): */
static inline void unreserve_whiteout(struct btree *b, struct bkey_packed *k)
{
	if (bkey_written(b, k)) {
		EBUG_ON(b->uncompacted_whiteout_u64s <
			bkeyp_key_u64s(&b->format, k));
		b->uncompacted_whiteout_u64s -=
			bkeyp_key_u64s(&b->format, k);
	}
}

/*
 * Account space for a whiteout for @k: only needed if @k was already written
 * out (an unwritten key can just be dropped from the in-memory bset):
 */
static inline void reserve_whiteout(struct btree *b, struct bkey_packed *k)
{
	if (bkey_written(b, k)) {
		BUG_ON(!k->needs_whiteout);
		b->uncompacted_whiteout_u64s +=
			bkeyp_key_u64s(&b->format, k);
	}
}

/*
 * write lock must be held on @b (else the dirty bset that we were going to
 * insert into could be written out from under us)
 */
static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
					       struct btree *b, unsigned u64s)
{
	if (unlikely(btree_node_fake(b)))
		return false;

	if (btree_node_is_extents(b)) {
		/*
		 * The insert key might split an existing key
		 * (bch2_insert_fixup_extent() -> BCH_EXTENT_OVERLAP_MIDDLE case):
		 */
		u64s += BKEY_EXTENT_U64s_MAX;
	}

	return u64s <= bch_btree_keys_u64s_remaining(c, b);
}
Overstreet 3541c6fdbd8SKent Overstreet static inline bool journal_res_insert_fits(struct btree_insert *trans, 3551c6fdbd8SKent Overstreet struct btree_insert_entry *insert) 3561c6fdbd8SKent Overstreet { 3571c6fdbd8SKent Overstreet unsigned u64s = 0; 3581c6fdbd8SKent Overstreet struct btree_insert_entry *i; 3591c6fdbd8SKent Overstreet 3601c6fdbd8SKent Overstreet /* 3611c6fdbd8SKent Overstreet * If we didn't get a journal reservation, we're in journal replay and 3621c6fdbd8SKent Overstreet * we're not journalling updates: 3631c6fdbd8SKent Overstreet */ 3641c6fdbd8SKent Overstreet if (!trans->journal_res.ref) 3651c6fdbd8SKent Overstreet return true; 3661c6fdbd8SKent Overstreet 3671c6fdbd8SKent Overstreet for (i = insert; i < trans->entries + trans->nr; i++) 3681c6fdbd8SKent Overstreet u64s += jset_u64s(i->k->k.u64s + i->extra_res); 3691c6fdbd8SKent Overstreet 3701c6fdbd8SKent Overstreet return u64s <= trans->journal_res.u64s; 3711c6fdbd8SKent Overstreet } 3721c6fdbd8SKent Overstreet 3731c6fdbd8SKent Overstreet ssize_t bch2_btree_updates_print(struct bch_fs *, char *); 3741c6fdbd8SKent Overstreet 3751c6fdbd8SKent Overstreet size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *); 3761c6fdbd8SKent Overstreet 3771c6fdbd8SKent Overstreet #endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */ 378