/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_WRITE_BUFFER_H
#define _BCACHEFS_BTREE_WRITE_BUFFER_H

#include "bkey.h"
#include "disk_accounting.h"

/*
 * Returns true when buffered keys (incoming plus currently flushing) exceed a
 * quarter of the incoming buffer's capacity, i.e. a flush is worth kicking off:
 */
static inline bool bch2_btree_write_buffer_should_flush(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	return wb->inc.keys.nr + wb->flushing.keys.nr > wb->inc.keys.size / 4;
}

/*
 * Returns true when the incoming buffer is more than three quarters full and
 * writers should wait for a flush before queueing more keys:
 */
static inline bool bch2_btree_write_buffer_must_wait(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	return wb->inc.keys.nr > wb->inc.keys.size * 3 / 4;
}

struct btree_trans;
int bch2_btree_write_buffer_flush_sync(struct btree_trans *);
bool bch2_btree_write_buffer_flush_going_ro(struct bch_fs *);
int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *);
int bch2_btree_write_buffer_tryflush(struct btree_trans *);

struct bkey_buf;
int bch2_btree_write_buffer_maybe_flush(struct btree_trans *, struct bkey_s_c, struct bkey_buf *);

struct journal_keys_to_wb {
	struct btree_write_buffer_keys *wb;
	size_t room;
	u64 seq;
};

/* Order write buffered keys by btree id, then by key position: */
static inline int wb_key_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return cmp_int(l->btree, r->btree) ?: bpos_cmp(l->k.k.p, r->k.k.p);
}

int bch2_accounting_key_to_wb_slowpath(struct bch_fs *,
				       enum btree_id, struct bkey_i_accounting *);

/*
 * Accounting keys are deltas: accumulate into the existing entry for this
 * position in the eytzinger-ordered accounting array, falling back to the
 * slowpath if no entry exists yet:
 */
static inline int bch2_accounting_key_to_wb(struct bch_fs *c,
					    enum btree_id btree, struct bkey_i_accounting *k)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct btree_write_buffered_key search;
	search.btree = btree;
	search.k.k.p = k->k.p;

	unsigned idx = eytzinger0_find(wb->accounting.data, wb->accounting.nr,
				       sizeof(wb->accounting.data[0]),
				       wb_key_cmp, &search);

	if (idx >= wb->accounting.nr)
		return bch2_accounting_key_to_wb_slowpath(c, btree, k);

	struct bkey_i_accounting *dst = bkey_i_to_accounting(&wb->accounting.data[idx].k);
	bch2_accounting_accumulate(dst, accounting_i_to_s_c(k));
	return 0;
}

int bch2_journal_key_to_wb_slowpath(struct bch_fs *,
				    struct journal_keys_to_wb *,
				    enum btree_id, struct bkey_i *);

/* Fast path: append to the write buffer when room was reserved up front: */
static inline int __bch2_journal_key_to_wb(struct bch_fs *c,
					   struct journal_keys_to_wb *dst,
					   enum btree_id btree, struct bkey_i *k)
{
	if (unlikely(!dst->room))
		return bch2_journal_key_to_wb_slowpath(c, dst, btree, k);

	struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys);
	wb_k->journal_seq = dst->seq;
	wb_k->btree = btree;
	bkey_copy(&wb_k->k, k);
	dst->wb->keys.nr++;
	dst->room--;
	return 0;
}

/* Accounting keys are accumulated; all other keys go to the write buffer: */
static inline int bch2_journal_key_to_wb(struct bch_fs *c,
					 struct journal_keys_to_wb *dst,
					 enum btree_id btree, struct bkey_i *k)
{
	EBUG_ON(!dst->seq);

	return k->k.type == KEY_TYPE_accounting
		? bch2_accounting_key_to_wb(c, btree, bkey_i_to_accounting(k))
		: __bch2_journal_key_to_wb(c, dst, btree, k);
}

void bch2_journal_keys_to_write_buffer_start(struct bch_fs *, struct journal_keys_to_wb *, u64);
int bch2_journal_keys_to_write_buffer_end(struct bch_fs *, struct journal_keys_to_wb *);

int bch2_btree_write_buffer_resize(struct bch_fs *, size_t);
void bch2_fs_btree_write_buffer_exit(struct bch_fs *);
int bch2_fs_btree_write_buffer_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_WRITE_BUFFER_H */
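
/*
 * Illustrative usage sketch: moving a batch of journalled keys into the write
 * buffer is a start / per-key / end sequence around the declarations above.
 * This assumes a caller that already has a filesystem @c, a journal sequence
 * number @journal_seq, and its own iteration over keys; those names and the
 * loop are hypothetical stand-ins, only the bch2_* calls come from this header:
 *
 *	struct journal_keys_to_wb dst;
 *	int ret = 0;
 *
 *	bch2_journal_keys_to_write_buffer_start(c, &dst, journal_seq);
 *
 *	for each key @k destined for btree @btree in the batch:
 *		ret = bch2_journal_key_to_wb(c, &dst, btree, k);
 *
 *	ret = bch2_journal_keys_to_write_buffer_end(c, &dst) ?: ret;
 */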