/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_ALLOC_BACKGROUND_H
#define _BCACHEFS_ALLOC_BACKGROUND_H

#include "bcachefs.h"
#include "alloc_types.h"
#include "buckets.h"
#include "debug.h"
#include "super.h"

/* How out of date a pointer gen is allowed to be: */
#define BUCKET_GC_GEN_MAX	96U

static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
{
	struct bch_dev *ca;

	if (!bch2_dev_exists2(c, pos.inode))
		return false;

	ca = bch_dev_bkey_exists(c, pos.inode);
	return pos.offset >= ca->mi.first_bucket &&
		pos.offset < ca->mi.nbuckets;
}

/* How out of date the oldest pointer into this bucket is: */
static inline u8 alloc_gc_gen(struct bch_alloc_v4 a)
{
	return a.gen - a.oldest_gen;
}

/* Data type an alloc key should carry, given its sector/stripe counters: */
static inline enum bch_data_type __alloc_data_type(u32 dirty_sectors,
						   u32 cached_sectors,
						   u32 stripe,
						   struct bch_alloc_v4 a,
						   enum bch_data_type data_type)
{
	if (dirty_sectors)
		return data_type;
	if (stripe)
		return BCH_DATA_stripe;
	if (cached_sectors)
		return BCH_DATA_cached;
	if (BCH_ALLOC_V4_NEED_DISCARD(&a))
		return BCH_DATA_need_discard;
	if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX)
		return BCH_DATA_need_gc_gens;
	return BCH_DATA_free;
}

static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
						 enum bch_data_type data_type)
{
	return __alloc_data_type(a.dirty_sectors, a.cached_sectors,
				 a.stripe, a, data_type);
}

/* LRU index for cached-data buckets: the last read time */
static inline u64 alloc_lru_idx(struct bch_alloc_v4 a)
{
	return a.data_type == BCH_DATA_cached ? a.io_time[READ] : 0;
}

static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a)
{
	return ((u64) alloc_gc_gen(a) >> 4) << 56;
}

static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_v4 a)
{
	pos.offset |= alloc_freespace_genbits(a);
	return pos;
}
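/*
 * Worked example (illustrative values, not from real usage): a bucket with
 * gen 0x48 and oldest_gen 0x08 has a gc gen of 0x40, so
 * alloc_freespace_genbits() yields (0x40 >> 4) << 56 = 4ULL << 56;
 * alloc_freespace_pos() ORs that into the high bits of the key's offset,
 * so freespace btree entries for buckets with staler pointers sort after
 * those for fresher ones.
 */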
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *, struct btree_iter *, struct bpos);

void bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);
struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c);

int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);

#define ALLOC_SCAN_BATCH(ca)		max_t(size_t, 1, (ca)->mi.nbuckets >> 9)

int bch2_alloc_v1_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
int bch2_alloc_v2_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
int bch2_alloc_v3_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
int bch2_alloc_v4_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
void bch2_alloc_v4_swab(struct bkey_s);
void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);

#define bch2_bkey_ops_alloc ((struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v1_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trans_trigger	= bch2_trans_mark_alloc,	\
	.atomic_trigger	= bch2_mark_alloc,		\
})

#define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v2_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trans_trigger	= bch2_trans_mark_alloc,	\
	.atomic_trigger	= bch2_mark_alloc,		\
})

#define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v3_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trans_trigger	= bch2_trans_mark_alloc,	\
	.atomic_trigger	= bch2_mark_alloc,		\
})

#define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v4_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.swab		= bch2_alloc_v4_swab,		\
	.trans_trigger	= bch2_trans_mark_alloc,	\
	.atomic_trigger	= bch2_mark_alloc,		\
})

static inline bool bkey_is_alloc(const struct bkey *k)
{
	return k->type == KEY_TYPE_alloc ||
		k->type == KEY_TYPE_alloc_v2 ||
		k->type == KEY_TYPE_alloc_v3;
}

int bch2_alloc_read(struct bch_fs *);

int bch2_trans_mark_alloc(struct btree_trans *, enum btree_id, unsigned,
			  struct bkey_s_c, struct bkey_i *, unsigned);
int bch2_check_alloc_info(struct bch_fs *);
int bch2_check_alloc_to_lru_refs(struct bch_fs *);
void bch2_do_discards(struct bch_fs *);

/*
 * How many cached-data buckets to invalidate, so that free buckets
 * (beyond the reserve) come back up to ~1/128th of the device:
 */
static inline u64 should_invalidate_buckets(struct bch_dev *ca,
					    struct bch_dev_usage u)
{
	u64 want_free = ca->mi.nbuckets >> 7;
	u64 free = max_t(s64, 0,
			 u.d[BCH_DATA_free].buckets
			 + u.d[BCH_DATA_need_discard].buckets
			 - bch2_dev_buckets_reserved(ca, RESERVE_none));

	return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
}

void bch2_do_invalidates(struct bch_fs *);

int bch2_fs_freespace_init(struct bch_fs *);

void bch2_recalc_capacity(struct bch_fs *);

void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);

void bch2_fs_allocator_background_init(struct bch_fs *);

#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
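/*
 * Usage sketch (illustrative only, not a new interface): the background
 * paths declared above are driven roughly along these lines, with c and ca
 * obtained from the usual filesystem/device handles:
 *
 *	struct bch_dev_usage u = bch2_dev_usage_read(ca);
 *
 *	if (should_invalidate_buckets(ca, u))
 *		bch2_do_invalidates(c);
 *	bch2_do_discards(c);
 *
 * E.g. with nbuckets = 1 << 20, want_free is 8192; if 5000 buckets are
 * free-or-discardable beyond the reserve and 10000 hold cached data,
 * should_invalidate_buckets() returns 3192.
 */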