/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_EC_H
#define _BCACHEFS_EC_H

#include "ec_types.h"
#include "buckets_types.h"
#include "extents_types.h"

enum bkey_invalid_flags;

int bch2_stripe_invalid(struct bch_fs *, struct bkey_s_c,
			enum bkey_invalid_flags, struct printbuf *);
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
			 struct bkey_s_c);
int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s, unsigned);

#define bch2_bkey_ops_stripe ((struct bkey_ops) {	\
	.key_invalid	= bch2_stripe_invalid,		\
	.val_to_text	= bch2_stripe_to_text,		\
	.swab		= bch2_ptr_swab,		\
	.trigger	= bch2_trigger_stripe,		\
	.min_val_size	= 8,				\
})
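
/*
 * Rough sketch of the stripe value layout, as implied by the offset helpers
 * below (see struct bch_stripe for the authoritative definition):
 *
 *	fixed bch_stripe header
 *	nr_blocks x struct bch_extent_ptr
 *	nr_blocks x stripe_csums_per_device() checksums,
 *		    bch_crc_bytes[csum_type] bytes each
 *	nr_blocks x __le16 per-block sector counts
 *		    (stripe_blockcount_get()/_set())
 */
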
static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
{
	return DIV_ROUND_UP(le16_to_cpu(s->sectors),
			    1 << s->csum_granularity_bits);
}

static inline unsigned stripe_csum_offset(const struct bch_stripe *s,
					  unsigned dev, unsigned csum_idx)
{
	EBUG_ON(s->csum_type >= BCH_CSUM_NR);

	unsigned csum_bytes = bch_crc_bytes[s->csum_type];

	return sizeof(struct bch_stripe) +
		sizeof(struct bch_extent_ptr) * s->nr_blocks +
		(dev * stripe_csums_per_device(s) + csum_idx) * csum_bytes;
}

static inline unsigned stripe_blockcount_offset(const struct bch_stripe *s,
						unsigned idx)
{
	return stripe_csum_offset(s, s->nr_blocks, 0) +
		sizeof(u16) * idx;
}

static inline unsigned stripe_blockcount_get(const struct bch_stripe *s,
					     unsigned idx)
{
	return le16_to_cpup((void *) s + stripe_blockcount_offset(s, idx));
}

static inline void stripe_blockcount_set(struct bch_stripe *s,
					 unsigned idx, unsigned v)
{
	__le16 *p = (void *) s + stripe_blockcount_offset(s, idx);

	*p = cpu_to_le16(v);
}

static inline unsigned stripe_val_u64s(const struct bch_stripe *s)
{
	return DIV_ROUND_UP(stripe_blockcount_offset(s, s->nr_blocks),
			    sizeof(u64));
}

static inline void *stripe_csum(struct bch_stripe *s,
				unsigned block, unsigned csum_idx)
{
	EBUG_ON(block >= s->nr_blocks);
	EBUG_ON(csum_idx >= stripe_csums_per_device(s));

	return (void *) s + stripe_csum_offset(s, block, csum_idx);
}

static inline struct bch_csum stripe_csum_get(struct bch_stripe *s,
					      unsigned block, unsigned csum_idx)
{
	struct bch_csum csum = { 0 };

	memcpy(&csum, stripe_csum(s, block, csum_idx), bch_crc_bytes[s->csum_type]);
	return csum;
}

static inline void stripe_csum_set(struct bch_stripe *s,
				   unsigned block, unsigned csum_idx,
				   struct bch_csum csum)
{
	memcpy(stripe_csum(s, block, csum_idx), &csum, bch_crc_bytes[s->csum_type]);
}

static inline bool __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe_ptr,
					     const struct bch_extent_ptr *data_ptr,
					     unsigned sectors)
{
	return data_ptr->dev	== stripe_ptr->dev &&
		data_ptr->gen	== stripe_ptr->gen &&
		data_ptr->offset >= stripe_ptr->offset &&
		data_ptr->offset <  stripe_ptr->offset + sectors;
}

static inline bool bch2_ptr_matches_stripe(const struct bch_stripe *s,
					   struct extent_ptr_decoded p)
{
	unsigned nr_data = s->nr_blocks - s->nr_redundant;

	BUG_ON(!p.has_ec);

	if (p.ec.block >= nr_data)
		return false;

	return __bch2_ptr_matches_stripe(&s->ptrs[p.ec.block], &p.ptr,
					 le16_to_cpu(s->sectors));
}

static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
					     struct extent_ptr_decoded p)
{
	unsigned nr_data = m->nr_blocks - m->nr_redundant;

	BUG_ON(!p.has_ec);

	if (p.ec.block >= nr_data)
		return false;

	return __bch2_ptr_matches_stripe(&m->ptrs[p.ec.block], &p.ptr,
					 m->sectors);
}

struct bch_read_bio;

struct ec_stripe_buf {
	/* might not be buffering the entire stripe: */
	unsigned		offset;
	unsigned		size;
	unsigned long		valid[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];

	void			*data[BCH_BKEY_PTRS_MAX];

	__BKEY_PADDED(key, 255);
};

struct ec_stripe_head;

enum ec_stripe_ref {
	STRIPE_REF_io,
	STRIPE_REF_stripe,
	STRIPE_REF_NR
};

struct ec_stripe_new {
	struct bch_fs		*c;
	struct ec_stripe_head	*h;
	struct mutex		lock;
	struct list_head	list;

	struct hlist_node	hash;
	u64			idx;

	struct closure		iodone;

	atomic_t		ref[STRIPE_REF_NR];

	int			err;

	u8			nr_data;
	u8			nr_parity;
	bool			allocated;
	bool			pending;
	bool			have_existing_stripe;

	unsigned long		blocks_gotten[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	unsigned long		blocks_allocated[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	open_bucket_idx_t	blocks[BCH_BKEY_PTRS_MAX];
	struct disk_reservation	res;

	struct ec_stripe_buf	new_stripe;
	struct ec_stripe_buf	existing_stripe;
};

struct ec_stripe_head {
	struct list_head	list;
	struct mutex		lock;

	unsigned		target;
	unsigned		algo;
	unsigned		redundancy;
	enum bch_watermark	watermark;

	struct bch_devs_mask	devs;
	unsigned		nr_active_devs;

	unsigned		blocksize;

	struct dev_stripe_state	block_stripe;
	struct dev_stripe_state	parity_stripe;

	struct ec_stripe_new	*s;
};

int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *);

void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);

void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *);

int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);

void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
			unsigned, unsigned, unsigned,
			enum bch_watermark, struct closure *);

void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_insert(struct bch_fs *, struct stripe *, size_t);

void bch2_do_stripe_deletes(struct bch_fs *);
void bch2_ec_do_stripe_creates(struct bch_fs *);
void bch2_ec_stripe_new_free(struct bch_fs *, struct ec_stripe_new *);
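
/*
 * Reference counting for ec_stripe_new, summarizing the helpers below:
 * dropping the last STRIPE_REF_io ref kicks off stripe creation via
 * bch2_ec_do_stripe_creates(), dropping the last STRIPE_REF_stripe ref
 * frees the object via bch2_ec_stripe_new_free().
 */
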
static inline void ec_stripe_new_get(struct ec_stripe_new *s,
				     enum ec_stripe_ref ref)
{
	atomic_inc(&s->ref[ref]);
}

static inline void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s,
				     enum ec_stripe_ref ref)
{
	BUG_ON(atomic_read(&s->ref[ref]) <= 0);

	if (atomic_dec_and_test(&s->ref[ref]))
		switch (ref) {
		case STRIPE_REF_stripe:
			bch2_ec_stripe_new_free(c, s);
			break;
		case STRIPE_REF_io:
			bch2_ec_do_stripe_creates(c);
			break;
		default:
			BUG();
		}
}

void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
void bch2_fs_ec_stop(struct bch_fs *);
void bch2_fs_ec_flush(struct bch_fs *);

int bch2_stripes_read(struct bch_fs *);

void bch2_stripes_heap_to_text(struct printbuf *, struct bch_fs *);
void bch2_new_stripes_to_text(struct printbuf *, struct bch_fs *);

void bch2_fs_ec_exit(struct bch_fs *);
void bch2_fs_ec_init_early(struct bch_fs *);
int bch2_fs_ec_init(struct bch_fs *);

#endif /* _BCACHEFS_EC_H */