/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_UPDATE_H
#define _BCACHEFS_BTREE_UPDATE_H

#include "btree_iter.h"
#include "journal.h"

struct bch_fs;
struct btree;

void bch2_btree_node_prep_for_write(struct btree_trans *,
				    struct btree_path *, struct btree *);
bool bch2_btree_bset_insert_key(struct btree_trans *, struct btree_path *,
				struct btree *, struct btree_node_iter *,
				struct bkey_i *);

int bch2_btree_node_flush0(struct journal *, struct journal_entry_pin *, u64);
int bch2_btree_node_flush1(struct journal *, struct journal_entry_pin *, u64);
void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64);

void bch2_btree_insert_key_leaf(struct btree_trans *, struct btree_path *,
				struct bkey_i *, u64);

enum btree_insert_flags {
	/* First bits for bch_watermark: */
	__BTREE_INSERT_NOFAIL = BCH_WATERMARK_BITS,
	__BTREE_INSERT_NOCHECK_RW,
	__BTREE_INSERT_LAZY_RW,
	__BTREE_INSERT_JOURNAL_REPLAY,
	__BTREE_INSERT_JOURNAL_RECLAIM,
	__BTREE_INSERT_NOWAIT,
	__BTREE_INSERT_GC_LOCK_HELD,
	__BCH_HASH_SET_MUST_CREATE,
	__BCH_HASH_SET_MUST_REPLACE,
};

/* Don't check for -ENOSPC: */
#define BTREE_INSERT_NOFAIL		BIT(__BTREE_INSERT_NOFAIL)

#define BTREE_INSERT_NOCHECK_RW		BIT(__BTREE_INSERT_NOCHECK_RW)
#define BTREE_INSERT_LAZY_RW		BIT(__BTREE_INSERT_LAZY_RW)

/* Insert is for journal replay - don't get journal reservations: */
#define BTREE_INSERT_JOURNAL_REPLAY	BIT(__BTREE_INSERT_JOURNAL_REPLAY)

/* Insert is being called from journal reclaim path: */
#define BTREE_INSERT_JOURNAL_RECLAIM	BIT(__BTREE_INSERT_JOURNAL_RECLAIM)

/* Don't block on allocation failure (for new btree nodes): */
#define BTREE_INSERT_NOWAIT		BIT(__BTREE_INSERT_NOWAIT)
#define BTREE_INSERT_GC_LOCK_HELD	BIT(__BTREE_INSERT_GC_LOCK_HELD)

#define BCH_HASH_SET_MUST_CREATE	BIT(__BCH_HASH_SET_MUST_CREATE)
#define BCH_HASH_SET_MUST_REPLACE	BIT(__BCH_HASH_SET_MUST_REPLACE)
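/*
 * Example (editor's sketch, not part of the interface): commit flags are
 * or'd together and passed through to bch2_trans_commit()/commit_do(),
 * declared below. The iterator setup is elided here; only the flag usage
 * is the point:
 *
 *	ret = commit_do(trans, NULL, NULL,
 *			BTREE_INSERT_NOFAIL|
 *			BTREE_INSERT_LAZY_RW,
 *			bch2_btree_delete_at(trans, &iter, 0));
 */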
int bch2_btree_delete_extent_at(struct btree_trans *, struct btree_iter *,
				unsigned, unsigned);
int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);
int bch2_btree_delete_at_buffered(struct btree_trans *, enum btree_id, struct bpos);
int bch2_btree_delete(struct btree_trans *, enum btree_id, struct bpos, unsigned);

int bch2_btree_insert_nonextent(struct btree_trans *, enum btree_id,
				struct bkey_i *, enum btree_update_flags);

int bch2_btree_insert_trans(struct btree_trans *, enum btree_id, struct bkey_i *,
			    enum btree_update_flags);
int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
		      struct disk_reservation *, int flags);

int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id,
				  struct bpos, struct bpos, unsigned, u64 *);
int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
			    struct bpos, struct bpos, unsigned, u64 *);

int bch2_btree_bit_mod(struct btree_trans *, enum btree_id, struct bpos, bool);

int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id,
				     struct bpos, struct bpos);

/*
 * For use when splitting extents in existing snapshots:
 *
 * If @old_pos is an interior snapshot node, iterate over descendant snapshot
 * nodes: for every descendant snapshot in which @old_pos is overwritten and
 * not visible, emit a whiteout at @new_pos.
 */
static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
						 enum btree_id btree,
						 struct bpos old_pos,
						 struct bpos new_pos)
{
	if (!btree_type_has_snapshots(btree) ||
	    bkey_eq(old_pos, new_pos))
		return 0;

	return __bch2_insert_snapshot_whiteouts(trans, btree, old_pos, new_pos);
}

int bch2_trans_update_extent_overwrite(struct btree_trans *, struct btree_iter *,
				       enum btree_update_flags,
				       struct bkey_s_c, struct bkey_s_c);

int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
			     enum btree_id, struct bpos);

int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
				   struct bkey_i *, enum btree_update_flags);
int __must_check bch2_trans_update_seq(struct btree_trans *, u64, struct btree_iter *,
				       struct bkey_i *, enum btree_update_flags);
int __must_check bch2_trans_update_buffered(struct btree_trans *,
					    enum btree_id, struct bkey_i *);

void bch2_trans_commit_hook(struct btree_trans *,
			    struct btree_trans_commit_hook *);
int __bch2_trans_commit(struct btree_trans *, unsigned);

__printf(2, 3) int bch2_fs_log_msg(struct bch_fs *, const char *, ...);
__printf(2, 3) int bch2_journal_log_msg(struct bch_fs *, const char *, ...);

/**
 * bch2_trans_commit - insert keys at given iterator positions
 * @trans:		btree transaction to commit
 * @disk_res:		disk reservation to charge, may be NULL
 * @journal_seq:	if non-NULL, set to the journal sequence number the
 *			commit went into
 * @flags:		BTREE_INSERT_* flags
 *
 * This is the main entry point for btree updates.
 *
 * Return values:
 * -EROFS: filesystem read only
 * -EIO: journal or btree node IO error
 */
static inline int bch2_trans_commit(struct btree_trans *trans,
				    struct disk_reservation *disk_res,
				    u64 *journal_seq,
				    unsigned flags)
{
	trans->disk_res		= disk_res;
	trans->journal_seq	= journal_seq;

	return __bch2_trans_commit(trans, flags);
}

#define commit_do(_trans, _disk_res, _journal_seq, _flags, _do)		\
	lockrestart_do(_trans, _do ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_flags)))

#define nested_commit_do(_trans, _disk_res, _journal_seq, _flags, _do)	\
	nested_lockrestart_do(_trans, _do ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_flags)))

#define bch2_trans_run(_c, _do)						\
({									\
	struct btree_trans *trans = bch2_trans_get(_c);			\
	int _ret = (_do);						\
	bch2_trans_put(trans);						\
	_ret;								\
})

#define bch2_trans_do(_c, _disk_res, _journal_seq, _flags, _do)		\
	bch2_trans_run(_c, commit_do(trans, _disk_res, _journal_seq, _flags, _do))
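/*
 * Example (editor's sketch): the usual one-shot pattern from outside the
 * btree code. bch2_trans_do() allocates the transaction, retries on
 * transaction restart, commits, and tears the transaction down.
 * "my_update_fn" stands in for any helper taking a btree_trans; "trans" is
 * the name bound by bch2_trans_run():
 *
 *	int ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
 *				my_update_fn(trans, args));
 */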
#define trans_for_each_update(_trans, _i)				\
	for ((_i) = (_trans)->updates;					\
	     (_i) < (_trans)->updates + (_trans)->nr_updates;		\
	     (_i)++)

#define trans_for_each_wb_update(_trans, _i)				\
	for ((_i) = (_trans)->wb_updates;				\
	     (_i) < (_trans)->wb_updates + (_trans)->nr_wb_updates;	\
	     (_i)++)

static inline void bch2_trans_reset_updates(struct btree_trans *trans)
{
	struct btree_insert_entry *i;

	trans_for_each_update(trans, i)
		bch2_path_put(trans, i->path, true);

	trans->extra_journal_res	= 0;
	trans->nr_updates		= 0;
	trans->nr_wb_updates		= 0;
	trans->wb_updates		= NULL;
	trans->hooks			= NULL;
	trans->extra_journal_entries.nr	= 0;

	if (trans->fs_usage_deltas) {
		trans->fs_usage_deltas->used = 0;
		memset((void *) trans->fs_usage_deltas +
		       offsetof(struct replicas_delta_list, memset_start), 0,
		       (void *) &trans->fs_usage_deltas->memset_end -
		       (void *) &trans->fs_usage_deltas->memset_start);
	}
}

static inline struct bkey_i *__bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k,
							   unsigned type, unsigned min_bytes)
{
	unsigned bytes = max_t(unsigned, min_bytes, bkey_bytes(k.k));
	struct bkey_i *mut;

	if (type && k.k->type != type)
		return ERR_PTR(-ENOENT);

	mut = bch2_trans_kmalloc_nomemzero(trans, bytes);
	if (!IS_ERR(mut)) {
		bkey_reassemble(mut, k);

		if (unlikely(bytes > bkey_bytes(k.k))) {
			memset((void *) mut + bkey_bytes(k.k), 0,
			       bytes - bkey_bytes(k.k));
			mut->k.u64s = DIV_ROUND_UP(bytes, sizeof(u64));
		}
	}
	return mut;
}

static inline struct bkey_i *bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k)
{
	return __bch2_bkey_make_mut_noupdate(trans, k, 0, 0);
}

#define bch2_bkey_make_mut_noupdate_typed(_trans, _k, _type)		\
	bkey_i_to_##_type(__bch2_bkey_make_mut_noupdate(_trans, _k,	\
				KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))

static inline struct bkey_i *__bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter,
						  struct bkey_s_c *k, unsigned flags,
						  unsigned type, unsigned min_bytes)
{
	struct bkey_i *mut = __bch2_bkey_make_mut_noupdate(trans, *k, type, min_bytes);
	int ret;

	if (IS_ERR(mut))
		return mut;

	ret = bch2_trans_update(trans, iter, mut, flags);
	if (ret)
		return ERR_PTR(ret);

	*k = bkey_i_to_s_c(mut);
	return mut;
}

static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter,
						struct bkey_s_c *k, unsigned flags)
{
	return __bch2_bkey_make_mut(trans, iter, k, flags, 0, 0);
}

#define bch2_bkey_make_mut_typed(_trans, _iter, _k, _flags, _type)	\
	bkey_i_to_##_type(__bch2_bkey_make_mut(_trans, _iter, _k, _flags,\
				KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))

static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *trans,
							  struct btree_iter *iter,
							  unsigned btree_id, struct bpos pos,
							  unsigned flags, unsigned type, unsigned min_bytes)
{
	struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter,
				btree_id, pos, flags|BTREE_ITER_INTENT, type);
	struct bkey_i *ret = IS_ERR(k.k)
		? ERR_CAST(k.k)
		: __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes);
	if (IS_ERR(ret))
		bch2_trans_iter_exit(trans, iter);
	return ret;
}

static inline struct bkey_i *bch2_bkey_get_mut_noupdate(struct btree_trans *trans,
							struct btree_iter *iter,
							unsigned btree_id, struct bpos pos,
							unsigned flags)
{
	return __bch2_bkey_get_mut_noupdate(trans, iter, btree_id, pos, flags, 0, 0);
}

static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans,
						 struct btree_iter *iter,
						 unsigned btree_id, struct bpos pos,
						 unsigned flags, unsigned type, unsigned min_bytes)
{
	struct bkey_i *mut = __bch2_bkey_get_mut_noupdate(trans, iter,
			btree_id, pos, flags|BTREE_ITER_INTENT, type, min_bytes);
	int ret;

	if (IS_ERR(mut))
		return mut;

	ret = bch2_trans_update(trans, iter, mut, flags);
	if (ret) {
		bch2_trans_iter_exit(trans, iter);
		return ERR_PTR(ret);
	}

	return mut;
}

static inline struct bkey_i *bch2_bkey_get_mut_minsize(struct btree_trans *trans,
						       struct btree_iter *iter,
						       unsigned btree_id, struct bpos pos,
						       unsigned flags, unsigned min_bytes)
{
	return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, min_bytes);
}

static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
					       struct btree_iter *iter,
					       unsigned btree_id, struct bpos pos,
					       unsigned flags)
{
	return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, 0);
}

#define bch2_bkey_get_mut_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
	bkey_i_to_##_type(__bch2_bkey_get_mut(_trans, _iter,		\
			_btree_id, _pos, _flags,			\
			KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
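/*
 * Example (editor's sketch): looking up a key as a mutable, typed update.
 * The btree ID and key type shown are illustrative. On success the update
 * has already been queued via bch2_trans_update(), but the iterator must
 * still be exited by the caller:
 *
 *	struct btree_iter iter;
 *	struct bkey_i_alloc_v4 *a =
 *		bch2_bkey_get_mut_typed(trans, &iter,
 *					BTREE_ID_alloc, pos, 0, alloc_v4);
 *	int ret = PTR_ERR_OR_ZERO(a);
 *	if (ret)
 *		return ret;
 *
 *	... modify a->v here, then commit ...
 *	bch2_trans_iter_exit(trans, &iter);
 *
 * bch2_bkey_alloc(), below, follows the same ERR_PTR convention when
 * creating a new key at the iterator position instead of reading one.
 */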
static inline struct bkey_i *__bch2_bkey_alloc(struct btree_trans *trans, struct btree_iter *iter,
					       unsigned flags, unsigned type, unsigned val_size)
{
	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k) + val_size);
	int ret;

	if (IS_ERR(k))
		return k;

	bkey_init(&k->k);
	k->k.p = iter->pos;
	k->k.type = type;
	set_bkey_val_bytes(&k->k, val_size);

	ret = bch2_trans_update(trans, iter, k, flags);
	if (unlikely(ret))
		return ERR_PTR(ret);
	return k;
}

#define bch2_bkey_alloc(_trans, _iter, _flags, _type)			\
	bkey_i_to_##_type(__bch2_bkey_alloc(_trans, _iter, _flags,	\
				KEY_TYPE_##_type, sizeof(struct bch_##_type)))

#endif /* _BCACHEFS_BTREE_UPDATE_H */