/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SNAPSHOT_H
#define _BCACHEFS_SNAPSHOT_H

enum bch_validate_flags;

void bch2_snapshot_tree_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
int bch2_snapshot_tree_invalid(struct bch_fs *, struct bkey_s_c,
			       enum bch_validate_flags, struct printbuf *);

#define bch2_bkey_ops_snapshot_tree ((struct bkey_ops) {	\
	.key_invalid	= bch2_snapshot_tree_invalid,		\
	.val_to_text	= bch2_snapshot_tree_to_text,		\
	.min_val_size	= 8,					\
})

struct bkey_i_snapshot_tree *__bch2_snapshot_tree_create(struct btree_trans *);

int bch2_snapshot_tree_lookup(struct btree_trans *, u32, struct bch_snapshot_tree *);

void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
int bch2_snapshot_invalid(struct bch_fs *, struct bkey_s_c,
			  enum bch_validate_flags, struct printbuf *);
int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
		       struct bkey_s_c, struct bkey_s,
		       enum btree_iter_update_trigger_flags);

#define bch2_bkey_ops_snapshot ((struct bkey_ops) {	\
	.key_invalid	= bch2_snapshot_invalid,	\
	.val_to_text	= bch2_snapshot_to_text,	\
	.trigger	= bch2_mark_snapshot,		\
	.min_val_size	= 24,				\
})

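/*
 * In-memory snapshot table lookup, c->snapshots:
 *
 * The table is indexed by U32_MAX - id; this indexing assumes snapshot IDs
 * are handed out counting down from U32_MAX, which keeps in-use indices
 * small and the table dense.  __snapshot_t() returns NULL if the table
 * hasn't been allocated yet or @id is out of range.
 *
 * c->snapshots is RCU protected: snapshot_t() and the double-underscore
 * helpers below must be called with rcu_read_lock() held, while their
 * non-underscore wrappers take the RCU read lock themselves.
 */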
static inline struct snapshot_t *__snapshot_t(struct snapshot_table *t, u32 id)
{
	u32 idx = U32_MAX - id;

	return likely(t && idx < t->nr)
		? &t->s[idx]
		: NULL;
}

static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
{
	return __snapshot_t(rcu_dereference(c->snapshots), id);
}

static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)
{
	rcu_read_lock();
	const struct snapshot_t *s = snapshot_t(c, id);
	id = s ? s->tree : 0;
	rcu_read_unlock();

	return id;
}

static inline u32 __bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s = snapshot_t(c, id);
	return s ? s->parent : 0;
}

static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
{
	rcu_read_lock();
	id = __bch2_snapshot_parent_early(c, id);
	rcu_read_unlock();

	return id;
}

static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s = snapshot_t(c, id);
	if (!s)
		return 0;

	u32 parent = s->parent;
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    parent &&
	    s->depth != snapshot_t(c, parent)->depth + 1)
		panic("id %u depth=%u parent %u depth=%u\n",
		      id, snapshot_t(c, id)->depth,
		      parent, snapshot_t(c, parent)->depth);

	return parent;
}

static inline u32 bch2_snapshot_parent(struct bch_fs *c, u32 id)
{
	rcu_read_lock();
	id = __bch2_snapshot_parent(c, id);
	rcu_read_unlock();

	return id;
}

static inline u32 bch2_snapshot_nth_parent(struct bch_fs *c, u32 id, u32 n)
{
	rcu_read_lock();
	while (n--)
		id = __bch2_snapshot_parent(c, id);
	rcu_read_unlock();

	return id;
}

u32 bch2_snapshot_skiplist_get(struct bch_fs *, u32);

static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id)
{
	u32 parent;

	rcu_read_lock();
	while ((parent = __bch2_snapshot_parent(c, id)))
		id = parent;
	rcu_read_unlock();

	return id;
}

static inline u32 __bch2_snapshot_equiv(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s = snapshot_t(c, id);
	return s ? s->equiv : 0;
}

static inline u32 bch2_snapshot_equiv(struct bch_fs *c, u32 id)
{
	rcu_read_lock();
	id = __bch2_snapshot_equiv(c, id);
	rcu_read_unlock();

	return id;
}

static inline int bch2_snapshot_is_internal_node(struct bch_fs *c, u32 id)
{
	rcu_read_lock();
	const struct snapshot_t *s = snapshot_t(c, id);
	int ret = s ? s->children[0] : -BCH_ERR_invalid_snapshot_node;
	rcu_read_unlock();

	return ret;
}

static inline int bch2_snapshot_is_leaf(struct bch_fs *c, u32 id)
{
	int ret = bch2_snapshot_is_internal_node(c, id);
	if (ret < 0)
		return ret;
	return !ret;
}

static inline u32 bch2_snapshot_depth(struct bch_fs *c, u32 parent)
{
	u32 depth;

	rcu_read_lock();
	depth = parent ? snapshot_t(c, parent)->depth + 1 : 0;
	rcu_read_unlock();

	return depth;
}

bool __bch2_snapshot_is_ancestor(struct bch_fs *, u32, u32);

static inline bool bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
{
	return id == ancestor
		? true
		: __bch2_snapshot_is_ancestor(c, id, ancestor);
}

static inline bool bch2_snapshot_has_children(struct bch_fs *c, u32 id)
{
	rcu_read_lock();
	const struct snapshot_t *t = snapshot_t(c, id);
	bool ret = t && (t->children[0]|t->children[1]) != 0;
	rcu_read_unlock();

	return ret;
}

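/*
 * Helpers for snapshot_id_list, a darray of u32 snapshot IDs:
 *
 * - snapshot_list_has_id():		exact ID lookup (linear search)
 * - snapshot_list_has_ancestor():	true if the list contains @id or an
 *					ancestor of @id
 * - snapshot_list_add():		append; BUG()s if @id is already present
 * - snapshot_list_add_nodup():		append, silently skipping duplicates
 * - snapshot_list_merge():		add every ID in @src to @dst, skipping
 *					duplicates
 *
 * The add/merge helpers return 0 on success, or a negative error if
 * darray_push() fails to reallocate the list.
 */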
static inline bool snapshot_list_has_id(snapshot_id_list *s, u32 id)
{
	darray_for_each(*s, i)
		if (*i == id)
			return true;
	return false;
}

static inline bool snapshot_list_has_ancestor(struct bch_fs *c, snapshot_id_list *s, u32 id)
{
	darray_for_each(*s, i)
		if (bch2_snapshot_is_ancestor(c, id, *i))
			return true;
	return false;
}

static inline int snapshot_list_add(struct bch_fs *c, snapshot_id_list *s, u32 id)
{
	BUG_ON(snapshot_list_has_id(s, id));
	int ret = darray_push(s, id);
	if (ret)
		bch_err(c, "error reallocating snapshot_id_list (size %zu)", s->size);
	return ret;
}

static inline int snapshot_list_add_nodup(struct bch_fs *c, snapshot_id_list *s, u32 id)
{
	int ret = snapshot_list_has_id(s, id)
		? 0
		: darray_push(s, id);
	if (ret)
		bch_err(c, "error reallocating snapshot_id_list (size %zu)", s->size);
	return ret;
}

static inline int snapshot_list_merge(struct bch_fs *c, snapshot_id_list *dst, snapshot_id_list *src)
{
	darray_for_each(*src, i) {
		int ret = snapshot_list_add_nodup(c, dst, *i);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
			 struct bch_snapshot *s);
int bch2_snapshot_get_subvol(struct btree_trans *, u32,
			     struct bch_subvolume *);

/* only exported for tests: */
int bch2_snapshot_node_create(struct btree_trans *, u32,
			      u32 *, u32 *, unsigned);

int bch2_check_snapshot_trees(struct bch_fs *);
int bch2_check_snapshots(struct bch_fs *);
int bch2_reconstruct_snapshots(struct bch_fs *);
int bch2_check_key_has_snapshot(struct btree_trans *, struct btree_iter *, struct bkey_s_c);

int bch2_snapshot_node_set_deleted(struct btree_trans *, u32);
void bch2_delete_dead_snapshots_work(struct work_struct *);

int __bch2_key_has_snapshot_overwrites(struct btree_trans *, enum btree_id, struct bpos);

static inline int bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
						   enum btree_id id,
						   struct bpos pos)
{
	if (!btree_type_has_snapshots(id) ||
	    bch2_snapshot_is_leaf(trans->c, pos.snapshot) > 0)
		return 0;

	return __bch2_key_has_snapshot_overwrites(trans, id, pos);
}

int bch2_propagate_key_to_snapshot_leaves(struct btree_trans *, enum btree_id,
					  struct bkey_s_c, struct bpos *);

int bch2_snapshots_read(struct bch_fs *);
void bch2_fs_snapshots_exit(struct bch_fs *);

#endif /* _BCACHEFS_SNAPSHOT_H */