// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "fs.h"
#include "recovery_passes.h"
#include "snapshot.h"

#include <linux/random.h>

/*
 * Snapshot trees:
 *
 * Keys in BTREE_ID_snapshot_trees identify a whole tree of snapshot nodes; they
 * exist to provide a stable identifier for the whole lifetime of a snapshot
 * tree.
 */

void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
				struct bkey_s_c k)
{
	struct bkey_s_c_snapshot_tree t = bkey_s_c_to_snapshot_tree(k);

	prt_printf(out, "subvol %u root snapshot %u",
		   le32_to_cpu(t.v->master_subvol),
		   le32_to_cpu(t.v->root_snapshot));
}

int bch2_snapshot_tree_validate(struct bch_fs *c, struct bkey_s_c k,
				enum bch_validate_flags flags)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
			 bkey_lt(k.k->p, POS(0, 1)),
			 c, snapshot_tree_pos_bad,
			 "bad pos");
fsck_err:
	return ret;
}

int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
			      struct bch_snapshot_tree *s)
{
	int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
					  BTREE_ITER_with_updates, snapshot_tree, s);

	if (bch2_err_matches(ret, ENOENT))
		ret = -BCH_ERR_ENOENT_snapshot_tree;
	return ret;
}

struct bkey_i_snapshot_tree *
__bch2_snapshot_tree_create(struct btree_trans *trans)
{
	struct btree_iter iter;
	int ret = bch2_bkey_get_empty_slot(trans, &iter,
					   BTREE_ID_snapshot_trees, POS(0, U32_MAX));
	struct bkey_i_snapshot_tree *s_t;

	if (ret == -BCH_ERR_ENOSPC_btree_slot)
		ret = -BCH_ERR_ENOSPC_snapshot_tree;
	if (ret)
		return ERR_PTR(ret);

	s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
	ret = PTR_ERR_OR_ZERO(s_t);
	bch2_trans_iter_exit(trans, &iter);
	return ret ? ERR_PTR(ret) : s_t;
}

static int bch2_snapshot_tree_create(struct btree_trans *trans,
				     u32 root_id, u32 subvol_id, u32 *tree_id)
{
	struct bkey_i_snapshot_tree *n_tree =
		__bch2_snapshot_tree_create(trans);

	if (IS_ERR(n_tree))
		return PTR_ERR(n_tree);

	n_tree->v.master_subvol	= cpu_to_le32(subvol_id);
	n_tree->v.root_snapshot	= cpu_to_le32(root_id);
	*tree_id = n_tree->k.p.offset;
	return 0;
}

/* Snapshot nodes: */

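/*
 * Slow ancestor check that only uses parent pointers, walking up the tree one
 * node at a time. Parent IDs are always numerically greater than child IDs, so
 * we can stop as soon as we pass @ancestor. Used (via
 * bch2_snapshot_is_ancestor_early()) before the skiplists and is_ancestor
 * bitmaps are known to be valid.
 */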
static bool __bch2_snapshot_is_ancestor_early(struct snapshot_table *t, u32 id, u32 ancestor)
{
	while (id && id < ancestor) {
		const struct snapshot_t *s = __snapshot_t(t, id);
		id = s ? s->parent : 0;
	}
	return id == ancestor;
}

static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
{
	rcu_read_lock();
	bool ret = __bch2_snapshot_is_ancestor_early(rcu_dereference(c->snapshots), id, ancestor);
	rcu_read_unlock();

	return ret;
}

static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
{
	const struct snapshot_t *s = __snapshot_t(t, id);
	if (!s)
		return 0;

	if (s->skip[2] <= ancestor)
		return s->skip[2];
	if (s->skip[1] <= ancestor)
		return s->skip[1];
	if (s->skip[0] <= ancestor)
		return s->skip[0];
	return s->parent;
}

static bool test_ancestor_bitmap(struct snapshot_table *t, u32 id, u32 ancestor)
{
	const struct snapshot_t *s = __snapshot_t(t, id);
	if (!s)
		return false;

	return test_bit(ancestor - id - 1, s->is_ancestor);
}

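/*
 * Fast ancestor check: follow skiplist pointers to jump towards @ancestor,
 * then finish with the per-node is_ancestor bitmap, which covers the
 * IS_ANCESTOR_BITMAP closest ancestors of each node. Falls back to the slow
 * parent-pointer walk until check_snapshots has run.
 */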
bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
{
	bool ret;

	rcu_read_lock();
	struct snapshot_table *t = rcu_dereference(c->snapshots);

	if (unlikely(c->recovery_pass_done < BCH_RECOVERY_PASS_check_snapshots)) {
		ret = __bch2_snapshot_is_ancestor_early(t, id, ancestor);
		goto out;
	}

	while (id && id < ancestor - IS_ANCESTOR_BITMAP)
		id = get_ancestor_below(t, id, ancestor);

	ret = id && id < ancestor
		? test_ancestor_bitmap(t, id, ancestor)
		: id == ancestor;

	EBUG_ON(ret != __bch2_snapshot_is_ancestor_early(t, id, ancestor));
out:
	rcu_read_unlock();

	return ret;
}

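/*
 * Grow the RCU-protected in-memory snapshot table so that it has a slot for
 * @id (slots are indexed by U32_MAX - id): allocate a larger table, copy the
 * old contents, publish it with rcu_assign_pointer() and free the old table
 * after a grace period. Caller must hold snapshot_table_lock.
 */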
static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
{
	size_t idx = U32_MAX - id;
	struct snapshot_table *new, *old;

	size_t new_bytes = kmalloc_size_roundup(struct_size(new, s, idx + 1));
	size_t new_size = (new_bytes - sizeof(*new)) / sizeof(new->s[0]);

	if (unlikely(new_bytes > INT_MAX))
		return NULL;

	new = kvzalloc(new_bytes, GFP_KERNEL);
	if (!new)
		return NULL;

	new->nr = new_size;

	old = rcu_dereference_protected(c->snapshots, true);
	if (old)
		memcpy(new->s, old->s, sizeof(old->s[0]) * old->nr);

	rcu_assign_pointer(c->snapshots, new);
	kvfree_rcu(old, rcu);

	return &rcu_dereference_protected(c->snapshots,
				lockdep_is_held(&c->snapshot_table_lock))->s[idx];
}

static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
{
	size_t idx = U32_MAX - id;
	struct snapshot_table *table =
		rcu_dereference_protected(c->snapshots,
				lockdep_is_held(&c->snapshot_table_lock));

	lockdep_assert_held(&c->snapshot_table_lock);

	if (likely(table && idx < table->nr))
		return &table->s[idx];

	return __snapshot_t_mut(c, id);
}

void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
			   struct bkey_s_c k)
{
	struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);

	prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u tree %u",
		   BCH_SNAPSHOT_SUBVOL(s.v),
		   BCH_SNAPSHOT_DELETED(s.v),
		   le32_to_cpu(s.v->parent),
		   le32_to_cpu(s.v->children[0]),
		   le32_to_cpu(s.v->children[1]),
		   le32_to_cpu(s.v->subvol),
		   le32_to_cpu(s.v->tree));

	if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, depth))
		prt_printf(out, " depth %u skiplist %u %u %u",
			   le32_to_cpu(s.v->depth),
			   le32_to_cpu(s.v->skip[0]),
			   le32_to_cpu(s.v->skip[1]),
			   le32_to_cpu(s.v->skip[2]));
}

int bch2_snapshot_validate(struct bch_fs *c, struct bkey_s_c k,
			   enum bch_validate_flags flags)
{
	struct bkey_s_c_snapshot s;
	u32 i, id;
	int ret = 0;

	bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
			 bkey_lt(k.k->p, POS(0, 1)),
			 c, snapshot_pos_bad,
			 "bad pos");

	s = bkey_s_c_to_snapshot(k);

	id = le32_to_cpu(s.v->parent);
	bkey_fsck_err_on(id && id <= k.k->p.offset,
			 c, snapshot_parent_bad,
			 "bad parent node (%u <= %llu)",
			 id, k.k->p.offset);

	bkey_fsck_err_on(le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]),
			 c, snapshot_children_not_normalized,
			 "children not normalized");

	bkey_fsck_err_on(s.v->children[0] && s.v->children[0] == s.v->children[1],
			 c, snapshot_child_duplicate,
			 "duplicate child nodes");

	for (i = 0; i < 2; i++) {
		id = le32_to_cpu(s.v->children[i]);

		bkey_fsck_err_on(id >= k.k->p.offset,
				 c, snapshot_child_bad,
				 "bad child node (%u >= %llu)",
				 id, k.k->p.offset);
	}

	if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
		bkey_fsck_err_on(le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
				 le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2]),
				 c, snapshot_skiplist_not_normalized,
				 "skiplist not normalized");

		for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
			id = le32_to_cpu(s.v->skip[i]);

			bkey_fsck_err_on(id && id < le32_to_cpu(s.v->parent),
					 c, snapshot_skiplist_bad,
					 "bad skiplist node %u", id);
		}
	}
fsck_err:
	return ret;
}

static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
{
	struct snapshot_t *t = snapshot_t_mut(c, id);
	u32 parent = id;

	while ((parent = bch2_snapshot_parent_early(c, parent)) &&
	       parent - id - 1 < IS_ANCESTOR_BITMAP)
		__set_bit(parent - id - 1, t->is_ancestor);
}

static void set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
{
	mutex_lock(&c->snapshot_table_lock);
	__set_is_ancestor_bitmap(c, id);
	mutex_unlock(&c->snapshot_table_lock);
}

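/*
 * Trigger for KEY_TYPE_snapshot: keeps the in-memory snapshot table in sync
 * with the btree. Copies parent/children/subvol/tree/depth/skiplist into the
 * snapshot_t entry, updates the is_ancestor bitmap, and kicks off dead
 * snapshot deletion if the key is marked deleted.
 */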
static int __bch2_mark_snapshot(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s_c new,
		       enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct snapshot_t *t;
	u32 id = new.k->p.offset;
	int ret = 0;

	mutex_lock(&c->snapshot_table_lock);

	t = snapshot_t_mut(c, id);
	if (!t) {
		ret = -BCH_ERR_ENOMEM_mark_snapshot;
		goto err;
	}

	if (new.k->type == KEY_TYPE_snapshot) {
		struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);

		t->parent	= le32_to_cpu(s.v->parent);
		t->children[0]	= le32_to_cpu(s.v->children[0]);
		t->children[1]	= le32_to_cpu(s.v->children[1]);
		t->subvol	= BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
		t->tree		= le32_to_cpu(s.v->tree);

		if (bkey_val_bytes(s.k) > offsetof(struct bch_snapshot, depth)) {
			t->depth	= le32_to_cpu(s.v->depth);
			t->skip[0]	= le32_to_cpu(s.v->skip[0]);
			t->skip[1]	= le32_to_cpu(s.v->skip[1]);
			t->skip[2]	= le32_to_cpu(s.v->skip[2]);
		} else {
			t->depth	= 0;
			t->skip[0]	= 0;
			t->skip[1]	= 0;
			t->skip[2]	= 0;
		}

		__set_is_ancestor_bitmap(c, id);

		if (BCH_SNAPSHOT_DELETED(s.v)) {
			set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
			if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots)
				bch2_delete_dead_snapshots_async(c);
		}
	} else {
		memset(t, 0, sizeof(*t));
	}
err:
	mutex_unlock(&c->snapshot_table_lock);
	return ret;
}

int bch2_mark_snapshot(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s new,
		       enum btree_iter_update_trigger_flags flags)
{
	return __bch2_mark_snapshot(trans, btree, level, old, new.s_c, flags);
}

int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
			 struct bch_snapshot *s)
{
	return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
				       BTREE_ITER_with_updates, snapshot, s);
}

static int bch2_snapshot_live(struct btree_trans *trans, u32 id)
{
	struct bch_snapshot v;
	int ret;

	if (!id)
		return 0;

	ret = bch2_snapshot_lookup(trans, id, &v);
	if (bch2_err_matches(ret, ENOENT))
		bch_err(trans->c, "snapshot node %u not found", id);
	if (ret)
		return ret;

	return !BCH_SNAPSHOT_DELETED(&v);
}

/*
 * If @k is a snapshot with just one live child, it's part of a linear chain,
 * which we consider to be an equivalence class: and then after snapshot
 * deletion cleanup, there should only be a single key at a given position in
 * this equivalence class.
 *
 * This sets the equivalence class of @k to be the child's equivalence class, if
 * it's part of such a linear chain: this correctly sets equivalence classes on
 * startup if we run leaf to root (i.e. in natural key order).
 */
static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	unsigned i, nr_live = 0, live_idx = 0;
	struct bkey_s_c_snapshot snap;
	u32 id = k.k->p.offset, child[2];

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	snap = bkey_s_c_to_snapshot(k);

	child[0] = le32_to_cpu(snap.v->children[0]);
	child[1] = le32_to_cpu(snap.v->children[1]);

	for (i = 0; i < 2; i++) {
		int ret = bch2_snapshot_live(trans, child[i]);

		if (ret < 0)
			return ret;

		if (ret)
			live_idx = i;
		nr_live += ret;
	}

	mutex_lock(&c->snapshot_table_lock);

	snapshot_t_mut(c, id)->equiv = nr_live == 1
		? snapshot_t_mut(c, child[live_idx])->equiv
		: id;

	mutex_unlock(&c->snapshot_table_lock);

	return 0;
}

/* fsck: */

static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
{
	return snapshot_t(c, id)->children[child];
}

static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
{
	return bch2_snapshot_child(c, id, 0);
}

static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
{
	return bch2_snapshot_child(c, id, 1);
}

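/*
 * Pre-order traversal of a snapshot tree: descend to the left child if there
 * is one, otherwise walk back up towards the root looking for a right-hand
 * sibling we haven't visited yet; returns 0 when the traversal is done.
 */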
static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
{
	u32 n, parent;

	n = bch2_snapshot_left_child(c, id);
	if (n)
		return n;

	while ((parent = bch2_snapshot_parent(c, id))) {
		n = bch2_snapshot_right_child(c, parent);
		if (n && n != id)
			return n;
		id = parent;
	}

	return 0;
}

static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
{
	u32 id = snapshot_root;
	u32 subvol = 0, s;

	rcu_read_lock();
	while (id) {
		s = snapshot_t(c, id)->subvol;

		if (s && (!subvol || s < subvol))
			subvol = s;

		id = bch2_snapshot_tree_next(c, id);
	}
	rcu_read_unlock();

	return subvol;
}

static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
					    u32 snapshot_root, u32 *subvol_id)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	bool found = false;
	int ret;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
				     0, k, ret) {
		if (k.k->type != KEY_TYPE_subvolume)
			continue;

		struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
		if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
			continue;
		if (!BCH_SUBVOLUME_SNAP(s.v)) {
			*subvol_id = s.k->p.offset;
			found = true;
			break;
		}
	}

	bch2_trans_iter_exit(trans, &iter);

	if (!ret && !found) {
		struct bkey_i_subvolume *u;

		*subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);

		u = bch2_bkey_get_mut_typed(trans, &iter,
					    BTREE_ID_subvolumes, POS(0, *subvol_id),
					    0, subvolume);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			return ret;

		SET_BCH_SUBVOLUME_SNAP(&u->v, false);
	}

	return ret;
}

static int check_snapshot_tree(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c_snapshot_tree st;
	struct bch_snapshot s;
	struct bch_subvolume subvol;
	struct printbuf buf = PRINTBUF;
	u32 root_id;
	int ret;

	if (k.k->type != KEY_TYPE_snapshot_tree)
		return 0;

	st = bkey_s_c_to_snapshot_tree(k);
	root_id = le32_to_cpu(st.v->root_snapshot);

	ret = bch2_snapshot_lookup(trans, root_id, &s);
	if (ret && !bch2_err_matches(ret, ENOENT))
		goto err;

	if (fsck_err_on(ret ||
			root_id != bch2_snapshot_root(c, root_id) ||
			st.k->p.offset != le32_to_cpu(s.tree),
			trans, snapshot_tree_to_missing_snapshot,
			"snapshot tree points to missing/incorrect snapshot:\n %s",
			(bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto err;
	}

	ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol),
				 false, 0, &subvol);
	if (ret && !bch2_err_matches(ret, ENOENT))
		goto err;

	if (fsck_err_on(ret,
			trans, snapshot_tree_to_missing_subvol,
			"snapshot tree points to missing subvolume:\n %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
	    fsck_err_on(!bch2_snapshot_is_ancestor(c,
						   le32_to_cpu(subvol.snapshot),
						   root_id),
			trans, snapshot_tree_to_wrong_subvol,
			"snapshot tree points to subvolume that does not point to snapshot in this tree:\n %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
	    fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol),
			trans, snapshot_tree_to_snapshot_subvol,
			"snapshot tree points to snapshot subvolume:\n %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
		struct bkey_i_snapshot_tree *u;
		u32 subvol_id;

		ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
		bch_err_fn(c, ret);

		if (bch2_err_matches(ret, ENOENT)) { /* nothing to be done here */
			ret = 0;
			goto err;
		}

		if (ret)
			goto err;

		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		u->v.master_subvol = cpu_to_le32(subvol_id);
		st = snapshot_tree_i_to_s_c(u);
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

/*
 * For each snapshot_tree, make sure it points to the root of a snapshot tree
 * and that snapshot entry points back to it, or delete it.
 *
 * And, make sure it points to a subvolume within that snapshot tree, or correct
 * it to point to the oldest subvolume within that snapshot tree.
 */
int bch2_check_snapshot_trees(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
			BTREE_ID_snapshot_trees, POS_MIN,
			BTREE_ITER_prefetch, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			check_snapshot_tree(trans, &iter, k)));
	bch_err_fn(c, ret);
	return ret;
}

/*
 * Look up snapshot tree for @tree_id and find root,
 * make sure @snap_id is a descendent:
 */
static int snapshot_tree_ptr_good(struct btree_trans *trans,
				  u32 snap_id, u32 tree_id)
{
	struct bch_snapshot_tree s_t;
	int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);

	if (bch2_err_matches(ret, ENOENT))
		return 0;
	if (ret)
		return ret;

	return bch2_snapshot_is_ancestor_early(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
}

u32 bch2_snapshot_skiplist_get(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s;

	if (!id)
		return 0;

	rcu_read_lock();
	s = snapshot_t(c, id);
	if (s->parent)
		id = bch2_snapshot_nth_parent(c, id, get_random_u32_below(s->depth));
	rcu_read_unlock();

	return id;
}

static int snapshot_skiplist_good(struct btree_trans *trans, u32 id, struct bch_snapshot s)
{
	unsigned i;

	for (i = 0; i < 3; i++)
		if (!s.parent) {
			if (s.skip[i])
				return false;
		} else {
			if (!bch2_snapshot_is_ancestor_early(trans->c, id, le32_to_cpu(s.skip[i])))
				return false;
		}

	return true;
}

/*
 * snapshot_tree pointer was incorrect: look up root snapshot node, make sure
 * its snapshot_tree pointer is correct (allocate new one if necessary), then
 * update this node's pointer to root node's pointer:
 */
static int snapshot_tree_ptr_repair(struct btree_trans *trans,
				    struct btree_iter *iter,
				    struct bkey_s_c k,
				    struct bch_snapshot *s)
{
	struct bch_fs *c = trans->c;
	struct btree_iter root_iter;
	struct bch_snapshot_tree s_t;
	struct bkey_s_c_snapshot root;
	struct bkey_i_snapshot *u;
	u32 root_id = bch2_snapshot_root(c, k.k->p.offset), tree_id;
	int ret;

	root = bch2_bkey_get_iter_typed(trans, &root_iter,
			       BTREE_ID_snapshots, POS(0, root_id),
			       BTREE_ITER_with_updates, snapshot);
	ret = bkey_err(root);
	if (ret)
		goto err;

	tree_id = le32_to_cpu(root.v->tree);

	ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
	if (ret && !bch2_err_matches(ret, ENOENT))
		return ret;

	if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
		u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot);
		ret =   PTR_ERR_OR_ZERO(u) ?:
			bch2_snapshot_tree_create(trans, root_id,
				bch2_snapshot_tree_oldest_subvol(c, root_id),
				&tree_id);
		if (ret)
			goto err;

		u->v.tree = cpu_to_le32(tree_id);
		if (k.k->p.offset == root_id)
			*s = u->v;
	}

	if (k.k->p.offset != root_id) {
		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		u->v.tree = cpu_to_le32(tree_id);
		*s = u->v;
	}
err:
	bch2_trans_iter_exit(trans, &root_iter);
	return ret;
}

static int check_snapshot(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bch_snapshot s;
	struct bch_subvolume subvol;
	struct bch_snapshot v;
	struct bkey_i_snapshot *u;
	u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset);
	u32 real_depth;
	struct printbuf buf = PRINTBUF;
	u32 i, id;
	int ret = 0;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	memset(&s, 0, sizeof(s));
	memcpy(&s, k.v, min(sizeof(s), bkey_val_bytes(k.k)));

	id = le32_to_cpu(s.parent);
	if (id) {
		ret = bch2_snapshot_lookup(trans, id, &v);
		if (bch2_err_matches(ret, ENOENT))
			bch_err(c, "snapshot with nonexistent parent:\n %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (ret)
			goto err;

		if (le32_to_cpu(v.children[0]) != k.k->p.offset &&
		    le32_to_cpu(v.children[1]) != k.k->p.offset) {
			bch_err(c, "snapshot parent %u missing pointer to child %llu",
				id, k.k->p.offset);
			ret = -EINVAL;
			goto err;
		}
	}

	for (i = 0; i < 2 && s.children[i]; i++) {
		id = le32_to_cpu(s.children[i]);

		ret = bch2_snapshot_lookup(trans, id, &v);
		if (bch2_err_matches(ret, ENOENT))
			bch_err(c, "snapshot node %llu has nonexistent child %u",
				k.k->p.offset, id);
		if (ret)
			goto err;

		if (le32_to_cpu(v.parent) != k.k->p.offset) {
			bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
				id, le32_to_cpu(v.parent), k.k->p.offset);
			ret = -EINVAL;
			goto err;
		}
	}

	bool should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
		!BCH_SNAPSHOT_DELETED(&s);

	if (should_have_subvol) {
		id = le32_to_cpu(s.subvol);
		ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
		if (bch2_err_matches(ret, ENOENT))
			bch_err(c, "snapshot points to nonexistent subvolume:\n %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (ret)
			goto err;

		if (BCH_SNAPSHOT_SUBVOL(&s) != (le32_to_cpu(subvol.snapshot) == k.k->p.offset)) {
			bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
				k.k->p.offset);
			ret = -EINVAL;
			goto err;
		}
	} else {
		if (fsck_err_on(s.subvol,
				trans, snapshot_should_not_have_subvol,
				"snapshot should not point to subvol:\n %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
			u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
			ret = PTR_ERR_OR_ZERO(u);
			if (ret)
				goto err;

			u->v.subvol = 0;
			s = u->v;
		}
	}

	ret = snapshot_tree_ptr_good(trans, k.k->p.offset, le32_to_cpu(s.tree));
	if (ret < 0)
		goto err;

	if (fsck_err_on(!ret,
			trans, snapshot_to_bad_snapshot_tree,
			"snapshot points to missing/incorrect tree:\n %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
		if (ret)
			goto err;
	}
	ret = 0;

	real_depth = bch2_snapshot_depth(c, parent_id);

	if (fsck_err_on(le32_to_cpu(s.depth) != real_depth,
			trans, snapshot_bad_depth,
			"snapshot with incorrect depth field, should be %u:\n %s",
			real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		u->v.depth = cpu_to_le32(real_depth);
		s = u->v;
	}

	ret = snapshot_skiplist_good(trans, k.k->p.offset, s);
	if (ret < 0)
		goto err;

	if (fsck_err_on(!ret,
			trans, snapshot_bad_skiplist,
			"snapshot with bad skiplist field:\n %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		for (i = 0; i < ARRAY_SIZE(u->v.skip); i++)
			u->v.skip[i] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent_id));

		bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_le32);
		s = u->v;
	}
	ret = 0;
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_snapshots(struct bch_fs *c)
{
	/*
	 * We iterate backwards as checking/fixing the depth field requires that
	 * the parent's depth already be correct:
	 */
	int ret = bch2_trans_run(c,
		for_each_btree_key_reverse_commit(trans, iter,
			BTREE_ID_snapshots, POS_MAX,
			BTREE_ITER_prefetch, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			check_snapshot(trans, &iter, k)));
	bch_err_fn(c, ret);
	return ret;
}

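/*
 * A key was found in snapshot @id but no snapshot node exists for it:
 * reconstruct the missing KEY_TYPE_snapshot key. Reuses an existing
 * snapshot_tree entry whose root is @id (creating one if there isn't any), and
 * points the new node at a subvolume that references this snapshot, if one
 * exists.
 */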
static int check_snapshot_exists(struct btree_trans *trans, u32 id)
{
	struct bch_fs *c = trans->c;

	if (bch2_snapshot_equiv(c, id))
		return 0;

	/* Do we need to reconstruct the snapshot_tree entry as well? */
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;
	u32 tree_id = 0;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_snapshot_trees, POS_MIN,
				     0, k, ret) {
		if (le32_to_cpu(bkey_s_c_to_snapshot_tree(k).v->root_snapshot) == id) {
			tree_id = k.k->p.offset;
			break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	if (ret)
		return ret;

	if (!tree_id) {
		ret = bch2_snapshot_tree_create(trans, id, 0, &tree_id);
		if (ret)
			return ret;
	}

	struct bkey_i_snapshot *snapshot = bch2_trans_kmalloc(trans, sizeof(*snapshot));
	ret = PTR_ERR_OR_ZERO(snapshot);
	if (ret)
		return ret;

	bkey_snapshot_init(&snapshot->k_i);
	snapshot->k.p		= POS(0, id);
	snapshot->v.tree	= cpu_to_le32(tree_id);
	snapshot->v.btime.lo	= cpu_to_le64(bch2_current_time(c));

	for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
				     0, k, ret) {
		if (le32_to_cpu(bkey_s_c_to_subvolume(k).v->snapshot) == id) {
			snapshot->v.subvol = cpu_to_le32(k.k->p.offset);
			SET_BCH_SNAPSHOT_SUBVOL(&snapshot->v, true);
			break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	return  bch2_btree_insert_trans(trans, BTREE_ID_snapshots, &snapshot->k_i, 0) ?:
		bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
				   bkey_s_c_null, bkey_i_to_s(&snapshot->k_i), 0) ?:
		bch2_snapshot_set_equiv(trans, bkey_i_to_s_c(&snapshot->k_i));
}

/* Figure out which snapshot nodes belong in the same tree: */
struct snapshot_tree_reconstruct {
	enum btree_id			btree;
	struct bpos			cur_pos;
	snapshot_id_list		cur_ids;
	DARRAY(snapshot_id_list)	trees;
};

static void snapshot_tree_reconstruct_exit(struct snapshot_tree_reconstruct *r)
{
	darray_for_each(r->trees, i)
		darray_exit(i);
	darray_exit(&r->trees);
	darray_exit(&r->cur_ids);
}

static inline bool same_snapshot(struct snapshot_tree_reconstruct *r, struct bpos pos)
{
	return r->btree == BTREE_ID_inodes
		? r->cur_pos.offset == pos.offset
		: r->cur_pos.inode == pos.inode;
}

static inline bool snapshot_id_lists_have_common(snapshot_id_list *l, snapshot_id_list *r)
{
	darray_for_each(*l, i)
		if (snapshot_list_has_id(r, *i))
			return true;
	return false;
}

static void snapshot_id_list_to_text(struct printbuf *out, snapshot_id_list *s)
{
	bool first = true;
	darray_for_each(*s, i) {
		if (!first)
			prt_char(out, ' ');
		first = false;
		prt_printf(out, "%u", *i);
	}
}

static int snapshot_tree_reconstruct_next(struct bch_fs *c, struct snapshot_tree_reconstruct *r)
{
	if (r->cur_ids.nr) {
		darray_for_each(r->trees, i)
			if (snapshot_id_lists_have_common(i, &r->cur_ids)) {
				int ret = snapshot_list_merge(c, i, &r->cur_ids);
				if (ret)
					return ret;
				goto out;
			}
		darray_push(&r->trees, r->cur_ids);
		darray_init(&r->cur_ids);
	}
out:
	r->cur_ids.nr = 0;
	return 0;
}

static int get_snapshot_trees(struct bch_fs *c, struct snapshot_tree_reconstruct *r, struct bpos pos)
{
	if (!same_snapshot(r, pos))
		snapshot_tree_reconstruct_next(c, r);
	r->cur_pos = pos;
	return snapshot_list_add_nodup(c, &r->cur_ids, pos.snapshot);
}

int bch2_reconstruct_snapshots(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct printbuf buf = PRINTBUF;
	struct snapshot_tree_reconstruct r = {};
	int ret = 0;

	for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
		if (btree_type_has_snapshots(btree)) {
			r.btree = btree;

			ret = for_each_btree_key(trans, iter, btree, POS_MIN,
					BTREE_ITER_all_snapshots|BTREE_ITER_prefetch, k, ({
				get_snapshot_trees(c, &r, k.k->p);
			}));
			if (ret)
				goto err;

			snapshot_tree_reconstruct_next(c, &r);
		}
	}

	darray_for_each(r.trees, t) {
		printbuf_reset(&buf);
		snapshot_id_list_to_text(&buf, t);

		darray_for_each(*t, id) {
			if (fsck_err_on(!bch2_snapshot_equiv(c, *id),
					trans, snapshot_node_missing,
					"snapshot node %u from tree %s missing, recreate?", *id, buf.buf)) {
				if (t->nr > 1) {
					bch_err(c, "cannot reconstruct snapshot trees with multiple nodes");
					ret = -BCH_ERR_fsck_repair_unimplemented;
					goto err;
				}

				ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
						check_snapshot_exists(trans, *id));
				if (ret)
					goto err;
			}
		}
	}
fsck_err:
err:
	bch2_trans_put(trans);
	snapshot_tree_reconstruct_exit(&r);
	printbuf_exit(&buf);
	bch_err_fn(c, ret);
	return ret;
}

int bch2_check_key_has_snapshot(struct btree_trans *trans,
				struct btree_iter *iter,
				struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot),
			trans, bkey_in_missing_snapshot,
			"key in missing snapshot %s, delete?",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		ret = bch2_btree_delete_at(trans, iter,
					   BTREE_UPDATE_internal_snapshot_node) ?: 1;
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

/*
 * Mark a snapshot as deleted, for future cleanup:
 */
int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
{
	struct btree_iter iter;
	struct bkey_i_snapshot *s;
	int ret = 0;

	s = bch2_bkey_get_mut_typed(trans, &iter,
				    BTREE_ID_snapshots, POS(0, id),
				    0, snapshot);
	ret = PTR_ERR_OR_ZERO(s);
	if (unlikely(ret)) {
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
					trans->c, "missing snapshot %u", id);
		return ret;
	}

	/* already deleted? */
	if (BCH_SNAPSHOT_DELETED(&s->v))
		goto err;

	SET_BCH_SNAPSHOT_DELETED(&s->v, true);
	SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
	s->v.subvol = 0;
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
{
	if (le32_to_cpu(s->children[0]) < le32_to_cpu(s->children[1]))
		swap(s->children[0], s->children[1]);
}

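/*
 * Remove a snapshot node with at most one child from the tree: splice the
 * child (if any) into the parent's child slot, clear the child's skiplist if
 * it becomes a root, and update or delete the snapshot_tree entry when the
 * node being removed was the tree's root.
 */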
static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
	struct btree_iter c_iter = (struct btree_iter) { NULL };
	struct btree_iter tree_iter = (struct btree_iter) { NULL };
	struct bkey_s_c_snapshot s;
	u32 parent_id, child_id;
	unsigned i;
	int ret = 0;

	s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
				     BTREE_ITER_intent, snapshot);
	ret = bkey_err(s);
	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
				"missing snapshot %u", id);

	if (ret)
		goto err;

	BUG_ON(s.v->children[1]);

	parent_id = le32_to_cpu(s.v->parent);
	child_id = le32_to_cpu(s.v->children[0]);

	if (parent_id) {
		struct bkey_i_snapshot *parent;

		parent = bch2_bkey_get_mut_typed(trans, &p_iter,
				     BTREE_ID_snapshots, POS(0, parent_id),
				     0, snapshot);
		ret = PTR_ERR_OR_ZERO(parent);
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
					"missing snapshot %u", parent_id);
		if (unlikely(ret))
			goto err;

		/* find entry in parent->children for node being deleted */
		for (i = 0; i < 2; i++)
			if (le32_to_cpu(parent->v.children[i]) == id)
				break;

		if (bch2_fs_inconsistent_on(i == 2, c,
					"snapshot %u missing child pointer to %u",
					parent_id, id))
			goto err;

		parent->v.children[i] = cpu_to_le32(child_id);

		normalize_snapshot_child_pointers(&parent->v);
	}

	if (child_id) {
		struct bkey_i_snapshot *child;

		child = bch2_bkey_get_mut_typed(trans, &c_iter,
				     BTREE_ID_snapshots, POS(0, child_id),
				     0, snapshot);
		ret = PTR_ERR_OR_ZERO(child);
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
					"missing snapshot %u", child_id);
		if (unlikely(ret))
			goto err;

		child->v.parent = cpu_to_le32(parent_id);

		if (!child->v.parent) {
			child->v.skip[0] = 0;
			child->v.skip[1] = 0;
			child->v.skip[2] = 0;
		}
	}

	if (!parent_id) {
		/*
		 * We're deleting the root of a snapshot tree: update the
		 * snapshot_tree entry to point to the new root, or delete it if
		 * this is the last snapshot ID in this tree:
		 */
		struct bkey_i_snapshot_tree *s_t;

		BUG_ON(s.v->children[1]);

		s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
				BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
				0, snapshot_tree);
		ret = PTR_ERR_OR_ZERO(s_t);
		if (ret)
			goto err;

		if (s.v->children[0]) {
			s_t->v.root_snapshot = s.v->children[0];
		} else {
			s_t->k.type = KEY_TYPE_deleted;
			set_bkey_val_u64s(&s_t->k, 0);
		}
	}

	ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &tree_iter);
	bch2_trans_iter_exit(trans, &p_iter);
	bch2_trans_iter_exit(trans, &c_iter);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

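/*
 * Allocate @nr_snapids new snapshot nodes as children of @parent in tree
 * @tree: new IDs are taken from the empty slots just below the lowest existing
 * snapshot ID, so a node's ID is always less than its parent's.
 */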
static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
			  u32 *new_snapids,
			  u32 *snapshot_subvols,
			  unsigned nr_snapids)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_snapshot *n;
	struct bkey_s_c k;
	unsigned i, j;
	u32 depth = bch2_snapshot_depth(c, parent);
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
			     POS_MIN, BTREE_ITER_intent);
	k = bch2_btree_iter_peek(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	for (i = 0; i < nr_snapids; i++) {
		k = bch2_btree_iter_prev_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (!k.k || !k.k->p.offset) {
			ret = -BCH_ERR_ENOSPC_snapshot_create;
			goto err;
		}

		n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(n);
		if (ret)
			goto err;

		n->v.flags	= 0;
		n->v.parent	= cpu_to_le32(parent);
		n->v.subvol	= cpu_to_le32(snapshot_subvols[i]);
		n->v.tree	= cpu_to_le32(tree);
		n->v.depth	= cpu_to_le32(depth);
		n->v.btime.lo	= cpu_to_le64(bch2_current_time(c));
		n->v.btime.hi	= 0;

		for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
			n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));

		bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
		SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);

		ret = __bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
					   bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
		if (ret)
			goto err;

		new_snapids[i] = iter.pos.offset;

		mutex_lock(&c->snapshot_table_lock);
		snapshot_t_mut(c, new_snapids[i])->equiv = new_snapids[i];
		mutex_unlock(&c->snapshot_table_lock);
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/*
 * Create new snapshot IDs as children of an existing snapshot ID:
 */
static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
					      u32 *new_snapids,
					      u32 *snapshot_subvols,
					      unsigned nr_snapids)
{
	struct btree_iter iter;
	struct bkey_i_snapshot *n_parent;
	int ret = 0;

	n_parent = bch2_bkey_get_mut_typed(trans, &iter,
			BTREE_ID_snapshots, POS(0, parent),
			0, snapshot);
	ret = PTR_ERR_OR_ZERO(n_parent);
	if (unlikely(ret)) {
		if (bch2_err_matches(ret, ENOENT))
			bch_err(trans->c, "snapshot %u not found", parent);
		return ret;
	}

	if (n_parent->v.children[0] || n_parent->v.children[1]) {
		bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
		ret = -EINVAL;
		goto err;
	}

	ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
			     new_snapids, snapshot_subvols, nr_snapids);
	if (ret)
		goto err;

	n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
	n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
	n_parent->v.subvol = 0;
	SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/*
 * Create a snapshot node that is the root of a new tree:
 */
static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
					  u32 *new_snapids,
					  u32 *snapshot_subvols,
					  unsigned nr_snapids)
{
	struct bkey_i_snapshot_tree *n_tree;
	int ret;

	n_tree = __bch2_snapshot_tree_create(trans);
	ret =   PTR_ERR_OR_ZERO(n_tree) ?:
		create_snapids(trans, 0, n_tree->k.p.offset,
			       new_snapids, snapshot_subvols, nr_snapids);
	if (ret)
		return ret;

	n_tree->v.master_subvol	= cpu_to_le32(snapshot_subvols[0]);
	n_tree->v.root_snapshot	= cpu_to_le32(new_snapids[0]);
	return 0;
}

int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
			      u32 *new_snapids,
			      u32 *snapshot_subvols,
			      unsigned nr_snapids)
{
	BUG_ON((parent == 0) != (nr_snapids == 1));
	BUG_ON((parent != 0) != (nr_snapids == 2));

	return parent
		? bch2_snapshot_node_create_children(trans, parent,
				new_snapids, snapshot_subvols, nr_snapids)
		: bch2_snapshot_node_create_tree(trans,
				new_snapids, snapshot_subvols, nr_snapids);
}

/*
 * If we have an unlinked inode in an internal snapshot node, and the inode
 * really has been deleted in all child snapshots, how does this get cleaned up?
 *
 * first there is the problem of how keys that have been overwritten in all
 * child snapshots get deleted (unimplemented?), but inodes may perhaps be
 * special?
 *
 * also: unlinked inode in internal snapshot appears to not be getting deleted
 * correctly if inode doesn't exist in leaf snapshots
 *
 * solution:
 *
 * for a key in an interior snapshot node that needs work to be done that
 * requires it to be mutated: iterate over all descendent leaf nodes and copy
 * that key to snapshot leaf nodes, where we can mutate it
 */

static int delete_dead_snapshots_process_key(struct btree_trans *trans,
					     struct btree_iter *iter,
					     struct bkey_s_c k,
					     snapshot_id_list *deleted,
					     snapshot_id_list *equiv_seen,
					     struct bpos *last_pos)
{
	int ret = bch2_check_key_has_snapshot(trans, iter, k);
	if (ret)
		return ret < 0 ? ret : 0;

	struct bch_fs *c = trans->c;
	u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
	if (!equiv) /* key for invalid snapshot node, but we chose not to delete */
		return 0;

	if (!bkey_eq(k.k->p, *last_pos))
		equiv_seen->nr = 0;

	if (snapshot_list_has_id(deleted, k.k->p.snapshot))
		return bch2_btree_delete_at(trans, iter,
					    BTREE_UPDATE_internal_snapshot_node);

	if (!bpos_eq(*last_pos, k.k->p) &&
	    snapshot_list_has_id(equiv_seen, equiv))
		return bch2_btree_delete_at(trans, iter,
					    BTREE_UPDATE_internal_snapshot_node);

	*last_pos = k.k->p;

	ret = snapshot_list_add_nodup(c, equiv_seen, equiv);
	if (ret)
		return ret;

	/*
	 * When we have a linear chain of snapshot nodes, we consider
	 * those to form an equivalence class: we're going to collapse
	 * them all down to a single node, and keep the leaf-most node -
	 * which has the same id as the equivalence class id.
	 *
	 * If there are multiple keys in different snapshots at the same
	 * position, we're only going to keep the one in the newest
	 * snapshot (we delete the others above) - the rest have been
	 * overwritten and are redundant, and for the key we're going to keep we
	 * need to move it to the equivalence class ID if it's not there
	 * already.
	 */
	if (equiv != k.k->p.snapshot) {
		struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
		int ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			return ret;

		new->k.p.snapshot = equiv;

		struct btree_iter new_iter;
		bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
				     BTREE_ITER_all_snapshots|
				     BTREE_ITER_cached|
				     BTREE_ITER_intent);

		ret =   bch2_btree_iter_traverse(&new_iter) ?:
			bch2_trans_update(trans, &new_iter, new,
					BTREE_UPDATE_internal_snapshot_node) ?:
			bch2_btree_delete_at(trans, iter,
					BTREE_UPDATE_internal_snapshot_node);
		bch2_trans_iter_exit(trans, &new_iter);
		if (ret)
			return ret;
	}

	return 0;
}

static int bch2_snapshot_needs_delete(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_snapshot snap;
	u32 children[2];
	int ret;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	snap = bkey_s_c_to_snapshot(k);
	if (BCH_SNAPSHOT_DELETED(snap.v) ||
	    BCH_SNAPSHOT_SUBVOL(snap.v))
		return 0;

	children[0] = le32_to_cpu(snap.v->children[0]);
	children[1] = le32_to_cpu(snap.v->children[1]);

	ret =   bch2_snapshot_live(trans, children[0]) ?:
		bch2_snapshot_live(trans, children[1]);
	if (ret < 0)
		return ret;
	return !ret;
}

/*
 * For a given snapshot, if it doesn't have a subvolume that points to it, and
 * it doesn't have child snapshot nodes - it's now redundant and we can mark it
 * as deleted.
 */
static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct bkey_s_c k)
{
	int ret = bch2_snapshot_needs_delete(trans, k);

	return ret <= 0
		? ret
		: bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
}

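/*
 * Like bch2_snapshot_nth_parent(), but skips over any IDs in @skip - used when
 * repointing skiplist entries away from interior nodes that are about to be
 * deleted.
 */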
static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
						snapshot_id_list *skip)
{
	rcu_read_lock();
	while (snapshot_list_has_id(skip, id))
		id = __bch2_snapshot_parent(c, id);

	while (n--) {
		do {
			id = __bch2_snapshot_parent(c, id);
		} while (snapshot_list_has_id(skip, id));
	}
	rcu_read_unlock();

	return id;
}

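/*
 * For a snapshot node that is not itself being deleted but has deleted
 * ancestors: reduce its depth by the number of deleted ancestors and replace
 * any skiplist entries that point to a deleted node with a surviving ancestor.
 */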
static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
					      struct btree_iter *iter, struct bkey_s_c k,
					      snapshot_id_list *deleted)
{
	struct bch_fs *c = trans->c;
	u32 nr_deleted_ancestors = 0;
	struct bkey_i_snapshot *s;
	int ret;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	if (snapshot_list_has_id(deleted, k.k->p.offset))
		return 0;

	s = bch2_bkey_make_mut_noupdate_typed(trans, k, snapshot);
	ret = PTR_ERR_OR_ZERO(s);
	if (ret)
		return ret;

	darray_for_each(*deleted, i)
		nr_deleted_ancestors += bch2_snapshot_is_ancestor(c, s->k.p.offset, *i);

	if (!nr_deleted_ancestors)
		return 0;

	le32_add_cpu(&s->v.depth, -nr_deleted_ancestors);

	if (!s->v.depth) {
		s->v.skip[0] = 0;
		s->v.skip[1] = 0;
		s->v.skip[2] = 0;
	} else {
		u32 depth = le32_to_cpu(s->v.depth);
		u32 parent = bch2_snapshot_parent(c, s->k.p.offset);

		for (unsigned j = 0; j < ARRAY_SIZE(s->v.skip); j++) {
			u32 id = le32_to_cpu(s->v.skip[j]);

			if (snapshot_list_has_id(deleted, id)) {
				id = bch2_snapshot_nth_parent_skip(c,
							parent,
							depth > 1
							? get_random_u32_below(depth - 1)
							: 0,
							deleted);
				s->v.skip[j] = cpu_to_le32(id);
			}
		}

		bubble_sort(s->v.skip, ARRAY_SIZE(s->v.skip), cmp_le32);
	}

	return bch2_trans_update(trans, iter, &s->k_i, 0);
}

int bch2_delete_dead_snapshots(struct bch_fs *c)
{
	struct btree_trans *trans;
	snapshot_id_list deleted = { 0 };
	snapshot_id_list deleted_interior = { 0 };
	int ret = 0;

	if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
		return 0;

	trans = bch2_trans_get(c);

	/*
	 * For every snapshot node: If we have no live children and it's not
	 * pointed to by a subvolume, delete it:
	 */
	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots,
			POS_MIN, 0, k,
			NULL, NULL, 0,
		bch2_delete_redundant_snapshot(trans, k));
	bch_err_msg(c, ret, "deleting redundant snapshots");
	if (ret)
		goto err;

	ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
				 POS_MIN, 0, k,
		bch2_snapshot_set_equiv(trans, k));
	bch_err_msg(c, ret, "in bch2_snapshots_set_equiv");
	if (ret)
		goto err;

	ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
				 POS_MIN, 0, k, ({
		if (k.k->type != KEY_TYPE_snapshot)
			continue;

		BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v)
			? snapshot_list_add(c, &deleted, k.k->p.offset)
			: 0;
	}));
	bch_err_msg(c, ret, "walking snapshots");
	if (ret)
		goto err;

	for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
		struct bpos last_pos = POS_MIN;
		snapshot_id_list equiv_seen = { 0 };
		struct disk_reservation res = { 0 };

		if (!btree_type_has_snapshots(btree))
			continue;

		ret = for_each_btree_key_commit(trans, iter,
				btree, POS_MIN,
				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
				&res, NULL, BCH_TRANS_COMMIT_no_enospc,
			delete_dead_snapshots_process_key(trans, &iter, k, &deleted,
							  &equiv_seen, &last_pos));

		bch2_disk_reservation_put(c, &res);
		darray_exit(&equiv_seen);

		bch_err_msg(c, ret, "deleting keys from dying snapshots");
		if (ret)
			goto err;
	}

	bch2_trans_unlock(trans);
	down_write(&c->snapshot_create_lock);

	ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
				 POS_MIN, 0, k, ({
		u32 snapshot = k.k->p.offset;
		u32 equiv = bch2_snapshot_equiv(c, snapshot);

		equiv != snapshot
			? snapshot_list_add(c, &deleted_interior, snapshot)
			: 0;
	}));

	bch_err_msg(c, ret, "walking snapshots");
	if (ret)
		goto err_create_lock;

	/*
	 * Fixing children of deleted snapshots can't be done completely
	 * atomically, if we crash between here and when we delete the interior
	 * nodes some depth fields will be off:
	 */
	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN,
				  BTREE_ITER_intent, k,
				  NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior));
	if (ret)
		goto err_create_lock;

	darray_for_each(deleted, i) {
		ret = commit_do(trans, NULL, NULL, 0,
			bch2_snapshot_node_delete(trans, *i));
		bch_err_msg(c, ret, "deleting snapshot %u", *i);
		if (ret)
			goto err_create_lock;
	}

	darray_for_each(deleted_interior, i) {
		ret = commit_do(trans, NULL, NULL, 0,
			bch2_snapshot_node_delete(trans, *i));
		bch_err_msg(c, ret, "deleting snapshot %u", *i);
		if (ret)
			goto err_create_lock;
	}
err_create_lock:
	up_write(&c->snapshot_create_lock);
err:
	darray_exit(&deleted_interior);
	darray_exit(&deleted);
	bch2_trans_put(trans);
	bch_err_fn(c, ret);
	return ret;
}

void bch2_delete_dead_snapshots_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);

	set_worker_desc("bcachefs-delete-dead-snapshots/%s", c->name);

	bch2_delete_dead_snapshots(c);
	bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
}

void bch2_delete_dead_snapshots_async(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
	    !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
}

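/*
 * Return a positive value if any key exists at @pos in a snapshot that is a
 * descendant of @pos.snapshot - i.e. the key at @pos has been overwritten in
 * some child snapshot. Scans backwards from @pos across all snapshots at the
 * same position.
 */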
int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
				       enum btree_id id,
				       struct bpos pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, id, pos,
			     BTREE_ITER_not_extents|
			     BTREE_ITER_all_snapshots);
	while (1) {
		k = bch2_btree_iter_prev(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		if (!k.k)
			break;

		if (!bkey_eq(pos, k.k->p))
			break;

		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
			ret = 1;
			break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c_snapshot snap;
	int ret = 0;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	snap = bkey_s_c_to_snapshot(k);
	if (BCH_SNAPSHOT_DELETED(snap.v) ||
	    bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset ||
	    (ret = bch2_snapshot_needs_delete(trans, k)) > 0) {
		set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
		return 0;
	}

	return ret;
}

int bch2_snapshots_read(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_snapshots,
				   POS_MIN, 0, k,
			__bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
			bch2_snapshot_set_equiv(trans, k) ?:
			bch2_check_snapshot_needs_deletion(trans, k)) ?:
		for_each_btree_key(trans, iter, BTREE_ID_snapshots,
				   POS_MIN, 0, k,
			(set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
	bch_err_fn(c, ret);

	/*
	 * It's important that we check if we need to reconstruct snapshots
	 * before going RW, so we mark that pass as required in the superblock -
	 * otherwise, we could end up deleting keys with missing snapshot nodes
	 * instead
	 */
	BUG_ON(!test_bit(BCH_FS_new_fs, &c->flags) &&
	       test_bit(BCH_FS_may_go_rw, &c->flags));

	if (bch2_err_matches(ret, EIO) ||
	    (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_snapshots)))
		ret = bch2_run_explicit_recovery_pass_persistent(c, BCH_RECOVERY_PASS_reconstruct_snapshots);

	return ret;
}

void bch2_fs_snapshots_exit(struct bch_fs *c)
{
	kvfree(rcu_dereference_protected(c->snapshots, true));
}