// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "fs.h"
#include "snapshot.h"

#include <linux/random.h>

/*
 * Snapshot trees:
 *
 * Keys in BTREE_ID_snapshot_trees identify a whole tree of snapshot nodes; they
 * exist to provide a stable identifier for the whole lifetime of a snapshot
 * tree.
 */

void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
				struct bkey_s_c k)
{
	struct bkey_s_c_snapshot_tree t = bkey_s_c_to_snapshot_tree(k);

	prt_printf(out, "subvol %u root snapshot %u",
		   le32_to_cpu(t.v->master_subvol),
		   le32_to_cpu(t.v->root_snapshot));
}

int bch2_snapshot_tree_invalid(struct bch_fs *c, struct bkey_s_c k,
			       enum bkey_invalid_flags flags,
			       struct printbuf *err)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
			 bkey_lt(k.k->p, POS(0, 1)), c, err,
			 snapshot_tree_pos_bad,
			 "bad pos");
fsck_err:
	return ret;
}

int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
			      struct bch_snapshot_tree *s)
{
	int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
					  BTREE_ITER_WITH_UPDATES, snapshot_tree, s);

	if (bch2_err_matches(ret, ENOENT))
		ret = -BCH_ERR_ENOENT_snapshot_tree;
	return ret;
}
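
/*
 * Usage sketch (editor's illustration, not from the original source): looking
 * up the root snapshot of a tree from inside a btree transaction, relying on
 * the ENOENT conversion done above:
 *
 *	struct bch_snapshot_tree s_t;
 *	u32 root;
 *	int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
 *
 *	if (!ret)
 *		root = le32_to_cpu(s_t.root_snapshot);
 *	else if (ret == -BCH_ERR_ENOENT_snapshot_tree)
 *		; // dangling tree id - caller decides whether that's repairable
 */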

struct bkey_i_snapshot_tree *
__bch2_snapshot_tree_create(struct btree_trans *trans)
{
	struct btree_iter iter;
	int ret = bch2_bkey_get_empty_slot(trans, &iter,
			BTREE_ID_snapshot_trees, POS(0, U32_MAX));
	struct bkey_i_snapshot_tree *s_t;

	if (ret == -BCH_ERR_ENOSPC_btree_slot)
		ret = -BCH_ERR_ENOSPC_snapshot_tree;
	if (ret)
		return ERR_PTR(ret);

	s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
	ret = PTR_ERR_OR_ZERO(s_t);
	bch2_trans_iter_exit(trans, &iter);
	return ret ? ERR_PTR(ret) : s_t;
}

static int bch2_snapshot_tree_create(struct btree_trans *trans,
				u32 root_id, u32 subvol_id, u32 *tree_id)
{
	struct bkey_i_snapshot_tree *n_tree =
		__bch2_snapshot_tree_create(trans);

	if (IS_ERR(n_tree))
		return PTR_ERR(n_tree);

	n_tree->v.master_subvol	= cpu_to_le32(subvol_id);
	n_tree->v.root_snapshot	= cpu_to_le32(root_id);
	*tree_id = n_tree->k.p.offset;
	return 0;
}

/* Snapshot nodes: */

static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
{
	struct snapshot_table *t;

	rcu_read_lock();
	t = rcu_dereference(c->snapshots);

	while (id && id < ancestor)
		id = __snapshot_t(t, id)->parent;
	rcu_read_unlock();

	return id == ancestor;
}

static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
{
	const struct snapshot_t *s = __snapshot_t(t, id);

	if (s->skip[2] <= ancestor)
		return s->skip[2];
	if (s->skip[1] <= ancestor)
		return s->skip[1];
	if (s->skip[0] <= ancestor)
		return s->skip[0];
	return s->parent;
}

bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
{
	struct snapshot_table *t;
	bool ret;

	EBUG_ON(c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_snapshots);

	rcu_read_lock();
	t = rcu_dereference(c->snapshots);

	while (id && id < ancestor - IS_ANCESTOR_BITMAP)
		id = get_ancestor_below(t, id, ancestor);

	if (id && id < ancestor) {
		ret = test_bit(ancestor - id - 1, __snapshot_t(t, id)->is_ancestor);

		EBUG_ON(ret != bch2_snapshot_is_ancestor_early(c, id, ancestor));
	} else {
		ret = id == ancestor;
	}

	rcu_read_unlock();

	return ret;
}
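
/*
 * Worked example (editor's sketch, ids invented): snapshot ids grow towards
 * the root, so asking whether 1000 is an ancestor of 12 climbs from 12 via
 * get_ancestor_below(), each hop taking the largest skip pointer that doesn't
 * overshoot the target; once within IS_ANCESTOR_BITMAP ids of the target, one
 * bit test finishes the query:
 *
 *	while (id && id < ancestor - IS_ANCESTOR_BITMAP)
 *		id = get_ancestor_below(t, id, ancestor);	// skiplist hops
 *
 *	return id && id < ancestor
 *		? test_bit(ancestor - id - 1, __snapshot_t(t, id)->is_ancestor)
 *		: id == ancestor;
 *
 * Expected cost is logarithmic in chain depth, vs. linear for the _early
 * variant above that walks plain parent pointers.
 */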

static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
{
	size_t idx = U32_MAX - id;
	size_t new_size;
	struct snapshot_table *new, *old;

	new_size = max(16UL, roundup_pow_of_two(idx + 1));

	new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
	if (!new)
		return NULL;

	old = rcu_dereference_protected(c->snapshots, true);
	if (old)
		memcpy(new->s,
		       rcu_dereference_protected(c->snapshots, true)->s,
		       sizeof(new->s[0]) * c->snapshot_table_size);

	rcu_assign_pointer(c->snapshots, new);
	c->snapshot_table_size = new_size;
	kvfree_rcu_mightsleep(old);

	return &rcu_dereference_protected(c->snapshots, true)->s[idx];
}

static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
{
	size_t idx = U32_MAX - id;

	lockdep_assert_held(&c->snapshot_table_lock);

	if (likely(idx < c->snapshot_table_size))
		return &rcu_dereference_protected(c->snapshots, true)->s[idx];

	return __snapshot_t_mut(c, id);
}

void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
			   struct bkey_s_c k)
{
	struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);

	prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u tree %u",
	       BCH_SNAPSHOT_SUBVOL(s.v),
	       BCH_SNAPSHOT_DELETED(s.v),
	       le32_to_cpu(s.v->parent),
	       le32_to_cpu(s.v->children[0]),
	       le32_to_cpu(s.v->children[1]),
	       le32_to_cpu(s.v->subvol),
	       le32_to_cpu(s.v->tree));

	if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, depth))
		prt_printf(out, " depth %u skiplist %u %u %u",
			   le32_to_cpu(s.v->depth),
			   le32_to_cpu(s.v->skip[0]),
			   le32_to_cpu(s.v->skip[1]),
			   le32_to_cpu(s.v->skip[2]));
}

int bch2_snapshot_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_s_c_snapshot s;
	u32 i, id;
	int ret = 0;

	bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
			 bkey_lt(k.k->p, POS(0, 1)), c, err,
			 snapshot_pos_bad,
			 "bad pos");

	s = bkey_s_c_to_snapshot(k);

	id = le32_to_cpu(s.v->parent);
	bkey_fsck_err_on(id && id <= k.k->p.offset, c, err,
			 snapshot_parent_bad,
			 "bad parent node (%u <= %llu)",
			 id, k.k->p.offset);

	bkey_fsck_err_on(le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]), c, err,
			 snapshot_children_not_normalized,
			 "children not normalized");

	bkey_fsck_err_on(s.v->children[0] && s.v->children[0] == s.v->children[1], c, err,
			 snapshot_child_duplicate,
			 "duplicate child nodes");

	for (i = 0; i < 2; i++) {
		id = le32_to_cpu(s.v->children[i]);

		bkey_fsck_err_on(id >= k.k->p.offset, c, err,
				 snapshot_child_bad,
				 "bad child node (%u >= %llu)",
				 id, k.k->p.offset);
	}

	if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
		bkey_fsck_err_on(le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
				 le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2]), c, err,
				 snapshot_skiplist_not_normalized,
				 "skiplist not normalized");

		for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
			id = le32_to_cpu(s.v->skip[i]);

			bkey_fsck_err_on(id && id < le32_to_cpu(s.v->parent), c, err,
					 snapshot_skiplist_bad,
					 "bad skiplist node %u", id);
		}
	}
fsck_err:
	return ret;
}

static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
{
	struct snapshot_t *t = snapshot_t_mut(c, id);
	u32 parent = id;

	while ((parent = bch2_snapshot_parent_early(c, parent)) &&
	       parent - id - 1 < IS_ANCESTOR_BITMAP)
		__set_bit(parent - id - 1, t->is_ancestor);
}

static void set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
{
	mutex_lock(&c->snapshot_table_lock);
	__set_is_ancestor_bitmap(c, id);
	mutex_unlock(&c->snapshot_table_lock);
}
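
/*
 * Editor's sketch of the invariant established above (ids invented): bit
 * (p - id - 1) of a node's is_ancestor bitmap is set iff p is an ancestor of
 * id, for ancestors within IS_ANCESTOR_BITMAP ids. E.g., for a node 100 with
 * parent chain 100 -> 104 -> 110:
 *
 *	const struct snapshot_t *s = __snapshot_t(t, 100);
 *
 *	test_bit(104 - 100 - 1, s->is_ancestor);	// true: parent
 *	test_bit(110 - 100 - 1, s->is_ancestor);	// true: grandparent
 *	test_bit(105 - 100 - 1, s->is_ancestor);	// false: not on the chain
 */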

int bch2_mark_snapshot(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s_c new,
		       unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct snapshot_t *t;
	u32 id = new.k->p.offset;
	int ret = 0;

	mutex_lock(&c->snapshot_table_lock);

	t = snapshot_t_mut(c, id);
	if (!t) {
		ret = -BCH_ERR_ENOMEM_mark_snapshot;
		goto err;
	}

	if (new.k->type == KEY_TYPE_snapshot) {
		struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);

		t->parent	= le32_to_cpu(s.v->parent);
		t->children[0]	= le32_to_cpu(s.v->children[0]);
		t->children[1]	= le32_to_cpu(s.v->children[1]);
		t->subvol	= BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
		t->tree		= le32_to_cpu(s.v->tree);

		if (bkey_val_bytes(s.k) > offsetof(struct bch_snapshot, depth)) {
			t->depth	= le32_to_cpu(s.v->depth);
			t->skip[0]	= le32_to_cpu(s.v->skip[0]);
			t->skip[1]	= le32_to_cpu(s.v->skip[1]);
			t->skip[2]	= le32_to_cpu(s.v->skip[2]);
		} else {
			t->depth	= 0;
			t->skip[0]	= 0;
			t->skip[1]	= 0;
			t->skip[2]	= 0;
		}

		__set_is_ancestor_bitmap(c, id);

		if (BCH_SNAPSHOT_DELETED(s.v)) {
			set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
			if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots)
				bch2_delete_dead_snapshots_async(c);
		}
	} else {
		memset(t, 0, sizeof(*t));
	}
err:
	mutex_unlock(&c->snapshot_table_lock);
	return ret;
}

int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
			 struct bch_snapshot *s)
{
	return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
				       BTREE_ITER_WITH_UPDATES, snapshot, s);
}

static int bch2_snapshot_live(struct btree_trans *trans, u32 id)
{
	struct bch_snapshot v;
	int ret;

	if (!id)
		return 0;

	ret = bch2_snapshot_lookup(trans, id, &v);
	if (bch2_err_matches(ret, ENOENT))
		bch_err(trans->c, "snapshot node %u not found", id);
	if (ret)
		return ret;

	return !BCH_SNAPSHOT_DELETED(&v);
}

/*
 * If @k is a snapshot with just one live child, it's part of a linear chain,
 * which we consider to be an equivalence class: after snapshot deletion
 * cleanup, there should be only a single key at a given position within an
 * equivalence class.
 *
 * This sets the equivalence class of @k to the child's equivalence class, if
 * @k is part of such a linear chain: this correctly sets equivalence classes
 * on startup if we run leaf to root (i.e. in natural key order).
 */
static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	unsigned i, nr_live = 0, live_idx = 0;
	struct bkey_s_c_snapshot snap;
	u32 id = k.k->p.offset, child[2];

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	snap = bkey_s_c_to_snapshot(k);

	child[0] = le32_to_cpu(snap.v->children[0]);
	child[1] = le32_to_cpu(snap.v->children[1]);

	for (i = 0; i < 2; i++) {
		int ret = bch2_snapshot_live(trans, child[i]);

		if (ret < 0)
			return ret;

		if (ret)
			live_idx = i;
		nr_live += ret;
	}

	mutex_lock(&c->snapshot_table_lock);

	snapshot_t_mut(c, id)->equiv = nr_live == 1
		? snapshot_t_mut(c, child[live_idx])->equiv
		: id;

	mutex_unlock(&c->snapshot_table_lock);

	return 0;
}
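
/*
 * Editor's illustration with invented ids: for a linear chain 3 -> 5 -> 9
 * (child -> parent) where only the leaf 3 is live, running the helper above
 * leaf to root collapses all three nodes into one equivalence class:
 *
 *	equiv(3) = 3;	// no live children: class is the node's own id
 *	equiv(5) = 3;	// exactly one live child (3): inherit its class
 *	equiv(9) = 3;	// exactly one live child (5): inherit again
 *
 * Had node 9 two live children, equiv(9) would stay 9: a branch point isn't
 * part of either child's linear chain.
 */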

/* fsck: */

static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
{
	return snapshot_t(c, id)->children[child];
}

static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
{
	return bch2_snapshot_child(c, id, 0);
}

static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
{
	return bch2_snapshot_child(c, id, 1);
}

static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
{
	u32 n, parent;

	n = bch2_snapshot_left_child(c, id);
	if (n)
		return n;

	while ((parent = bch2_snapshot_parent(c, id))) {
		n = bch2_snapshot_right_child(c, parent);
		if (n && n != id)
			return n;
		id = parent;
	}

	return 0;
}

static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
{
	u32 id = snapshot_root;
	u32 subvol = 0, s;

	while (id) {
		s = snapshot_t(c, id)->subvol;

		if (s && (!subvol || s < subvol))
			subvol = s;

		id = bch2_snapshot_tree_next(c, id);
	}

	return subvol;
}

static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
					    u32 snapshot_root, u32 *subvol_id)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_s_c_subvolume s;
	bool found = false;
	int ret;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
				     0, k, ret) {
		if (k.k->type != KEY_TYPE_subvolume)
			continue;

		s = bkey_s_c_to_subvolume(k);
		if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
			continue;
		if (!BCH_SUBVOLUME_SNAP(s.v)) {
			*subvol_id = s.k->p.offset;
			found = true;
			break;
		}
	}

	bch2_trans_iter_exit(trans, &iter);

	if (!ret && !found) {
		struct bkey_i_subvolume *u;

		*subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);

		u = bch2_bkey_get_mut_typed(trans, &iter,
					    BTREE_ID_subvolumes, POS(0, *subvol_id),
					    0, subvolume);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			return ret;

		SET_BCH_SUBVOLUME_SNAP(&u->v, false);
	}

	return ret;
}

static int check_snapshot_tree(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c_snapshot_tree st;
	struct bch_snapshot s;
	struct bch_subvolume subvol;
	struct printbuf buf = PRINTBUF;
	u32 root_id;
	int ret;

	if (k.k->type != KEY_TYPE_snapshot_tree)
		return 0;

	st = bkey_s_c_to_snapshot_tree(k);
	root_id = le32_to_cpu(st.v->root_snapshot);

	ret = bch2_snapshot_lookup(trans, root_id, &s);
	if (ret && !bch2_err_matches(ret, ENOENT))
		goto err;

	if (fsck_err_on(ret ||
			root_id != bch2_snapshot_root(c, root_id) ||
			st.k->p.offset != le32_to_cpu(s.tree),
			c, snapshot_tree_to_missing_snapshot,
			"snapshot tree points to missing/incorrect snapshot:\n  %s",
			(bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto err;
	}

	ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol),
				 false, 0, &subvol);
	if (ret && !bch2_err_matches(ret, ENOENT))
		goto err;

	if (fsck_err_on(ret,
			c, snapshot_tree_to_missing_subvol,
			"snapshot tree points to missing subvolume:\n  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
	    fsck_err_on(!bch2_snapshot_is_ancestor_early(c,
						le32_to_cpu(subvol.snapshot),
						root_id),
			c, snapshot_tree_to_wrong_subvol,
			"snapshot tree points to subvolume that does not point to snapshot in this tree:\n  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
	    fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol),
			c, snapshot_tree_to_snapshot_subvol,
			"snapshot tree points to snapshot subvolume:\n  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
		struct bkey_i_snapshot_tree *u;
		u32 subvol_id;

		ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
		if (ret)
			goto err;

		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		u->v.master_subvol = cpu_to_le32(subvol_id);
		st = snapshot_tree_i_to_s_c(u);
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

/*
 * For each snapshot_tree, make sure it points to the root of a snapshot tree
 * and that snapshot entry points back to it, or delete it.
 *
 * And, make sure it points to a subvolume within that snapshot tree, or correct
 * it to point to the oldest subvolume within that snapshot tree.
 */
int bch2_check_snapshot_trees(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
			BTREE_ID_snapshot_trees, POS_MIN,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
		check_snapshot_tree(trans, &iter, k)));

	if (ret)
		bch_err(c, "error %i checking snapshot trees", ret);
	return ret;
}

/*
 * Look up the snapshot tree for @tree_id and find its root, then make sure
 * @snap_id is a descendant of that root:
 */
static int snapshot_tree_ptr_good(struct btree_trans *trans,
				  u32 snap_id, u32 tree_id)
{
	struct bch_snapshot_tree s_t;
	int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);

	if (bch2_err_matches(ret, ENOENT))
		return 0;
	if (ret)
		return ret;

	return bch2_snapshot_is_ancestor_early(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
}

u32 bch2_snapshot_skiplist_get(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s;

	if (!id)
		return 0;

	rcu_read_lock();
	s = snapshot_t(c, id);
	if (s->parent)
		id = bch2_snapshot_nth_parent(c, id, get_random_u32_below(s->depth));
	rcu_read_unlock();

	return id;
}
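
/*
 * Editor's note: the helper above returns a uniformly random ancestor of @id
 * (or @id itself for a root node), which is how skip pointers are sampled
 * when a node is created - see create_snapids() below:
 *
 *	for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
 *		n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));
 *
 *	bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
 *
 * Random heights give the usual skiplist expected O(log n) lookups without
 * storing or rebalancing explicit per-node level information.
 */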

static int snapshot_skiplist_good(struct btree_trans *trans, u32 id, struct bch_snapshot s)
{
	unsigned i;

	for (i = 0; i < 3; i++)
		if (!s.parent) {
			if (s.skip[i])
				return false;
		} else {
			if (!bch2_snapshot_is_ancestor_early(trans->c, id, le32_to_cpu(s.skip[i])))
				return false;
		}

	return true;
}

/*
 * snapshot_tree pointer was incorrect: look up root snapshot node, make sure
 * its snapshot_tree pointer is correct (allocate new one if necessary), then
 * update this node's pointer to root node's pointer:
 */
static int snapshot_tree_ptr_repair(struct btree_trans *trans,
				    struct btree_iter *iter,
				    struct bkey_s_c k,
				    struct bch_snapshot *s)
{
	struct bch_fs *c = trans->c;
	struct btree_iter root_iter;
	struct bch_snapshot_tree s_t;
	struct bkey_s_c_snapshot root;
	struct bkey_i_snapshot *u;
	u32 root_id = bch2_snapshot_root(c, k.k->p.offset), tree_id;
	int ret;

	root = bch2_bkey_get_iter_typed(trans, &root_iter,
			       BTREE_ID_snapshots, POS(0, root_id),
			       BTREE_ITER_WITH_UPDATES, snapshot);
	ret = bkey_err(root);
	if (ret)
		goto err;

	tree_id = le32_to_cpu(root.v->tree);

	ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
	if (ret && !bch2_err_matches(ret, ENOENT))
		return ret;

	if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
		u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot);
		ret =   PTR_ERR_OR_ZERO(u) ?:
			bch2_snapshot_tree_create(trans, root_id,
				bch2_snapshot_tree_oldest_subvol(c, root_id),
				&tree_id);
		if (ret)
			goto err;

		u->v.tree = cpu_to_le32(tree_id);
		if (k.k->p.offset == root_id)
			*s = u->v;
	}

	if (k.k->p.offset != root_id) {
		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		u->v.tree = cpu_to_le32(tree_id);
		*s = u->v;
	}
err:
	bch2_trans_iter_exit(trans, &root_iter);
	return ret;
}

static int check_snapshot(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bch_snapshot s;
	struct bch_subvolume subvol;
	struct bch_snapshot v;
	struct bkey_i_snapshot *u;
	u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset);
	u32 real_depth;
	struct printbuf buf = PRINTBUF;
	bool should_have_subvol;
	u32 i, id;
	int ret = 0;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	memset(&s, 0, sizeof(s));
	memcpy(&s, k.v, bkey_val_bytes(k.k));

	id = le32_to_cpu(s.parent);
	if (id) {
		ret = bch2_snapshot_lookup(trans, id, &v);
		if (bch2_err_matches(ret, ENOENT))
			bch_err(c, "snapshot with nonexistent parent:\n  %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (ret)
			goto err;

		if (le32_to_cpu(v.children[0]) != k.k->p.offset &&
		    le32_to_cpu(v.children[1]) != k.k->p.offset) {
			bch_err(c, "snapshot parent %u missing pointer to child %llu",
				id, k.k->p.offset);
			ret = -EINVAL;
			goto err;
		}
	}

	for (i = 0; i < 2 && s.children[i]; i++) {
		id = le32_to_cpu(s.children[i]);

		ret = bch2_snapshot_lookup(trans, id, &v);
		if (bch2_err_matches(ret, ENOENT))
			bch_err(c, "snapshot node %llu has nonexistent child %u",
				k.k->p.offset, id);
		if (ret)
			goto err;

		if (le32_to_cpu(v.parent) != k.k->p.offset) {
			bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
				id, le32_to_cpu(v.parent), k.k->p.offset);
			ret = -EINVAL;
			goto err;
		}
	}

	should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
		!BCH_SNAPSHOT_DELETED(&s);

	if (should_have_subvol) {
		id = le32_to_cpu(s.subvol);
		ret = bch2_subvolume_get(trans, id, false, 0, &subvol);
		if (bch2_err_matches(ret, ENOENT))
			bch_err(c, "snapshot points to nonexistent subvolume:\n  %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (ret)
			goto err;

		if (BCH_SNAPSHOT_SUBVOL(&s) != (le32_to_cpu(subvol.snapshot) == k.k->p.offset)) {
			bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
				k.k->p.offset);
			ret = -EINVAL;
			goto err;
		}
	} else {
		if (fsck_err_on(s.subvol,
				c, snapshot_should_not_have_subvol,
				"snapshot should not point to subvol:\n  %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
			u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
			ret = PTR_ERR_OR_ZERO(u);
			if (ret)
				goto err;

			u->v.subvol = 0;
			s = u->v;
		}
	}

	ret = snapshot_tree_ptr_good(trans, k.k->p.offset, le32_to_cpu(s.tree));
	if (ret < 0)
		goto err;

	if (fsck_err_on(!ret, c, snapshot_to_bad_snapshot_tree,
			"snapshot points to missing/incorrect tree:\n  %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
		if (ret)
			goto err;
	}
	ret = 0;

	real_depth = bch2_snapshot_depth(c, parent_id);

	if (le32_to_cpu(s.depth) != real_depth &&
	    (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
	     fsck_err(c, snapshot_bad_depth,
		      "snapshot with incorrect depth field, should be %u:\n  %s",
		      real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		u->v.depth = cpu_to_le32(real_depth);
		s = u->v;
	}

	ret = snapshot_skiplist_good(trans, k.k->p.offset, s);
	if (ret < 0)
		goto err;

	if (!ret &&
	    (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
	     fsck_err(c, snapshot_bad_skiplist,
		      "snapshot with bad skiplist field:\n  %s",
		      (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		for (i = 0; i < ARRAY_SIZE(u->v.skip); i++)
			u->v.skip[i] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent_id));

		bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_le32);
		s = u->v;
	}
	ret = 0;
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_snapshots(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	/*
	 * We iterate backwards as checking/fixing the depth field requires that
	 * the parent's depth already be correct:
	 */
	ret = bch2_trans_run(c,
		for_each_btree_key_reverse_commit(trans, iter,
			BTREE_ID_snapshots, POS_MAX,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
		check_snapshot(trans, &iter, k)));
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

/*
 * Mark a snapshot as deleted, for future cleanup:
 */
int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
{
	struct btree_iter iter;
	struct bkey_i_snapshot *s;
	int ret = 0;

	s = bch2_bkey_get_mut_typed(trans, &iter,
				    BTREE_ID_snapshots, POS(0, id),
				    0, snapshot);
	ret = PTR_ERR_OR_ZERO(s);
	if (unlikely(ret)) {
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
					trans->c, "missing snapshot %u", id);
		return ret;
	}

	/* already deleted? */
	if (BCH_SNAPSHOT_DELETED(&s->v))
		goto err;

	SET_BCH_SNAPSHOT_DELETED(&s->v, true);
	SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
	s->v.subvol = 0;
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
{
	if (le32_to_cpu(s->children[0]) < le32_to_cpu(s->children[1]))
		swap(s->children[0], s->children[1]);
}

static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
	struct btree_iter c_iter = (struct btree_iter) { NULL };
	struct btree_iter tree_iter = (struct btree_iter) { NULL };
	struct bkey_s_c_snapshot s;
	u32 parent_id, child_id;
	unsigned i;
	int ret = 0;

	s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
				     BTREE_ITER_INTENT, snapshot);
	ret = bkey_err(s);
	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
				"missing snapshot %u", id);

	if (ret)
		goto err;

	BUG_ON(s.v->children[1]);

	parent_id = le32_to_cpu(s.v->parent);
	child_id = le32_to_cpu(s.v->children[0]);

	if (parent_id) {
		struct bkey_i_snapshot *parent;

		parent = bch2_bkey_get_mut_typed(trans, &p_iter,
				     BTREE_ID_snapshots, POS(0, parent_id),
				     0, snapshot);
		ret = PTR_ERR_OR_ZERO(parent);
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
					"missing snapshot %u", parent_id);
		if (unlikely(ret))
			goto err;

		/* find entry in parent->children for node being deleted */
		for (i = 0; i < 2; i++)
			if (le32_to_cpu(parent->v.children[i]) == id)
				break;

		if (bch2_fs_inconsistent_on(i == 2, c,
					"snapshot %u missing child pointer to %u",
					parent_id, id))
			goto err;

		parent->v.children[i] = cpu_to_le32(child_id);

		normalize_snapshot_child_pointers(&parent->v);
	}

	if (child_id) {
		struct bkey_i_snapshot *child;

		child = bch2_bkey_get_mut_typed(trans, &c_iter,
				     BTREE_ID_snapshots, POS(0, child_id),
				     0, snapshot);
		ret = PTR_ERR_OR_ZERO(child);
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
					"missing snapshot %u", child_id);
		if (unlikely(ret))
			goto err;

		child->v.parent = cpu_to_le32(parent_id);

		if (!child->v.parent) {
			child->v.skip[0] = 0;
			child->v.skip[1] = 0;
			child->v.skip[2] = 0;
		}
	}

	if (!parent_id) {
		/*
		 * We're deleting the root of a snapshot tree: update the
		 * snapshot_tree entry to point to the new root, or delete it if
		 * this is the last snapshot ID in this tree:
		 */
		struct bkey_i_snapshot_tree *s_t;

		BUG_ON(s.v->children[1]);

		s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
				BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
				0, snapshot_tree);
		ret = PTR_ERR_OR_ZERO(s_t);
		if (ret)
			goto err;

		if (s.v->children[0]) {
			s_t->v.root_snapshot = s.v->children[0];
		} else {
			s_t->k.type = KEY_TYPE_deleted;
			set_bkey_val_u64s(&s_t->k, 0);
		}
	}

	ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &tree_iter);
	bch2_trans_iter_exit(trans, &p_iter);
	bch2_trans_iter_exit(trans, &c_iter);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
			  u32 *new_snapids,
			  u32 *snapshot_subvols,
			  unsigned nr_snapids)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_snapshot *n;
	struct bkey_s_c k;
	unsigned i, j;
	u32 depth = bch2_snapshot_depth(c, parent);
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
			     POS_MIN, BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	for (i = 0; i < nr_snapids; i++) {
		k = bch2_btree_iter_prev_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (!k.k || !k.k->p.offset) {
			ret = -BCH_ERR_ENOSPC_snapshot_create;
			goto err;
		}

		n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(n);
		if (ret)
			goto err;

		n->v.flags	= 0;
		n->v.parent	= cpu_to_le32(parent);
		n->v.subvol	= cpu_to_le32(snapshot_subvols[i]);
		n->v.tree	= cpu_to_le32(tree);
		n->v.depth	= cpu_to_le32(depth);

		for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
			n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));

		bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
		SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);

		ret = bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
					 bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
		if (ret)
			goto err;

		new_snapids[i]	= iter.pos.offset;

		mutex_lock(&c->snapshot_table_lock);
		snapshot_t_mut(c, new_snapids[i])->equiv = new_snapids[i];
		mutex_unlock(&c->snapshot_table_lock);
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/*
 * Create new snapshot IDs as children of an existing snapshot ID:
 */
static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
			      u32 *new_snapids,
			      u32 *snapshot_subvols,
			      unsigned nr_snapids)
{
	struct btree_iter iter;
	struct bkey_i_snapshot *n_parent;
	int ret = 0;

	n_parent = bch2_bkey_get_mut_typed(trans, &iter,
			BTREE_ID_snapshots, POS(0, parent),
			0, snapshot);
	ret = PTR_ERR_OR_ZERO(n_parent);
	if (unlikely(ret)) {
		if (bch2_err_matches(ret, ENOENT))
			bch_err(trans->c, "snapshot %u not found", parent);
		return ret;
	}

	if (n_parent->v.children[0] || n_parent->v.children[1]) {
		bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
		ret = -EINVAL;
		goto err;
	}

	ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
			     new_snapids, snapshot_subvols, nr_snapids);
	if (ret)
		goto err;

	n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
	n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
	n_parent->v.subvol = 0;
	SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/*
 * Create a snapshot node that is the root of a new tree:
 */
static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
			      u32 *new_snapids,
			      u32 *snapshot_subvols,
			      unsigned nr_snapids)
{
	struct bkey_i_snapshot_tree *n_tree;
	int ret;

	n_tree = __bch2_snapshot_tree_create(trans);
	ret =   PTR_ERR_OR_ZERO(n_tree) ?:
		create_snapids(trans, 0, n_tree->k.p.offset,
			     new_snapids, snapshot_subvols, nr_snapids);
	if (ret)
		return ret;

	n_tree->v.master_subvol	= cpu_to_le32(snapshot_subvols[0]);
	n_tree->v.root_snapshot	= cpu_to_le32(new_snapids[0]);
	return 0;
}

int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
			      u32 *new_snapids,
			      u32 *snapshot_subvols,
			      unsigned nr_snapids)
{
	BUG_ON((parent == 0) != (nr_snapids == 1));
	BUG_ON((parent != 0) != (nr_snapids == 2));

	return parent
		? bch2_snapshot_node_create_children(trans, parent,
				new_snapids, snapshot_subvols, nr_snapids)
		: bch2_snapshot_node_create_tree(trans,
				new_snapids, snapshot_subvols, nr_snapids);
}
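
/*
 * Usage sketch (editor's illustration) of the two calling conventions
 * enforced by the BUG_ON()s above:
 *
 *	u32 new_snapids[2], snapshot_subvols[2];
 *
 *	// brand new snapshot tree: no parent, exactly one id:
 *	ret = bch2_snapshot_node_create(trans, 0,
 *			new_snapids, snapshot_subvols, 1);
 *
 *	// snapshotting under an existing node: exactly two ids, typically
 *	// one for the source subvolume and one for the new snapshot:
 *	ret = bch2_snapshot_node_create(trans, parent,
 *			new_snapids, snapshot_subvols, 2);
 *
 * The second form turns @parent into an interior node: it loses its subvol
 * and gains the two new leaves as children.
 */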

/*
 * If we have an unlinked inode in an internal snapshot node, and the inode
 * really has been deleted in all child snapshots, how does this get cleaned up?
 *
 * first there is the problem of how keys that have been overwritten in all
 * child snapshots get deleted (unimplemented?), but inodes may perhaps be
 * special?
 *
 * also: unlinked inode in internal snapshot appears to not be getting deleted
 * correctly if inode doesn't exist in leaf snapshots
 *
 * solution:
 *
 * for a key in an interior snapshot node that needs work to be done that
 * requires it to be mutated: iterate over all descendant leaf nodes and copy
 * that key to snapshot leaf nodes, where we can mutate it
 */

static int snapshot_delete_key(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k,
			       snapshot_id_list *deleted,
			       snapshot_id_list *equiv_seen,
			       struct bpos *last_pos)
{
	struct bch_fs *c = trans->c;
	u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);

	if (!bkey_eq(k.k->p, *last_pos))
		equiv_seen->nr = 0;
	*last_pos = k.k->p;

	if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
	    snapshot_list_has_id(equiv_seen, equiv)) {
		return bch2_btree_delete_at(trans, iter,
					    BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
	} else {
		return snapshot_list_add(c, equiv_seen, equiv);
	}
}

static int move_key_to_correct_snapshot(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);

	/*
	 * When we have a linear chain of snapshot nodes, we consider
	 * those to form an equivalence class: we're going to collapse
	 * them all down to a single node, and keep the leaf-most node -
	 * which has the same id as the equivalence class id.
	 *
	 * If there are multiple keys in different snapshots at the same
	 * position, we're only going to keep the one in the newest
	 * snapshot - the rest have been overwritten and are redundant,
	 * and for the key we're going to keep we need to move it to the
	 * equivalence class ID if it's not there already.
	 */
	if (equiv != k.k->p.snapshot) {
		struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
		struct btree_iter new_iter;
		int ret;

		ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			return ret;

		new->k.p.snapshot = equiv;

		bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
				     BTREE_ITER_ALL_SNAPSHOTS|
				     BTREE_ITER_CACHED|
				     BTREE_ITER_INTENT);

		ret =   bch2_btree_iter_traverse(&new_iter) ?:
			bch2_trans_update(trans, &new_iter, new,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
			bch2_btree_delete_at(trans, iter,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
		bch2_trans_iter_exit(trans, &new_iter);
		if (ret)
			return ret;
	}

	return 0;
}
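
/*
 * Editor's illustration, ids invented: with an equivalence class {3, 5, 9}
 * (3 being the leaf-most node and hence the class id), keys at one position
 * collapse during deletion cleanup as follows. Iteration sees lower snapshot
 * ids first, so:
 *
 *	(inode 4096, snapshot 3)	// seen first: kept, equiv 3 recorded
 *	(inode 4096, snapshot 9)	// same equiv already seen: deleted
 *
 * A key existing only at snapshot 9 is instead rewritten to snapshot 3 by
 * move_key_to_correct_snapshot() above.
 */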

static int bch2_snapshot_needs_delete(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_snapshot snap;
	u32 children[2];
	int ret;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	snap = bkey_s_c_to_snapshot(k);
	if (BCH_SNAPSHOT_DELETED(snap.v) ||
	    BCH_SNAPSHOT_SUBVOL(snap.v))
		return 0;

	children[0] = le32_to_cpu(snap.v->children[0]);
	children[1] = le32_to_cpu(snap.v->children[1]);

	ret   = bch2_snapshot_live(trans, children[0]) ?:
		bch2_snapshot_live(trans, children[1]);
	if (ret < 0)
		return ret;
	return !ret;
}

/*
 * For a given snapshot, if it doesn't have a subvolume that points to it, and
 * it doesn't have child snapshot nodes - it's now redundant and we can mark it
 * as deleted.
 */
static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct bkey_s_c k)
{
	int ret = bch2_snapshot_needs_delete(trans, k);

	return ret <= 0
		? ret
		: bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
}

static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
						snapshot_id_list *skip)
{
	rcu_read_lock();
	while (snapshot_list_has_id(skip, id))
		id = __bch2_snapshot_parent(c, id);

	while (n--) {
		do {
			id = __bch2_snapshot_parent(c, id);
		} while (snapshot_list_has_id(skip, id));
	}
	rcu_read_unlock();

	return id;
}
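
/*
 * Editor's example, ids invented: with parent chain 4 -> 7 -> 9 -> 12 and
 * skip list {9} (an interior node being deleted),
 * bch2_snapshot_nth_parent_skip(c, 4, 2, skip) counts two surviving
 * ancestors:
 *
 *	4 -> 7			// 7 not in the skip list: counts
 *	7 -> 9 -> 12		// 9 is being deleted: step past it to 12
 *
 * and returns 12. This is what keeps freshly repaired skip pointers pointing
 * at nodes that will still exist once the interior nodes are gone.
 */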

static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
					      struct btree_iter *iter, struct bkey_s_c k,
					      snapshot_id_list *deleted)
{
	struct bch_fs *c = trans->c;
	u32 nr_deleted_ancestors = 0;
	struct bkey_i_snapshot *s;
	u32 *i;
	int ret;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	if (snapshot_list_has_id(deleted, k.k->p.offset))
		return 0;

	s = bch2_bkey_make_mut_noupdate_typed(trans, k, snapshot);
	ret = PTR_ERR_OR_ZERO(s);
	if (ret)
		return ret;

	darray_for_each(*deleted, i)
		nr_deleted_ancestors += bch2_snapshot_is_ancestor(c, s->k.p.offset, *i);

	if (!nr_deleted_ancestors)
		return 0;

	le32_add_cpu(&s->v.depth, -nr_deleted_ancestors);

	if (!s->v.depth) {
		s->v.skip[0] = 0;
		s->v.skip[1] = 0;
		s->v.skip[2] = 0;
	} else {
		u32 depth = le32_to_cpu(s->v.depth);
		u32 parent = bch2_snapshot_parent(c, s->k.p.offset);

		for (unsigned j = 0; j < ARRAY_SIZE(s->v.skip); j++) {
			u32 id = le32_to_cpu(s->v.skip[j]);

			if (snapshot_list_has_id(deleted, id)) {
				id = bch2_snapshot_nth_parent_skip(c,
							parent,
							depth > 1
							? get_random_u32_below(depth - 1)
							: 0,
							deleted);
				s->v.skip[j] = cpu_to_le32(id);
			}
		}

		bubble_sort(s->v.skip, ARRAY_SIZE(s->v.skip), cmp_le32);
	}

	return bch2_trans_update(trans, iter, &s->k_i, 0);
}

int bch2_delete_dead_snapshots(struct bch_fs *c)
{
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_s_c_snapshot snap;
	snapshot_id_list deleted = { 0 };
	snapshot_id_list deleted_interior = { 0 };
	u32 *i, id;
	int ret = 0;

	if (!test_and_clear_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags))
		return 0;

	if (!test_bit(BCH_FS_STARTED, &c->flags)) {
		ret = bch2_fs_read_write_early(c);
		if (ret) {
			bch_err_msg(c, ret, "deleting dead snapshots: error going rw");
			return ret;
		}
	}

	trans = bch2_trans_get(c);

	/*
	 * For every snapshot node: If we have no live children and it's not
	 * pointed to by a subvolume, delete it:
	 */
	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots,
			POS_MIN, 0, k,
			NULL, NULL, 0,
		bch2_delete_redundant_snapshot(trans, k));
	if (ret) {
		bch_err_msg(c, ret, "deleting redundant snapshots");
		goto err;
	}

	ret = for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
				  POS_MIN, 0, k,
		bch2_snapshot_set_equiv(trans, k));
	if (ret) {
		bch_err_msg(c, ret, "in bch2_snapshot_set_equiv");
		goto err;
	}

	for_each_btree_key(trans, iter, BTREE_ID_snapshots,
			   POS_MIN, 0, k, ret) {
		if (k.k->type != KEY_TYPE_snapshot)
			continue;

		snap = bkey_s_c_to_snapshot(k);
		if (BCH_SNAPSHOT_DELETED(snap.v)) {
			ret = snapshot_list_add(c, &deleted, k.k->p.offset);
			if (ret)
				break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	if (ret) {
		bch_err_msg(c, ret, "walking snapshots");
		goto err;
	}

	for (id = 0; id < BTREE_ID_NR; id++) {
		struct bpos last_pos = POS_MIN;
		snapshot_id_list equiv_seen = { 0 };
		struct disk_reservation res = { 0 };

		if (!btree_type_has_snapshots(id))
			continue;

		/*
		 * deleted inodes btree is maintained by a trigger on the inodes
		 * btree - no work for us to do here, and it's not safe to scan
		 * it because we'll see out of date keys due to the btree write
		 * buffer:
		 */
		if (id == BTREE_ID_deleted_inodes)
			continue;

		ret = for_each_btree_key_commit(trans, iter,
				id, POS_MIN,
				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
				&res, NULL, BTREE_INSERT_NOFAIL,
			snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
		      for_each_btree_key_commit(trans, iter,
				id, POS_MIN,
				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
				&res, NULL, BTREE_INSERT_NOFAIL,
			move_key_to_correct_snapshot(trans, &iter, k));

		bch2_disk_reservation_put(c, &res);
		darray_exit(&equiv_seen);

		if (ret) {
			bch_err_msg(c, ret, "deleting keys from dying snapshots");
			goto err;
		}
	}

	bch2_trans_unlock(trans);
	down_write(&c->snapshot_create_lock);

	for_each_btree_key(trans, iter, BTREE_ID_snapshots,
			   POS_MIN, 0, k, ret) {
		u32 snapshot = k.k->p.offset;
		u32 equiv = bch2_snapshot_equiv(c, snapshot);

		if (equiv != snapshot)
			snapshot_list_add(c, &deleted_interior, snapshot);
	}
	bch2_trans_iter_exit(trans, &iter);

	if (ret)
		goto err_create_lock;

	/*
	 * Fixing children of deleted snapshots can't be done completely
	 * atomically, if we crash between here and when we delete the interior
	 * nodes some depth fields will be off:
	 */
	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN,
				  BTREE_ITER_INTENT, k,
				  NULL, NULL, BTREE_INSERT_NOFAIL,
		bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior));
	if (ret)
		goto err_create_lock;

	darray_for_each(deleted, i) {
		ret = commit_do(trans, NULL, NULL, 0,
			bch2_snapshot_node_delete(trans, *i));
		if (ret) {
			bch_err_msg(c, ret, "deleting snapshot %u", *i);
			goto err_create_lock;
		}
	}

	darray_for_each(deleted_interior, i) {
		ret = commit_do(trans, NULL, NULL, 0,
			bch2_snapshot_node_delete(trans, *i));
		if (ret) {
			bch_err_msg(c, ret, "deleting snapshot %u", *i);
			goto err_create_lock;
		}
	}
err_create_lock:
	up_write(&c->snapshot_create_lock);
err:
	darray_exit(&deleted_interior);
	darray_exit(&deleted);
	bch2_trans_put(trans);
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

void bch2_delete_dead_snapshots_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);

	bch2_delete_dead_snapshots(c);
	bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
}

void bch2_delete_dead_snapshots_async(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
	    !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
}

int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
				       enum btree_id id,
				       struct bpos pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, id, pos,
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_ALL_SNAPSHOTS);
	while (1) {
		k = bch2_btree_iter_prev(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		if (!k.k)
			break;

		if (!bkey_eq(pos, k.k->p))
			break;

		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
			ret = 1;
			break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}
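
/*
 * Usage sketch (editor's illustration): the helper above returns 1 if a key
 * at @pos also exists in an ancestor snapshot - i.e. the key at @pos
 * overwrites something - 0 if not, and negative on error:
 *
 *	ret = __bch2_key_has_snapshot_overwrites(trans, BTREE_ID_extents, k.k->p);
 *	if (ret < 0)
 *		return ret;	// transaction restart or I/O error
 *	if (ret)
 *		;		// an ancestor version exists; deleting this
 *				// key outright would resurrect it
 */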

static u32 bch2_snapshot_smallest_child(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s = snapshot_t(c, id);

	return s->children[1] ?: s->children[0];
}

static u32 bch2_snapshot_smallest_descendent(struct bch_fs *c, u32 id)
{
	u32 child;

	while ((child = bch2_snapshot_smallest_child(c, id)))
		id = child;
	return id;
}

static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
					       enum btree_id btree,
					       struct bkey_s_c interior_k,
					       u32 leaf_id, struct bpos *new_min_pos)
{
	struct btree_iter iter;
	struct bpos pos = interior_k.k->p;
	struct bkey_s_c k;
	struct bkey_i *new;
	int ret;

	pos.snapshot = leaf_id;

	bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto out;

	/* key already overwritten in this snapshot? */
	if (k.k->p.snapshot != interior_k.k->p.snapshot)
		goto out;

	if (bpos_eq(*new_min_pos, POS_MIN)) {
		*new_min_pos = k.k->p;
		new_min_pos->snapshot = leaf_id;
	}

	new = bch2_bkey_make_mut_noupdate(trans, interior_k);
	ret = PTR_ERR_OR_ZERO(new);
	if (ret)
		goto out;

	new->k.p.snapshot = leaf_id;
	ret = bch2_trans_update(trans, &iter, new, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_propagate_key_to_snapshot_leaves(struct btree_trans *trans,
					  enum btree_id btree,
					  struct bkey_s_c k,
					  struct bpos *new_min_pos)
{
	struct bch_fs *c = trans->c;
	struct bkey_buf sk;
	u32 restart_count = trans->restart_count;
	int ret = 0;

	bch2_bkey_buf_init(&sk);
	bch2_bkey_buf_reassemble(&sk, c, k);
	k = bkey_i_to_s_c(sk.k);

	*new_min_pos = POS_MIN;

	for (u32 id = bch2_snapshot_smallest_descendent(c, k.k->p.snapshot);
	     id < k.k->p.snapshot;
	     id++) {
		if (!bch2_snapshot_is_ancestor(c, id, k.k->p.snapshot) ||
		    !bch2_snapshot_is_leaf(c, id))
			continue;
again:
		ret =   btree_trans_too_many_iters(trans) ?:
			bch2_propagate_key_to_snapshot_leaf(trans, btree, k, id, new_min_pos) ?:
			bch2_trans_commit(trans, NULL, NULL, 0);
		if (ret && bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			bch2_trans_begin(trans);
			goto again;
		}

		if (ret)
			break;
	}

	bch2_bkey_buf_exit(&sk, c);

	return ret ?: trans_was_restarted(trans, restart_count);
}
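
/*
 * Editor's note tying this back to the "unlinked inode in an interior
 * snapshot node" discussion further up: a key that must be mutated but lives
 * in an interior node is first copied to every leaf descendant, where each
 * copy can then be modified independently. Sketch of a caller, assuming an
 * inode key @k in an interior snapshot:
 *
 *	struct bpos new_min_pos;
 *
 *	ret = bch2_propagate_key_to_snapshot_leaves(trans, BTREE_ID_inodes,
 *						    k, &new_min_pos);
 *	// on success, resume scanning from new_min_pos so the freshly
 *	// written leaf copies get processed as well
 */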

static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c_snapshot snap;
	int ret = 0;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	snap = bkey_s_c_to_snapshot(k);
	if (BCH_SNAPSHOT_DELETED(snap.v) ||
	    bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset ||
	    (ret = bch2_snapshot_needs_delete(trans, k)) > 0) {
		set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
		return 0;
	}

	return ret;
}

int bch2_snapshots_read(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
			   POS_MIN, 0, k,
			bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
			bch2_snapshot_set_equiv(trans, k) ?:
			bch2_check_snapshot_needs_deletion(trans, k)) ?:
		for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
			   POS_MIN, 0, k,
			   (set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

void bch2_fs_snapshots_exit(struct bch_fs *c)
{
	kfree(rcu_dereference_protected(c->snapshots, true));
}