// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "fs.h"
#include "snapshot.h"

#include <linux/random.h>

/*
 * Snapshot trees:
 *
 * Keys in BTREE_ID_snapshot_trees identify a whole tree of snapshot nodes; they
 * exist to provide a stable identifier for the whole lifetime of a snapshot
 * tree.
 */

void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
				struct bkey_s_c k)
{
	struct bkey_s_c_snapshot_tree t = bkey_s_c_to_snapshot_tree(k);

	prt_printf(out, "subvol %u root snapshot %u",
		   le32_to_cpu(t.v->master_subvol),
		   le32_to_cpu(t.v->root_snapshot));
}

int bch2_snapshot_tree_invalid(struct bch_fs *c, struct bkey_s_c k,
			       enum bkey_invalid_flags flags,
			       struct printbuf *err)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
			 bkey_lt(k.k->p, POS(0, 1)), c, err,
			 snapshot_tree_pos_bad,
			 "bad pos");
fsck_err:
	return ret;
}

int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
			      struct bch_snapshot_tree *s)
{
	int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
					  BTREE_ITER_WITH_UPDATES, snapshot_tree, s);

	if (bch2_err_matches(ret, ENOENT))
		ret = -BCH_ERR_ENOENT_snapshot_tree;
	return ret;
}

struct bkey_i_snapshot_tree *
__bch2_snapshot_tree_create(struct btree_trans *trans)
{
	struct btree_iter iter;
	int ret = bch2_bkey_get_empty_slot(trans, &iter,
					   BTREE_ID_snapshot_trees, POS(0, U32_MAX));
	struct bkey_i_snapshot_tree *s_t;

	if (ret == -BCH_ERR_ENOSPC_btree_slot)
		ret = -BCH_ERR_ENOSPC_snapshot_tree;
	if (ret)
		return ERR_PTR(ret);

	s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
	ret = PTR_ERR_OR_ZERO(s_t);
	bch2_trans_iter_exit(trans, &iter);
	return ret ? ERR_PTR(ret) : s_t;
}

static int bch2_snapshot_tree_create(struct btree_trans *trans,
				     u32 root_id, u32 subvol_id, u32 *tree_id)
{
	struct bkey_i_snapshot_tree *n_tree =
		__bch2_snapshot_tree_create(trans);

	if (IS_ERR(n_tree))
		return PTR_ERR(n_tree);

	n_tree->v.master_subvol	= cpu_to_le32(subvol_id);
	n_tree->v.root_snapshot	= cpu_to_le32(root_id);
	*tree_id = n_tree->k.p.offset;
	return 0;
}

/* Snapshot nodes: */
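
/*
 * A rough map of the machinery below, as inferred from the code in this
 * file: snapshot node IDs effectively grow downwards - the in-memory table
 * is indexed by U32_MAX - id, and create_snapids() always allocates below
 * the lowest existing ID - so every ancestor has a strictly greater ID than
 * any of its descendants. That invariant is what lets
 * __bch2_snapshot_is_ancestor_early() simply walk parent pointers while
 * id < ancestor.
 *
 * Once check_snapshots has run, ancestor queries also use two per-node
 * accelerations kept in struct snapshot_t:
 *
 * - skip[3]: pseudorandom skiplist pointers to ancestors, letting
 *   get_ancestor_below() jump up the tree in large steps;
 * - is_ancestor: a bitmap of the IS_ANCESTOR_BITMAP closest ancestors,
 *   consulted once the walk gets within bitmap range.
 */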

static bool __bch2_snapshot_is_ancestor_early(struct snapshot_table *t, u32 id, u32 ancestor)
{
	while (id && id < ancestor)
		id = __snapshot_t(t, id)->parent;
	return id == ancestor;
}

static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
{
	rcu_read_lock();
	bool ret = __bch2_snapshot_is_ancestor_early(rcu_dereference(c->snapshots), id, ancestor);
	rcu_read_unlock();

	return ret;
}

static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
{
	const struct snapshot_t *s = __snapshot_t(t, id);

	if (s->skip[2] <= ancestor)
		return s->skip[2];
	if (s->skip[1] <= ancestor)
		return s->skip[1];
	if (s->skip[0] <= ancestor)
		return s->skip[0];
	return s->parent;
}

bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
{
	bool ret;

	rcu_read_lock();
	struct snapshot_table *t = rcu_dereference(c->snapshots);

	if (unlikely(c->recovery_pass_done <= BCH_RECOVERY_PASS_check_snapshots)) {
		ret = __bch2_snapshot_is_ancestor_early(t, id, ancestor);
		goto out;
	}

	while (id && id < ancestor - IS_ANCESTOR_BITMAP)
		id = get_ancestor_below(t, id, ancestor);

	if (id && id < ancestor) {
		ret = test_bit(ancestor - id - 1, __snapshot_t(t, id)->is_ancestor);

		EBUG_ON(ret != __bch2_snapshot_is_ancestor_early(t, id, ancestor));
	} else {
		ret = id == ancestor;
	}
out:
	rcu_read_unlock();

	return ret;
}
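
/*
 * The in-memory snapshot table is a flat RCU-protected array: readers look
 * nodes up under rcu_read_lock(), writers hold snapshot_table_lock. Growing
 * it means allocating a bigger array, copying the old entries, publishing
 * the new pointer with rcu_assign_pointer(), and freeing the old table only
 * after a grace period:
 */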

static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
{
	size_t idx = U32_MAX - id;
	size_t new_size;
	struct snapshot_table *new, *old;

	new_size = max(16UL, roundup_pow_of_two(idx + 1));

	new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
	if (!new)
		return NULL;

	old = rcu_dereference_protected(c->snapshots, true);
	if (old)
		memcpy(new->s,
		       rcu_dereference_protected(c->snapshots, true)->s,
		       sizeof(new->s[0]) * c->snapshot_table_size);

	rcu_assign_pointer(c->snapshots, new);
	c->snapshot_table_size = new_size;
	kvfree_rcu_mightsleep(old);

	return &rcu_dereference_protected(c->snapshots, true)->s[idx];
}

static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
{
	size_t idx = U32_MAX - id;

	lockdep_assert_held(&c->snapshot_table_lock);

	if (likely(idx < c->snapshot_table_size))
		return &rcu_dereference_protected(c->snapshots, true)->s[idx];

	return __snapshot_t_mut(c, id);
}

void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
			   struct bkey_s_c k)
{
	struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);

	prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u tree %u",
		   BCH_SNAPSHOT_SUBVOL(s.v),
		   BCH_SNAPSHOT_DELETED(s.v),
		   le32_to_cpu(s.v->parent),
		   le32_to_cpu(s.v->children[0]),
		   le32_to_cpu(s.v->children[1]),
		   le32_to_cpu(s.v->subvol),
		   le32_to_cpu(s.v->tree));

	if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, depth))
		prt_printf(out, " depth %u skiplist %u %u %u",
			   le32_to_cpu(s.v->depth),
			   le32_to_cpu(s.v->skip[0]),
			   le32_to_cpu(s.v->skip[1]),
			   le32_to_cpu(s.v->skip[2]));
}

int bch2_snapshot_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_s_c_snapshot s;
	u32 i, id;
	int ret = 0;

	bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
			 bkey_lt(k.k->p, POS(0, 1)), c, err,
			 snapshot_pos_bad,
			 "bad pos");

	s = bkey_s_c_to_snapshot(k);

	id = le32_to_cpu(s.v->parent);
	bkey_fsck_err_on(id && id <= k.k->p.offset, c, err,
			 snapshot_parent_bad,
			 "bad parent node (%u <= %llu)",
			 id, k.k->p.offset);

	bkey_fsck_err_on(le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]), c, err,
			 snapshot_children_not_normalized,
			 "children not normalized");

	bkey_fsck_err_on(s.v->children[0] && s.v->children[0] == s.v->children[1], c, err,
			 snapshot_child_duplicate,
			 "duplicate child nodes");

	for (i = 0; i < 2; i++) {
		id = le32_to_cpu(s.v->children[i]);

		bkey_fsck_err_on(id >= k.k->p.offset, c, err,
				 snapshot_child_bad,
				 "bad child node (%u >= %llu)",
				 id, k.k->p.offset);
	}

	if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
		bkey_fsck_err_on(le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
				 le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2]), c, err,
				 snapshot_skiplist_not_normalized,
				 "skiplist not normalized");

		for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
			id = le32_to_cpu(s.v->skip[i]);

			bkey_fsck_err_on(id && id < le32_to_cpu(s.v->parent), c, err,
					 snapshot_skiplist_bad,
					 "bad skiplist node %u", id);
		}
	}
fsck_err:
	return ret;
}

static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
{
	struct snapshot_t *t = snapshot_t_mut(c, id);
	u32 parent = id;

	while ((parent = bch2_snapshot_parent_early(c, parent)) &&
	       parent - id - 1 < IS_ANCESTOR_BITMAP)
		__set_bit(parent - id - 1, t->is_ancestor);
}

static void set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
{
	mutex_lock(&c->snapshot_table_lock);
	__set_is_ancestor_bitmap(c, id);
	mutex_unlock(&c->snapshot_table_lock);
}
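
/*
 * Trigger for keys in BTREE_ID_snapshots: mirror the key into the in-memory
 * snapshot table (or clear the entry, if the key is being deleted), and kick
 * off dead snapshot cleanup if the node is flagged as deleted:
 */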
static int __bch2_mark_snapshot(struct btree_trans *trans,
				enum btree_id btree, unsigned level,
				struct bkey_s_c old, struct bkey_s_c new,
				unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct snapshot_t *t;
	u32 id = new.k->p.offset;
	int ret = 0;

	mutex_lock(&c->snapshot_table_lock);

	t = snapshot_t_mut(c, id);
	if (!t) {
		ret = -BCH_ERR_ENOMEM_mark_snapshot;
		goto err;
	}

	if (new.k->type == KEY_TYPE_snapshot) {
		struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);

		t->parent	= le32_to_cpu(s.v->parent);
		t->children[0]	= le32_to_cpu(s.v->children[0]);
		t->children[1]	= le32_to_cpu(s.v->children[1]);
		t->subvol	= BCH_SNAPSHOT_SUBVOL(s.v)
			? le32_to_cpu(s.v->subvol)
			: 0;
		t->tree		= le32_to_cpu(s.v->tree);

		if (bkey_val_bytes(s.k) > offsetof(struct bch_snapshot, depth)) {
			t->depth	= le32_to_cpu(s.v->depth);
			t->skip[0]	= le32_to_cpu(s.v->skip[0]);
			t->skip[1]	= le32_to_cpu(s.v->skip[1]);
			t->skip[2]	= le32_to_cpu(s.v->skip[2]);
		} else {
			t->depth	= 0;
			t->skip[0]	= 0;
			t->skip[1]	= 0;
			t->skip[2]	= 0;
		}

		__set_is_ancestor_bitmap(c, id);

		if (BCH_SNAPSHOT_DELETED(s.v)) {
			set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
			if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots)
				bch2_delete_dead_snapshots_async(c);
		}
	} else {
		memset(t, 0, sizeof(*t));
	}
err:
	mutex_unlock(&c->snapshot_table_lock);
	return ret;
}

int bch2_mark_snapshot(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s new,
		       unsigned flags)
{
	return __bch2_mark_snapshot(trans, btree, level, old, new.s_c, flags);
}

int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
			 struct bch_snapshot *s)
{
	return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
				       BTREE_ITER_WITH_UPDATES, snapshot, s);
}

static int bch2_snapshot_live(struct btree_trans *trans, u32 id)
{
	struct bch_snapshot v;
	int ret;

	if (!id)
		return 0;

	ret = bch2_snapshot_lookup(trans, id, &v);
	if (bch2_err_matches(ret, ENOENT))
		bch_err(trans->c, "snapshot node %u not found", id);
	if (ret)
		return ret;

	return !BCH_SNAPSHOT_DELETED(&v);
}

/*
 * If @k is a snapshot with just one live child, it's part of a linear chain,
 * which we consider to be an equivalence class: and then after snapshot
 * deletion cleanup, there should only be a single key at a given position in
 * this equivalence class.
 *
 * This sets the equivalence class of @k to be the child's equivalence class, if
 * it's part of such a linear chain: this correctly sets equivalence classes on
 * startup if we run leaf to root (i.e. in natural key order).
 */
static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	unsigned i, nr_live = 0, live_idx = 0;
	struct bkey_s_c_snapshot snap;
	u32 id = k.k->p.offset, child[2];

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	snap = bkey_s_c_to_snapshot(k);

	child[0] = le32_to_cpu(snap.v->children[0]);
	child[1] = le32_to_cpu(snap.v->children[1]);

	for (i = 0; i < 2; i++) {
		int ret = bch2_snapshot_live(trans, child[i]);

		if (ret < 0)
			return ret;

		if (ret)
			live_idx = i;
		nr_live += ret;
	}

	mutex_lock(&c->snapshot_table_lock);

	snapshot_t_mut(c, id)->equiv = nr_live == 1
		? snapshot_t_mut(c, child[live_idx])->equiv
		: id;

	mutex_unlock(&c->snapshot_table_lock);

	return 0;
}
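
/*
 * E.g., with a linear chain of live nodes 10 -> 7 -> 3 (IDs decrease from
 * parent to child), running in natural key order sets
 *
 *	equiv(3)  = 3			(no live children)
 *	equiv(7)  = equiv(3) = 3	(one live child)
 *	equiv(10) = equiv(7) = 3	(one live child)
 *
 * so the whole chain collapses to the leaf-most ID, 3.
 */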

/* fsck: */

static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
{
	return snapshot_t(c, id)->children[child];
}

static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
{
	return bch2_snapshot_child(c, id, 0);
}

static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
{
	return bch2_snapshot_child(c, id, 1);
}

static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
{
	u32 n, parent;

	n = bch2_snapshot_left_child(c, id);
	if (n)
		return n;

	while ((parent = bch2_snapshot_parent(c, id))) {
		n = bch2_snapshot_right_child(c, parent);
		if (n && n != id)
			return n;
		id = parent;
	}

	return 0;
}

static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
{
	u32 id = snapshot_root;
	u32 subvol = 0, s;

	while (id) {
		s = snapshot_t(c, id)->subvol;

		if (s && (!subvol || s < subvol))
			subvol = s;

		id = bch2_snapshot_tree_next(c, id);
	}

	return subvol;
}
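
/*
 * Find the master (non-snapshot) subvolume in the tree rooted at
 * @snapshot_root; if every subvolume in the tree is a snapshot, fall back to
 * the oldest one and clear its BCH_SUBVOLUME_SNAP flag so it can serve as
 * the master:
 */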
static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
					    u32 snapshot_root, u32 *subvol_id)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	bool found = false;
	int ret;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
				     0, k, ret) {
		if (k.k->type != KEY_TYPE_subvolume)
			continue;

		struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
		if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
			continue;
		if (!BCH_SUBVOLUME_SNAP(s.v)) {
			*subvol_id = s.k->p.offset;
			found = true;
			break;
		}
	}

	bch2_trans_iter_exit(trans, &iter);

	if (!ret && !found) {
		struct bkey_i_subvolume *u;

		*subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);

		u = bch2_bkey_get_mut_typed(trans, &iter,
					    BTREE_ID_subvolumes, POS(0, *subvol_id),
					    0, subvolume);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			return ret;

		SET_BCH_SUBVOLUME_SNAP(&u->v, false);
	}

	return ret;
}

static int check_snapshot_tree(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c_snapshot_tree st;
	struct bch_snapshot s;
	struct bch_subvolume subvol;
	struct printbuf buf = PRINTBUF;
	u32 root_id;
	int ret;

	if (k.k->type != KEY_TYPE_snapshot_tree)
		return 0;

	st = bkey_s_c_to_snapshot_tree(k);
	root_id = le32_to_cpu(st.v->root_snapshot);

	ret = bch2_snapshot_lookup(trans, root_id, &s);
	if (ret && !bch2_err_matches(ret, ENOENT))
		goto err;

	if (fsck_err_on(ret ||
			root_id != bch2_snapshot_root(c, root_id) ||
			st.k->p.offset != le32_to_cpu(s.tree),
			c, snapshot_tree_to_missing_snapshot,
			"snapshot tree points to missing/incorrect snapshot:\n  %s",
			(bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto err;
	}

	ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol),
				 false, 0, &subvol);
	if (ret && !bch2_err_matches(ret, ENOENT))
		goto err;

	if (fsck_err_on(ret,
			c, snapshot_tree_to_missing_subvol,
			"snapshot tree points to missing subvolume:\n  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
	    fsck_err_on(!bch2_snapshot_is_ancestor(c,
						   le32_to_cpu(subvol.snapshot),
						   root_id),
			c, snapshot_tree_to_wrong_subvol,
			"snapshot tree points to subvolume that does not point to snapshot in this tree:\n  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
	    fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol),
			c, snapshot_tree_to_snapshot_subvol,
			"snapshot tree points to snapshot subvolume:\n  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
		struct bkey_i_snapshot_tree *u;
		u32 subvol_id;

		ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
		if (ret)
			goto err;

		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		u->v.master_subvol = cpu_to_le32(subvol_id);
		st = snapshot_tree_i_to_s_c(u);
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

/*
 * For each snapshot_tree, make sure it points to the root of a snapshot tree
 * and that snapshot entry points back to it, or delete it.
 *
 * And, make sure it points to a subvolume within that snapshot tree, or correct
 * it to point to the oldest subvolume within that snapshot tree.
 */
int bch2_check_snapshot_trees(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
			BTREE_ID_snapshot_trees, POS_MIN,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		check_snapshot_tree(trans, &iter, k)));
	bch_err_fn(c, ret);
	return ret;
}

/*
 * Look up snapshot tree for @tree_id and find root,
 * make sure @snap_id is a descendant:
 */
static int snapshot_tree_ptr_good(struct btree_trans *trans,
				  u32 snap_id, u32 tree_id)
{
	struct bch_snapshot_tree s_t;
	int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);

	if (bch2_err_matches(ret, ENOENT))
		return 0;
	if (ret)
		return ret;

	return bch2_snapshot_is_ancestor_early(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
}

u32 bch2_snapshot_skiplist_get(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s;

	if (!id)
		return 0;

	rcu_read_lock();
	s = snapshot_t(c, id);
	if (s->parent)
		id = bch2_snapshot_nth_parent(c, id, get_random_u32_below(s->depth));
	rcu_read_unlock();

	return id;
}
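
/*
 * A skiplist entry is acceptable if it points at any ancestor of this node -
 * entries are picked at random when nodes are created or repaired - and a
 * node with no parent must have an all-zero skiplist:
 */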
static int snapshot_skiplist_good(struct btree_trans *trans, u32 id, struct bch_snapshot s)
{
	unsigned i;

	for (i = 0; i < 3; i++)
		if (!s.parent) {
			if (s.skip[i])
				return false;
		} else {
			if (!bch2_snapshot_is_ancestor_early(trans->c, id, le32_to_cpu(s.skip[i])))
				return false;
		}

	return true;
}

/*
 * snapshot_tree pointer was incorrect: look up root snapshot node, make sure
 * its snapshot_tree pointer is correct (allocate new one if necessary), then
 * update this node's pointer to root node's pointer:
 */
static int snapshot_tree_ptr_repair(struct btree_trans *trans,
				    struct btree_iter *iter,
				    struct bkey_s_c k,
				    struct bch_snapshot *s)
{
	struct bch_fs *c = trans->c;
	struct btree_iter root_iter;
	struct bch_snapshot_tree s_t;
	struct bkey_s_c_snapshot root;
	struct bkey_i_snapshot *u;
	u32 root_id = bch2_snapshot_root(c, k.k->p.offset), tree_id;
	int ret;

	root = bch2_bkey_get_iter_typed(trans, &root_iter,
					BTREE_ID_snapshots, POS(0, root_id),
					BTREE_ITER_WITH_UPDATES, snapshot);
	ret = bkey_err(root);
	if (ret)
		goto err;

	tree_id = le32_to_cpu(root.v->tree);

	ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
	if (ret && !bch2_err_matches(ret, ENOENT))
		return ret;

	if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
		u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot);
		ret =   PTR_ERR_OR_ZERO(u) ?:
			bch2_snapshot_tree_create(trans, root_id,
				bch2_snapshot_tree_oldest_subvol(c, root_id),
				&tree_id);
		if (ret)
			goto err;

		u->v.tree = cpu_to_le32(tree_id);
		if (k.k->p.offset == root_id)
			*s = u->v;
	}

	if (k.k->p.offset != root_id) {
		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		u->v.tree = cpu_to_le32(tree_id);
		*s = u->v;
	}
err:
	bch2_trans_iter_exit(trans, &root_iter);
	return ret;
}
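
/*
 * Per-node fsck: verify that parent and children point back at this node,
 * that the subvolume/BCH_SNAPSHOT_SUBVOL linkage is consistent, and repair
 * the tree, depth and skiplist fields if they're wrong:
 */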
static int check_snapshot(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bch_snapshot s;
	struct bch_subvolume subvol;
	struct bch_snapshot v;
	struct bkey_i_snapshot *u;
	u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset);
	u32 real_depth;
	struct printbuf buf = PRINTBUF;
	bool should_have_subvol;
	u32 i, id;
	int ret = 0;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	memset(&s, 0, sizeof(s));
	memcpy(&s, k.v, min(sizeof(s), bkey_val_bytes(k.k)));

	id = le32_to_cpu(s.parent);
	if (id) {
		ret = bch2_snapshot_lookup(trans, id, &v);
		if (bch2_err_matches(ret, ENOENT))
			bch_err(c, "snapshot with nonexistent parent:\n  %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (ret)
			goto err;

		if (le32_to_cpu(v.children[0]) != k.k->p.offset &&
		    le32_to_cpu(v.children[1]) != k.k->p.offset) {
			bch_err(c, "snapshot parent %u missing pointer to child %llu",
				id, k.k->p.offset);
			ret = -EINVAL;
			goto err;
		}
	}

	for (i = 0; i < 2 && s.children[i]; i++) {
		id = le32_to_cpu(s.children[i]);

		ret = bch2_snapshot_lookup(trans, id, &v);
		if (bch2_err_matches(ret, ENOENT))
			bch_err(c, "snapshot node %llu has nonexistent child %u",
				k.k->p.offset, id);
		if (ret)
			goto err;

		if (le32_to_cpu(v.parent) != k.k->p.offset) {
			bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
				id, le32_to_cpu(v.parent), k.k->p.offset);
			ret = -EINVAL;
			goto err;
		}
	}

	should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
		!BCH_SNAPSHOT_DELETED(&s);

	if (should_have_subvol) {
		id = le32_to_cpu(s.subvol);
		ret = bch2_subvolume_get(trans, id, false, 0, &subvol);
		if (bch2_err_matches(ret, ENOENT))
			bch_err(c, "snapshot points to nonexistent subvolume:\n  %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (ret)
			goto err;

		if (BCH_SNAPSHOT_SUBVOL(&s) != (le32_to_cpu(subvol.snapshot) == k.k->p.offset)) {
			bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
				k.k->p.offset);
			ret = -EINVAL;
			goto err;
		}
	} else {
		if (fsck_err_on(s.subvol,
				c, snapshot_should_not_have_subvol,
				"snapshot should not point to subvol:\n  %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
			u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
			ret = PTR_ERR_OR_ZERO(u);
			if (ret)
				goto err;

			u->v.subvol = 0;
			s = u->v;
		}
	}

	ret = snapshot_tree_ptr_good(trans, k.k->p.offset, le32_to_cpu(s.tree));
	if (ret < 0)
		goto err;

	if (fsck_err_on(!ret, c, snapshot_to_bad_snapshot_tree,
			"snapshot points to missing/incorrect tree:\n  %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
		if (ret)
			goto err;
	}
	ret = 0;

	real_depth = bch2_snapshot_depth(c, parent_id);

	if (fsck_err_on(le32_to_cpu(s.depth) != real_depth,
			c, snapshot_bad_depth,
			"snapshot with incorrect depth field, should be %u:\n  %s",
			real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		u->v.depth = cpu_to_le32(real_depth);
		s = u->v;
	}

	ret = snapshot_skiplist_good(trans, k.k->p.offset, s);
	if (ret < 0)
		goto err;

	if (fsck_err_on(!ret, c, snapshot_bad_skiplist,
			"snapshot with bad skiplist field:\n  %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;

		for (i = 0; i < ARRAY_SIZE(u->v.skip); i++)
			u->v.skip[i] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent_id));

		bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_le32);
		s = u->v;
	}
	ret = 0;
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_snapshots(struct bch_fs *c)
{
	/*
	 * We iterate backwards as checking/fixing the depth field requires that
	 * the parent's depth already be correct:
	 */
	int ret = bch2_trans_run(c,
		for_each_btree_key_reverse_commit(trans, iter,
			BTREE_ID_snapshots, POS_MAX,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		check_snapshot(trans, &iter, k)));
	bch_err_fn(c, ret);
	return ret;
}
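
/*
 * Snapshot deletion is two phase: a node is first flagged as deleted (and
 * unhooked from its subvolume) below; its keys and the node itself are only
 * removed later, by bch2_delete_dead_snapshots():
 */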
/*
 * Mark a snapshot as deleted, for future cleanup:
 */
int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
{
	struct btree_iter iter;
	struct bkey_i_snapshot *s;
	int ret = 0;

	s = bch2_bkey_get_mut_typed(trans, &iter,
				    BTREE_ID_snapshots, POS(0, id),
				    0, snapshot);
	ret = PTR_ERR_OR_ZERO(s);
	if (unlikely(ret)) {
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
					trans->c, "missing snapshot %u", id);
		return ret;
	}

	/* already deleted? */
	if (BCH_SNAPSHOT_DELETED(&s->v))
		goto err;

	SET_BCH_SNAPSHOT_DELETED(&s->v, true);
	SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
	s->v.subvol = 0;
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
{
	if (le32_to_cpu(s->children[0]) < le32_to_cpu(s->children[1]))
		swap(s->children[0], s->children[1]);
}

static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
	struct btree_iter c_iter = (struct btree_iter) { NULL };
	struct btree_iter tree_iter = (struct btree_iter) { NULL };
	struct bkey_s_c_snapshot s;
	u32 parent_id, child_id;
	unsigned i;
	int ret = 0;

	s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
				     BTREE_ITER_INTENT, snapshot);
	ret = bkey_err(s);
	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
				"missing snapshot %u", id);

	if (ret)
		goto err;

	BUG_ON(s.v->children[1]);

	parent_id = le32_to_cpu(s.v->parent);
	child_id = le32_to_cpu(s.v->children[0]);

	if (parent_id) {
		struct bkey_i_snapshot *parent;

		parent = bch2_bkey_get_mut_typed(trans, &p_iter,
					BTREE_ID_snapshots, POS(0, parent_id),
					0, snapshot);
		ret = PTR_ERR_OR_ZERO(parent);
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
					"missing snapshot %u", parent_id);
		if (unlikely(ret))
			goto err;

		/* find entry in parent->children for node being deleted */
		for (i = 0; i < 2; i++)
			if (le32_to_cpu(parent->v.children[i]) == id)
				break;

		if (bch2_fs_inconsistent_on(i == 2, c,
					    "snapshot %u missing child pointer to %u",
					    parent_id, id)) {
			ret = -ENOENT;
			goto err;
		}

		parent->v.children[i] = cpu_to_le32(child_id);

		normalize_snapshot_child_pointers(&parent->v);
	}

	if (child_id) {
		struct bkey_i_snapshot *child;

		child = bch2_bkey_get_mut_typed(trans, &c_iter,
					BTREE_ID_snapshots, POS(0, child_id),
					0, snapshot);
		ret = PTR_ERR_OR_ZERO(child);
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
					"missing snapshot %u", child_id);
		if (unlikely(ret))
			goto err;

		child->v.parent = cpu_to_le32(parent_id);

		if (!child->v.parent) {
			child->v.skip[0] = 0;
			child->v.skip[1] = 0;
			child->v.skip[2] = 0;
		}
	}

	if (!parent_id) {
		/*
		 * We're deleting the root of a snapshot tree: update the
		 * snapshot_tree entry to point to the new root, or delete it if
		 * this is the last snapshot ID in this tree:
		 */
		struct bkey_i_snapshot_tree *s_t;

		BUG_ON(s.v->children[1]);

		s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
				BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
				0, snapshot_tree);
		ret = PTR_ERR_OR_ZERO(s_t);
		if (ret)
			goto err;

		if (s.v->children[0]) {
			s_t->v.root_snapshot = s.v->children[0];
		} else {
			s_t->k.type = KEY_TYPE_deleted;
			set_bkey_val_u64s(&s_t->k, 0);
		}
	}

	ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &tree_iter);
	bch2_trans_iter_exit(trans, &p_iter);
	bch2_trans_iter_exit(trans, &c_iter);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
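
/*
 * Allocate @nr_snapids new snapshot nodes as children of @parent (zero for a
 * new root). New IDs come from the empty slots just below the lowest
 * existing snapshot key, preserving the invariant that a node's ID is always
 * less than its parent's:
 */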
static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
			  u32 *new_snapids,
			  u32 *snapshot_subvols,
			  unsigned nr_snapids)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_snapshot *n;
	struct bkey_s_c k;
	unsigned i, j;
	u32 depth = bch2_snapshot_depth(c, parent);
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
			     POS_MIN, BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	for (i = 0; i < nr_snapids; i++) {
		k = bch2_btree_iter_prev_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (!k.k || !k.k->p.offset) {
			ret = -BCH_ERR_ENOSPC_snapshot_create;
			goto err;
		}

		n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(n);
		if (ret)
			goto err;

		n->v.flags	= 0;
		n->v.parent	= cpu_to_le32(parent);
		n->v.subvol	= cpu_to_le32(snapshot_subvols[i]);
		n->v.tree	= cpu_to_le32(tree);
		n->v.depth	= cpu_to_le32(depth);
		n->v.btime.lo	= cpu_to_le64(bch2_current_time(c));
		n->v.btime.hi	= 0;

		for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
			n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));

		bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
		SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);

		ret = __bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
					   bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
		if (ret)
			goto err;

		new_snapids[i]	= iter.pos.offset;

		mutex_lock(&c->snapshot_table_lock);
		snapshot_t_mut(c, new_snapids[i])->equiv = new_snapids[i];
		mutex_unlock(&c->snapshot_table_lock);
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/*
 * Create new snapshot IDs as children of an existing snapshot ID:
 */
static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
					      u32 *new_snapids,
					      u32 *snapshot_subvols,
					      unsigned nr_snapids)
{
	struct btree_iter iter;
	struct bkey_i_snapshot *n_parent;
	int ret = 0;

	n_parent = bch2_bkey_get_mut_typed(trans, &iter,
			BTREE_ID_snapshots, POS(0, parent),
			0, snapshot);
	ret = PTR_ERR_OR_ZERO(n_parent);
	if (unlikely(ret)) {
		if (bch2_err_matches(ret, ENOENT))
			bch_err(trans->c, "snapshot %u not found", parent);
		return ret;
	}

	if (n_parent->v.children[0] || n_parent->v.children[1]) {
		bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
		ret = -EINVAL;
		goto err;
	}

	ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
			     new_snapids, snapshot_subvols, nr_snapids);
	if (ret)
		goto err;

	n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
	n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
	n_parent->v.subvol = 0;
	SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/*
 * Create a snapshot node that is the root of a new tree:
 */
static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
					  u32 *new_snapids,
					  u32 *snapshot_subvols,
					  unsigned nr_snapids)
{
	struct bkey_i_snapshot_tree *n_tree;
	int ret;

	n_tree = __bch2_snapshot_tree_create(trans);
	ret =   PTR_ERR_OR_ZERO(n_tree) ?:
		create_snapids(trans, 0, n_tree->k.p.offset,
			       new_snapids, snapshot_subvols, nr_snapids);
	if (ret)
		return ret;

	n_tree->v.master_subvol	= cpu_to_le32(snapshot_subvols[0]);
	n_tree->v.root_snapshot	= cpu_to_le32(new_snapids[0]);
	return 0;
}

int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
			      u32 *new_snapids,
			      u32 *snapshot_subvols,
			      unsigned nr_snapids)
{
	BUG_ON((parent == 0) != (nr_snapids == 1));
	BUG_ON((parent != 0) != (nr_snapids == 2));

	return parent
		? bch2_snapshot_node_create_children(trans, parent,
				new_snapids, snapshot_subvols, nr_snapids)
		: bch2_snapshot_node_create_tree(trans,
				new_snapids, snapshot_subvols, nr_snapids);
}
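
/*
 * Sketch of how the two cases above are used (the real callers are the
 * subvolume create/snapshot paths in subvolume.c):
 *
 *	// new, empty subvolume: a fresh tree with a single node
 *	u32 new_id[1], subvols[1] = { subvol_id };
 *	ret = bch2_snapshot_node_create(trans, 0, new_id, subvols, 1);
 *
 *	// snapshotting an existing subvolume: two children of its current
 *	// snapshot - one for the snapshot, one to become the subvolume's
 *	// new writable snapshot
 *	u32 new_ids[2], subvols2[2];
 *	ret = bch2_snapshot_node_create(trans, cur_snapshot, new_ids, subvols2, 2);
 */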

/*
 * If we have an unlinked inode in an internal snapshot node, and the inode
 * really has been deleted in all child snapshots, how does this get cleaned up?
 *
 * first there is the problem of how keys that have been overwritten in all
 * child snapshots get deleted (unimplemented?), but inodes may perhaps be
 * special?
 *
 * also: unlinked inode in internal snapshot appears to not be getting deleted
 * correctly if inode doesn't exist in leaf snapshots
 *
 * solution:
 *
 * for a key in an interior snapshot node that needs work to be done that
 * requires it to be mutated: iterate over all descendant leaf nodes and copy
 * that key to snapshot leaf nodes, where we can mutate it
 */
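
/*
 * Within a single position, keys are visited in increasing snapshot ID
 * order, so the first key seen in a given equivalence class is the one in
 * the newest snapshot (lowest ID); any later key in the same class at the
 * same position has been overwritten and can be deleted:
 */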
static int snapshot_delete_key(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k,
			       snapshot_id_list *deleted,
			       snapshot_id_list *equiv_seen,
			       struct bpos *last_pos)
{
	struct bch_fs *c = trans->c;
	u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);

	if (!bkey_eq(k.k->p, *last_pos))
		equiv_seen->nr = 0;
	*last_pos = k.k->p;

	if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
	    snapshot_list_has_id(equiv_seen, equiv)) {
		return bch2_btree_delete_at(trans, iter,
					    BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
	} else {
		return snapshot_list_add(c, equiv_seen, equiv);
	}
}

static int move_key_to_correct_snapshot(struct btree_trans *trans,
					struct btree_iter *iter,
					struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);

	/*
	 * When we have a linear chain of snapshot nodes, we consider
	 * those to form an equivalence class: we're going to collapse
	 * them all down to a single node, and keep the leaf-most node -
	 * which has the same id as the equivalence class id.
	 *
	 * If there are multiple keys in different snapshots at the same
	 * position, we're only going to keep the one in the newest
	 * snapshot - the rest have been overwritten and are redundant,
	 * and for the key we're going to keep we need to move it to the
	 * equivalence class ID if it's not there already.
	 */
	if (equiv != k.k->p.snapshot) {
		struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
		struct btree_iter new_iter;
		int ret;

		ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			return ret;

		new->k.p.snapshot = equiv;

		bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
				     BTREE_ITER_ALL_SNAPSHOTS|
				     BTREE_ITER_CACHED|
				     BTREE_ITER_INTENT);

		ret =   bch2_btree_iter_traverse(&new_iter) ?:
			bch2_trans_update(trans, &new_iter, new,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
			bch2_btree_delete_at(trans, iter,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
		bch2_trans_iter_exit(trans, &new_iter);
		if (ret)
			return ret;
	}

	return 0;
}

static int bch2_snapshot_needs_delete(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_snapshot snap;
	u32 children[2];
	int ret;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	snap = bkey_s_c_to_snapshot(k);
	if (BCH_SNAPSHOT_DELETED(snap.v) ||
	    BCH_SNAPSHOT_SUBVOL(snap.v))
		return 0;

	children[0] = le32_to_cpu(snap.v->children[0]);
	children[1] = le32_to_cpu(snap.v->children[1]);

	ret =   bch2_snapshot_live(trans, children[0]) ?:
		bch2_snapshot_live(trans, children[1]);
	if (ret < 0)
		return ret;
	return !ret;
}

/*
 * For a given snapshot, if it doesn't have a subvolume that points to it, and
 * it doesn't have child snapshot nodes - it's now redundant and we can mark it
 * as deleted.
 */
static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct bkey_s_c k)
{
	int ret = bch2_snapshot_needs_delete(trans, k);

	return ret <= 0
		? ret
		: bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
}

static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
						snapshot_id_list *skip)
{
	rcu_read_lock();
	while (snapshot_list_has_id(skip, id))
		id = __bch2_snapshot_parent(c, id);

	while (n--) {
		do {
			id = __bch2_snapshot_parent(c, id);
		} while (snapshot_list_has_id(skip, id));
	}
	rcu_read_unlock();

	return id;
}
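
/*
 * When interior nodes are about to be deleted, each surviving descendant
 * needs its depth reduced by the number of deleted ancestors, and any
 * skiplist entry pointing at a deleted node repointed at a surviving
 * ancestor (chosen at random, via bch2_snapshot_nth_parent_skip()):
 */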
static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
					      struct btree_iter *iter, struct bkey_s_c k,
					      snapshot_id_list *deleted)
{
	struct bch_fs *c = trans->c;
	u32 nr_deleted_ancestors = 0;
	struct bkey_i_snapshot *s;
	int ret;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	if (snapshot_list_has_id(deleted, k.k->p.offset))
		return 0;

	s = bch2_bkey_make_mut_noupdate_typed(trans, k, snapshot);
	ret = PTR_ERR_OR_ZERO(s);
	if (ret)
		return ret;

	darray_for_each(*deleted, i)
		nr_deleted_ancestors += bch2_snapshot_is_ancestor(c, s->k.p.offset, *i);

	if (!nr_deleted_ancestors)
		return 0;

	le32_add_cpu(&s->v.depth, -nr_deleted_ancestors);

	if (!s->v.depth) {
		s->v.skip[0] = 0;
		s->v.skip[1] = 0;
		s->v.skip[2] = 0;
	} else {
		u32 depth = le32_to_cpu(s->v.depth);
		u32 parent = bch2_snapshot_parent(c, s->k.p.offset);

		for (unsigned j = 0; j < ARRAY_SIZE(s->v.skip); j++) {
			u32 id = le32_to_cpu(s->v.skip[j]);

			if (snapshot_list_has_id(deleted, id)) {
				id = bch2_snapshot_nth_parent_skip(c,
							parent,
							depth > 1
							? get_random_u32_below(depth - 1)
							: 0,
							deleted);
				s->v.skip[j] = cpu_to_le32(id);
			}
		}

		bubble_sort(s->v.skip, ARRAY_SIZE(s->v.skip), cmp_le32);
	}

	return bch2_trans_update(trans, iter, &s->k_i, 0);
}
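
/*
 * Dead snapshot cleanup, step by step:
 *
 * 1. Mark nodes with no live children and no subvolume as deleted.
 * 2. Recompute equivalence classes, then collect the set of deleted nodes.
 * 3. For each snapshotted btree: delete keys in deleted snapshots, and
 *    collapse redundant keys down to their equivalence class IDs.
 * 4. Collect the interior nodes of equivalence classes, fix the
 *    depth/skiplist fields of their descendants, and finally delete the
 *    nodes themselves.
 */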
int bch2_delete_dead_snapshots(struct bch_fs *c)
{
	struct btree_trans *trans;
	snapshot_id_list deleted = { 0 };
	snapshot_id_list deleted_interior = { 0 };
	u32 id;
	int ret = 0;

	if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
		return 0;

	if (!test_bit(BCH_FS_started, &c->flags)) {
		ret = bch2_fs_read_write_early(c);
		bch_err_msg(c, ret, "deleting dead snapshots: error going rw");
		if (ret)
			return ret;
	}

	trans = bch2_trans_get(c);

	/*
	 * For every snapshot node: If we have no live children and it's not
	 * pointed to by a subvolume, delete it:
	 */
	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots,
			POS_MIN, 0, k,
			NULL, NULL, 0,
		bch2_delete_redundant_snapshot(trans, k));
	bch_err_msg(c, ret, "deleting redundant snapshots");
	if (ret)
		goto err;

	ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
				 POS_MIN, 0, k,
		bch2_snapshot_set_equiv(trans, k));
	bch_err_msg(c, ret, "in bch2_snapshots_set_equiv");
	if (ret)
		goto err;

	ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
				 POS_MIN, 0, k, ({
		if (k.k->type != KEY_TYPE_snapshot)
			continue;

		BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v)
			? snapshot_list_add(c, &deleted, k.k->p.offset)
			: 0;
	}));
	bch_err_msg(c, ret, "walking snapshots");
	if (ret)
		goto err;

	for (id = 0; id < BTREE_ID_NR; id++) {
		struct bpos last_pos = POS_MIN;
		snapshot_id_list equiv_seen = { 0 };
		struct disk_reservation res = { 0 };

		if (!btree_type_has_snapshots(id))
			continue;

		/*
		 * deleted inodes btree is maintained by a trigger on the inodes
		 * btree - no work for us to do here, and it's not safe to scan
		 * it because we'll see out of date keys due to the btree write
		 * buffer:
		 */
		if (id == BTREE_ID_deleted_inodes)
			continue;

		ret = for_each_btree_key_commit(trans, iter,
				id, POS_MIN,
				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
				&res, NULL, BCH_TRANS_COMMIT_no_enospc,
			snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
		      for_each_btree_key_commit(trans, iter,
				id, POS_MIN,
				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
				&res, NULL, BCH_TRANS_COMMIT_no_enospc,
			move_key_to_correct_snapshot(trans, &iter, k));

		bch2_disk_reservation_put(c, &res);
		darray_exit(&equiv_seen);

		bch_err_msg(c, ret, "deleting keys from dying snapshots");
		if (ret)
			goto err;
	}

	bch2_trans_unlock(trans);
	down_write(&c->snapshot_create_lock);

	ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
				 POS_MIN, 0, k, ({
		u32 snapshot = k.k->p.offset;
		u32 equiv = bch2_snapshot_equiv(c, snapshot);

		equiv != snapshot
			? snapshot_list_add(c, &deleted_interior, snapshot)
			: 0;
	}));
	bch_err_msg(c, ret, "walking snapshots");
	if (ret)
		goto err_create_lock;

	/*
	 * Fixing children of deleted snapshots can't be done completely
	 * atomically, if we crash between here and when we delete the interior
	 * nodes some depth fields will be off:
	 */
	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN,
				  BTREE_ITER_INTENT, k,
				  NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior));
	if (ret)
		goto err_create_lock;

	darray_for_each(deleted, i) {
		ret = commit_do(trans, NULL, NULL, 0,
			bch2_snapshot_node_delete(trans, *i));
		bch_err_msg(c, ret, "deleting snapshot %u", *i);
		if (ret)
			goto err_create_lock;
	}

	darray_for_each(deleted_interior, i) {
		ret = commit_do(trans, NULL, NULL, 0,
			bch2_snapshot_node_delete(trans, *i));
		bch_err_msg(c, ret, "deleting snapshot %u", *i);
		if (ret)
			goto err_create_lock;
	}
err_create_lock:
	up_write(&c->snapshot_create_lock);
err:
	darray_exit(&deleted_interior);
	darray_exit(&deleted);
	bch2_trans_put(trans);
	bch_err_fn(c, ret);
	return ret;
}

void bch2_delete_dead_snapshots_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);

	bch2_delete_dead_snapshots(c);
	bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
}

void bch2_delete_dead_snapshots_async(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
	    !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
}

int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
				       enum btree_id id,
				       struct bpos pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, id, pos,
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_ALL_SNAPSHOTS);
	while (1) {
		k = bch2_btree_iter_prev(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		if (!k.k)
			break;

		if (!bkey_eq(pos, k.k->p))
			break;

		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
			ret = 1;
			break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}
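
/*
 * The helpers below implement the strategy described in the comment above
 * snapshot_delete_key(): when a key in an interior snapshot node needs to be
 * mutated, first copy it out to every descendant leaf where it hasn't
 * already been overwritten; after that the interior copy can be changed or
 * deleted without affecting what the leaves see:
 */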
static u32 bch2_snapshot_smallest_child(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s = snapshot_t(c, id);

	return s->children[1] ?: s->children[0];
}

static u32 bch2_snapshot_smallest_descendent(struct bch_fs *c, u32 id)
{
	u32 child;

	while ((child = bch2_snapshot_smallest_child(c, id)))
		id = child;
	return id;
}

static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
					       enum btree_id btree,
					       struct bkey_s_c interior_k,
					       u32 leaf_id, struct bpos *new_min_pos)
{
	struct btree_iter iter;
	struct bpos pos = interior_k.k->p;
	struct bkey_s_c k;
	struct bkey_i *new;
	int ret;

	pos.snapshot = leaf_id;

	bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto out;

	/* key already overwritten in this snapshot? */
	if (k.k->p.snapshot != interior_k.k->p.snapshot)
		goto out;

	if (bpos_eq(*new_min_pos, POS_MIN)) {
		*new_min_pos = k.k->p;
		new_min_pos->snapshot = leaf_id;
	}

	new = bch2_bkey_make_mut_noupdate(trans, interior_k);
	ret = PTR_ERR_OR_ZERO(new);
	if (ret)
		goto out;

	new->k.p.snapshot = leaf_id;
	ret = bch2_trans_update(trans, &iter, new, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_propagate_key_to_snapshot_leaves(struct btree_trans *trans,
					  enum btree_id btree,
					  struct bkey_s_c k,
					  struct bpos *new_min_pos)
{
	struct bch_fs *c = trans->c;
	struct bkey_buf sk;
	u32 restart_count = trans->restart_count;
	int ret = 0;

	bch2_bkey_buf_init(&sk);
	bch2_bkey_buf_reassemble(&sk, c, k);
	k = bkey_i_to_s_c(sk.k);

	*new_min_pos = POS_MIN;

	for (u32 id = bch2_snapshot_smallest_descendent(c, k.k->p.snapshot);
	     id < k.k->p.snapshot;
	     id++) {
		if (!bch2_snapshot_is_ancestor(c, id, k.k->p.snapshot) ||
		    !bch2_snapshot_is_leaf(c, id))
			continue;
again:
		ret =   btree_trans_too_many_iters(trans) ?:
			bch2_propagate_key_to_snapshot_leaf(trans, btree, k, id, new_min_pos) ?:
			bch2_trans_commit(trans, NULL, NULL, 0);
		if (ret && bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			bch2_trans_begin(trans);
			goto again;
		}

		if (ret)
			break;
	}

	bch2_bkey_buf_exit(&sk, c);

	return ret ?: trans_was_restarted(trans, restart_count);
}

static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c_snapshot snap;
	int ret = 0;

	if (k.k->type != KEY_TYPE_snapshot)
		return 0;

	snap = bkey_s_c_to_snapshot(k);
	if (BCH_SNAPSHOT_DELETED(snap.v) ||
	    bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset ||
	    (ret = bch2_snapshot_needs_delete(trans, k)) > 0) {
		set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
		return 0;
	}

	return ret;
}

int bch2_snapshots_read(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_snapshots,
				   POS_MIN, 0, k,
			__bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
			bch2_snapshot_set_equiv(trans, k) ?:
			bch2_check_snapshot_needs_deletion(trans, k)) ?:
		for_each_btree_key(trans, iter, BTREE_ID_snapshots,
				   POS_MIN, 0, k,
			(set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
	bch_err_fn(c, ret);
	return ret;
}

void bch2_fs_snapshots_exit(struct bch_fs *c)
{
	kvfree(rcu_dereference_protected(c->snapshots, true));
}