// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bbpos.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "checksum.h"
#include "disk_accounting.h"
#include "error.h"
#include "progress.h"

#include <linux/mm.h>

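/*
 * Basic sanity checks for a backpointer key: the level must fit in the
 * btree, and the key must not reference an invalid device slot.
 */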
int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k,
			      struct bkey_validate_context from)
{
	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
	int ret = 0;

	bkey_fsck_err_on(bp.v->level > BTREE_MAX_DEPTH,
			 c, backpointer_level_bad,
			 "backpointer level bad: %u > %u",
			 bp.v->level, BTREE_MAX_DEPTH);

	bkey_fsck_err_on(bp.k->p.inode == BCH_SB_MEMBER_INVALID,
			 c, backpointer_dev_bad,
			 "backpointer for BCH_SB_MEMBER_INVALID");
fsck_err:
	return ret;
}

void bch2_backpointer_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);

	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp.k->p.inode);
	if (ca) {
		u32 bucket_offset;
		struct bpos bucket = bp_pos_to_bucket_and_offset(ca, bp.k->p, &bucket_offset);
		rcu_read_unlock();
		prt_printf(out, "bucket=%llu:%llu:%u ", bucket.inode, bucket.offset, bucket_offset);
	} else {
		rcu_read_unlock();
		prt_printf(out, "sector=%llu:%llu ", bp.k->p.inode, bp.k->p.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT);
	}

	bch2_btree_id_level_to_text(out, bp.v->btree_id, bp.v->level);
	prt_str(out, " data_type=");
	bch2_prt_data_type(out, bp.v->data_type);
	prt_printf(out, " suboffset=%u len=%u gen=%u pos=",
		   (u32) bp.k->p.offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
		   bp.v->bucket_len,
		   bp.v->bucket_gen);
	bch2_bpos_to_text(out, bp.v->pos);
}

void bch2_backpointer_swab(struct bkey_s k)
{
	struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);

	bp.v->bucket_len = swab32(bp.v->bucket_len);
	bch2_bpos_swab(&bp.v->pos);
}

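/*
 * Returns true if @bp is exactly the backpointer that would be generated for
 * one of the pointers in extent @k - i.e. the backpointer and the extent it
 * points into are consistent.
 */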
static bool extent_matches_bp(struct bch_fs *c,
			      enum btree_id btree_id, unsigned level,
			      struct bkey_s_c k,
			      struct bkey_s_c_backpointer bp)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bkey_i_backpointer bp2;
		bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bp2);

		if (bpos_eq(bp.k->p, bp2.k.p) &&
		    !memcmp(bp.v, &bp2.v, sizeof(bp2.v)))
			return true;
	}

	return false;
}

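/*
 * Report an inconsistency between an extent and the backpointers btree:
 * either an existing backpointer was found where we expected an empty slot
 * (insert), or the backpointer we wanted to delete wasn't there.
 */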
static noinline int backpointer_mod_err(struct btree_trans *trans,
					struct bkey_s_c orig_k,
					struct bkey_i_backpointer *new_bp,
					struct bkey_s_c found_bp,
					bool insert)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;

	if (insert) {
		prt_printf(&buf, "existing backpointer found when inserting ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new_bp->k_i));
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		prt_printf(&buf, "found ");
		bch2_bkey_val_to_text(&buf, c, found_bp);
		prt_newline(&buf);

		prt_printf(&buf, "for ");
		bch2_bkey_val_to_text(&buf, c, orig_k);

		bch_err(c, "%s", buf.buf);
	} else if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
		prt_printf(&buf, "backpointer not found when deleting\n");
		printbuf_indent_add(&buf, 2);

		prt_printf(&buf, "searching for ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new_bp->k_i));
		prt_newline(&buf);

		prt_printf(&buf, "got ");
		bch2_bkey_val_to_text(&buf, c, found_bp);
		prt_newline(&buf);

		prt_printf(&buf, "for ");
		bch2_bkey_val_to_text(&buf, c, orig_k);

		bch_err(c, "%s", buf.buf);
	}

	printbuf_exit(&buf);

	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers)
		return bch2_inconsistent_error(c) ? -BCH_ERR_erofs_unfixed_errors : 0;

	return 0;
}

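/*
 * Slow path for when the btree write buffer isn't in use: check that the
 * slot we're inserting into is empty (or, on deletion, that it holds the
 * backpointer we expect), then do the update directly.
 */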
int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
					      struct bkey_s_c orig_k,
					      struct bkey_i_backpointer *bp,
					      bool insert)
{
	struct btree_iter bp_iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
					       bp->k.p,
					       BTREE_ITER_intent|
					       BTREE_ITER_slots|
					       BTREE_ITER_with_updates);
	int ret = bkey_err(k);
	if (ret)
		return ret;

	if (insert
	    ? k.k->type
	    : (k.k->type != KEY_TYPE_backpointer ||
	       memcmp(bkey_s_c_to_backpointer(k).v, &bp->v, sizeof(bp->v)))) {
		ret = backpointer_mod_err(trans, orig_k, bp, k, insert);
		if (ret)
			goto err;
	}

	if (!insert) {
		bp->k.type = KEY_TYPE_deleted;
		set_bkey_val_u64s(&bp->k, 0);
	}

	ret = bch2_trans_update(trans, &bp_iter, &bp->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &bp_iter);
	return ret;
}

static int bch2_backpointer_del(struct btree_trans *trans, struct bpos pos)
{
	return (likely(!bch2_backpointers_no_use_write_buffer)
		? bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, pos)
		: bch2_btree_delete(trans, BTREE_ID_backpointers, pos, 0)) ?:
		bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}

static inline int bch2_backpointers_maybe_flush(struct btree_trans *trans,
						struct bkey_s_c visiting_k,
						struct bkey_buf *last_flushed)
{
	return likely(!bch2_backpointers_no_use_write_buffer)
		? bch2_btree_write_buffer_maybe_flush(trans, visiting_k, last_flushed)
		: 0;
}

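/*
 * A backpointer didn't match the extent or btree node it claims to point
 * to: flush the write buffer first (the backpointer may simply not have
 * been deleted yet), then report the mismatch and delete the backpointer
 * if fsck says to.
 */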
static int backpointer_target_not_found(struct btree_trans *trans,
					struct bkey_s_c_backpointer bp,
					struct bkey_s_c target_k,
					struct bkey_buf *last_flushed)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	/*
	 * If we're using the btree write buffer, the backpointer we were
	 * looking at may have already been deleted - failure to find what it
	 * pointed to is not an error:
	 */
	ret = last_flushed
		? bch2_backpointers_maybe_flush(trans, bp.s_c, last_flushed)
		: 0;
	if (ret)
		return ret;

	prt_printf(&buf, "backpointer doesn't match %s it points to:\n ",
		   bp.v->level ? "btree node" : "extent");
	bch2_bkey_val_to_text(&buf, c, bp.s_c);

	prt_printf(&buf, "\n ");
	bch2_bkey_val_to_text(&buf, c, target_k);

	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(target_k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	bkey_for_each_ptr_decode(target_k.k, ptrs, p, entry)
		if (p.ptr.dev == bp.k->p.inode) {
			prt_printf(&buf, "\n ");
			struct bkey_i_backpointer bp2;
			bch2_extent_ptr_to_bp(c, bp.v->btree_id, bp.v->level, target_k, p, entry, &bp2);
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&bp2.k_i));
		}

	if (fsck_err(trans, backpointer_to_missing_ptr,
		     "%s", buf.buf))
		ret = bch2_backpointer_del(trans, bp.k->p);
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

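/*
 * Walk to the key that @bp points to. Returns bkey_s_c_null if the target
 * no longer exists; for backpointers to interior nodes this falls back to
 * bch2_backpointer_get_node() and returns that node's key.
 */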
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
					 struct bkey_s_c_backpointer bp,
					 struct btree_iter *iter,
					 unsigned iter_flags,
					 struct bkey_buf *last_flushed)
{
	struct bch_fs *c = trans->c;

	if (unlikely(bp.v->btree_id >= btree_id_nr_alive(c)))
		return bkey_s_c_null;

	bch2_trans_node_iter_init(trans, iter,
				  bp.v->btree_id,
				  bp.v->pos,
				  0,
				  bp.v->level,
				  iter_flags);
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
	if (bkey_err(k)) {
		bch2_trans_iter_exit(trans, iter);
		return k;
	}

	if (k.k &&
	    extent_matches_bp(c, bp.v->btree_id, bp.v->level, k, bp))
		return k;

	bch2_trans_iter_exit(trans, iter);

	if (!bp.v->level) {
		int ret = backpointer_target_not_found(trans, bp, k, last_flushed);
		return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
	} else {
		struct btree *b = bch2_backpointer_get_node(trans, bp, iter, last_flushed);
		if (b == ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node))
			return bkey_s_c_null;
		if (IS_ERR_OR_NULL(b))
			return ((struct bkey_s_c) { .k = ERR_CAST(b) });

		return bkey_i_to_s_c(&b->key);
	}
}

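/*
 * A backpointer to a btree node records the level of the node that contains
 * the pointer, so the target node itself lives at bp->level - 1.
 */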
struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
					struct bkey_s_c_backpointer bp,
					struct btree_iter *iter,
					struct bkey_buf *last_flushed)
{
	struct bch_fs *c = trans->c;

	BUG_ON(!bp.v->level);

	bch2_trans_node_iter_init(trans, iter,
				  bp.v->btree_id,
				  bp.v->pos,
				  0,
				  bp.v->level - 1,
				  0);
	struct btree *b = bch2_btree_iter_peek_node(iter);
	if (IS_ERR_OR_NULL(b))
		goto err;

	BUG_ON(b->c.level != bp.v->level - 1);

	if (extent_matches_bp(c, bp.v->btree_id, bp.v->level,
			      bkey_i_to_s_c(&b->key), bp))
		return b;

	if (btree_node_will_make_reachable(b)) {
		b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
	} else {
		int ret = backpointer_target_not_found(trans, bp, bkey_i_to_s_c(&b->key), last_flushed);
		b = ret ? ERR_PTR(ret) : NULL;
	}
err:
	bch2_trans_iter_exit(trans, iter);
	return b;
}

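/*
 * Check that a backpointer key refers to a device and bucket that actually
 * exist - i.e. that it has a corresponding alloc key - deleting it if not.
 */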
static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, struct bkey_s_c k,
						   struct bkey_buf *last_flushed)
{
	if (k.k->type != KEY_TYPE_backpointer)
		return 0;

	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter = { NULL };
	struct bkey_s_c alloc_k;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bpos bucket;
	if (!bp_pos_to_bucket_nodev_noerror(c, k.k->p, &bucket)) {
		ret = bch2_backpointers_maybe_flush(trans, k, last_flushed);
		if (ret)
			goto out;

		if (fsck_err(trans, backpointer_to_missing_device,
			     "backpointer for missing device:\n%s",
			     (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = bch2_backpointer_del(trans, k.k->p);
		goto out;
	}

	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, bucket, 0);
	ret = bkey_err(alloc_k);
	if (ret)
		goto out;

	if (alloc_k.k->type != KEY_TYPE_alloc_v4) {
		ret = bch2_backpointers_maybe_flush(trans, k, last_flushed);
		if (ret)
			goto out;

		if (fsck_err(trans, backpointer_to_missing_alloc,
			     "backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
			     alloc_iter.pos.inode, alloc_iter.pos.offset,
			     (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = bch2_backpointer_del(trans, k.k->p);
	}
out:
fsck_err:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
}

/* verify that every backpointer has a corresponding alloc key */
int bch2_check_btree_backpointers(struct bch_fs *c)
{
	struct bkey_buf last_flushed;
	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);

	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
			BTREE_ID_backpointers, POS_MIN, 0, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_check_backpointer_has_valid_bucket(trans, k, &last_flushed)));

	bch2_bkey_buf_exit(&last_flushed, c);
	bch_err_fn(c, ret);
	return ret;
}

struct extents_to_bp_state {
	struct bpos	bp_start;
	struct bpos	bp_end;
	struct bkey_buf	last_flushed;
};

static int drop_dev_and_update(struct btree_trans *trans, enum btree_id btree,
			       struct bkey_s_c extent, unsigned dev)
{
	struct bkey_i *n = bch2_bkey_make_mut_noupdate(trans, extent);
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	bch2_bkey_drop_device(bkey_i_to_s(n), dev);
	return bch2_btree_insert_trans(trans, btree, n, 0);
}

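/*
 * Two extents claim the same space on disk: read the data back and verify
 * the first extent's checksum. Returns 1 (after dropping the bad pointer)
 * if the checksum didn't verify, 0 if it did, negative on error.
 */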
static int check_extent_checksum(struct btree_trans *trans,
				 enum btree_id btree, struct bkey_s_c extent,
				 enum btree_id o_btree, struct bkey_s_c extent2, unsigned dev)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(extent);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct printbuf buf = PRINTBUF;
	void *data_buf = NULL;
	struct bio *bio = NULL;
	size_t bytes;
	int ret = 0;

	if (bkey_is_btree_ptr(extent.k))
		return false;

	bkey_for_each_ptr_decode(extent.k, ptrs, p, entry)
		if (p.ptr.dev == dev)
			goto found;
	BUG();
found:
	if (!p.crc.csum_type)
		return false;

	bytes = p.crc.compressed_size << 9;

	struct bch_dev *ca = bch2_dev_get_ioref(c, dev, READ);
	if (!ca)
		return false;

	data_buf = kvmalloc(bytes, GFP_KERNEL);
	if (!data_buf) {
		ret = -ENOMEM;
		goto err;
	}

	bio = bio_alloc(ca->disk_sb.bdev, buf_pages(data_buf, bytes), REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = p.ptr.offset;
	bch2_bio_map(bio, data_buf, bytes);
	ret = submit_bio_wait(bio);
	if (ret)
		goto err;

	prt_str(&buf, "extents pointing to same space, but first extent checksum bad:");
	prt_printf(&buf, "\n ");
	bch2_btree_id_to_text(&buf, btree);
	prt_str(&buf, " ");
	bch2_bkey_val_to_text(&buf, c, extent);
	prt_printf(&buf, "\n ");
	bch2_btree_id_to_text(&buf, o_btree);
	prt_str(&buf, " ");
	bch2_bkey_val_to_text(&buf, c, extent2);

	struct nonce nonce = extent_nonce(extent.k->bversion, p.crc);
	struct bch_csum csum = bch2_checksum(c, p.crc.csum_type, nonce, data_buf, bytes);
	if (fsck_err_on(bch2_crc_cmp(csum, p.crc.csum),
			trans, dup_backpointer_to_bad_csum_extent,
			"%s", buf.buf))
		ret = drop_dev_and_update(trans, btree, extent, dev) ?: 1;
fsck_err:
err:
	if (bio)
		bio_put(bio);
	kvfree(data_buf);
	percpu_ref_put(&ca->io_ref);
	printbuf_exit(&buf);
	return ret;
}

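/*
 * Verify that the backpointer @bp generated from @orig_k is present in the
 * backpointers btree; if the slot instead holds a backpointer to a
 * different extent, work out which of the two extents is the bad one.
 */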
static int check_bp_exists(struct btree_trans *trans,
			   struct extents_to_bp_state *s,
			   struct bkey_i_backpointer *bp,
			   struct bkey_s_c orig_k)
{
	struct bch_fs *c = trans->c;
	struct btree_iter other_extent_iter = {};
	struct printbuf buf = PRINTBUF;

	if (bpos_lt(bp->k.p, s->bp_start) ||
	    bpos_gt(bp->k.p, s->bp_end))
		return 0;

	struct btree_iter bp_iter;
	struct bkey_s_c bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers, bp->k.p, 0);
	int ret = bkey_err(bp_k);
	if (ret)
		goto err;

	if (bp_k.k->type != KEY_TYPE_backpointer ||
	    memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp->v, sizeof(bp->v))) {
		ret = bch2_btree_write_buffer_maybe_flush(trans, orig_k, &s->last_flushed);
		if (ret)
			goto err;

		goto check_existing_bp;
	}
out:
err:
fsck_err:
	bch2_trans_iter_exit(trans, &other_extent_iter);
	bch2_trans_iter_exit(trans, &bp_iter);
	printbuf_exit(&buf);
	return ret;
check_existing_bp:
	/* Do we have a backpointer for a different extent? */
	if (bp_k.k->type != KEY_TYPE_backpointer)
		goto missing;

	struct bkey_s_c_backpointer other_bp = bkey_s_c_to_backpointer(bp_k);

	struct bkey_s_c other_extent =
		bch2_backpointer_get_key(trans, other_bp, &other_extent_iter, 0, NULL);
	ret = bkey_err(other_extent);
	if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
		ret = 0;
	if (ret)
		goto err;

	if (!other_extent.k)
		goto missing;

	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp->k.p.inode);
	if (ca) {
		struct bkey_ptrs_c other_extent_ptrs = bch2_bkey_ptrs_c(other_extent);
		bkey_for_each_ptr(other_extent_ptrs, ptr)
			if (ptr->dev == bp->k.p.inode &&
			    dev_ptr_stale_rcu(ca, ptr)) {
				/* drop rcu_read_lock before updating the btree: */
				rcu_read_unlock();
				ret = drop_dev_and_update(trans, other_bp.v->btree_id,
							  other_extent, bp->k.p.inode);
				if (ret)
					goto err;
				goto out;
			}
	}
	rcu_read_unlock();

	if (bch2_extents_match(orig_k, other_extent)) {
		printbuf_reset(&buf);
		prt_printf(&buf, "duplicate versions of same extent, deleting smaller\n ");
		bch2_bkey_val_to_text(&buf, c, orig_k);
		prt_str(&buf, "\n ");
		bch2_bkey_val_to_text(&buf, c, other_extent);
		bch_err(c, "%s", buf.buf);

		if (other_extent.k->size <= orig_k.k->size) {
			ret = drop_dev_and_update(trans, other_bp.v->btree_id,
						  other_extent, bp->k.p.inode);
			if (ret)
				goto err;
			goto out;
		} else {
			ret = drop_dev_and_update(trans, bp->v.btree_id, orig_k, bp->k.p.inode);
			if (ret)
				goto err;
			goto missing;
		}
	}

	ret = check_extent_checksum(trans,
				    other_bp.v->btree_id, other_extent,
				    bp->v.btree_id, orig_k,
				    bp->k.p.inode);
	if (ret < 0)
		goto err;
	if (ret) {
		ret = 0;
		goto missing;
	}

	ret = check_extent_checksum(trans, bp->v.btree_id, orig_k,
				    other_bp.v->btree_id, other_extent, bp->k.p.inode);
	if (ret < 0)
		goto err;
	if (ret) {
		ret = 0;
		goto out;
	}

	printbuf_reset(&buf);
	prt_printf(&buf, "duplicate extents pointing to same space on dev %llu\n ", bp->k.p.inode);
	bch2_bkey_val_to_text(&buf, c, orig_k);
	prt_str(&buf, "\n ");
	bch2_bkey_val_to_text(&buf, c, other_extent);
	bch_err(c, "%s", buf.buf);
	ret = -BCH_ERR_fsck_repair_unimplemented;
	goto err;
missing:
	printbuf_reset(&buf);
	prt_str(&buf, "missing backpointer\n for: ");
	bch2_bkey_val_to_text(&buf, c, orig_k);
	prt_printf(&buf, "\n want: ");
	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&bp->k_i));
	prt_printf(&buf, "\n got: ");
	bch2_bkey_val_to_text(&buf, c, bp_k);

	if (fsck_err(trans, ptr_to_missing_backpointer, "%s", buf.buf))
		ret = bch2_bucket_backpointer_mod(trans, orig_k, bp, true);

	goto out;
}

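/*
 * For each pointer in an extent, check that its backpointer exists - or
 * recreate it, if the bucket was flagged as having lost its backpointers
 * entirely. Stale cached pointers are skipped.
 */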
static int check_extent_to_backpointers(struct btree_trans *trans,
					struct extents_to_bp_state *s,
					enum btree_id btree, unsigned level,
					struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.dev == BCH_SB_MEMBER_INVALID)
			continue;

		rcu_read_lock();
		struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev);
		bool check = ca && test_bit(PTR_BUCKET_NR(ca, &p.ptr), ca->bucket_backpointer_mismatches);
		bool empty = ca && test_bit(PTR_BUCKET_NR(ca, &p.ptr), ca->bucket_backpointer_empty);

		bool stale = p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr));
		rcu_read_unlock();

		if ((check || empty) && !stale) {
			struct bkey_i_backpointer bp;
			bch2_extent_ptr_to_bp(c, btree, level, k, p, entry, &bp);

			int ret = check
				? check_bp_exists(trans, s, &bp, k)
				: bch2_bucket_backpointer_mod(trans, k, &bp, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int check_btree_root_to_backpointers(struct btree_trans *trans,
					    struct extents_to_bp_state *s,
					    enum btree_id btree_id,
					    int *level)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct btree *b;
	struct bkey_s_c k;
	int ret;
retry:
	bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN,
				  0, bch2_btree_id_root(c, btree_id)->b->c.level, 0);
	b = bch2_btree_iter_peek_node(&iter);
	ret = PTR_ERR_OR_ZERO(b);
	if (ret)
		goto err;

	if (b != btree_node_root(c, b)) {
		bch2_trans_iter_exit(trans, &iter);
		goto retry;
	}

	*level = b->c.level;

	k = bkey_i_to_s_c(&b->key);
	ret = check_extent_to_backpointers(trans, s, btree_id, b->c.level + 1, k);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
{
	return (struct bbpos) {
		.btree	= bp.btree_id,
		.pos	= bp.pos,
	};
}

static u64 mem_may_pin_bytes(struct bch_fs *c)
{
	struct sysinfo i;
	si_meminfo(&i);

	u64 mem_bytes = i.totalram * i.mem_unit;
	return div_u64(mem_bytes * c->opts.fsck_memory_usage_percent, 100);
}

static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
{
	return div_u64(mem_may_pin_bytes(c), c->opts.btree_node_size);
}

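/*
 * Pin btree nodes in the given btrees, starting from @start, until we run
 * out of the configured memory budget; *end is set to where we stopped, so
 * the caller can run its pass over the pinned range and then continue from
 * there.
 */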
static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
					u64 btree_leaf_mask,
					u64 btree_interior_mask,
					struct bbpos start, struct bbpos *end)
{
	struct bch_fs *c = trans->c;
	s64 mem_may_pin = mem_may_pin_bytes(c);
	int ret = 0;

	bch2_btree_cache_unpin(c);

	btree_interior_mask |= btree_leaf_mask;

	c->btree_cache.pinned_nodes_mask[0] = btree_leaf_mask;
	c->btree_cache.pinned_nodes_mask[1] = btree_interior_mask;
	c->btree_cache.pinned_nodes_start = start;
	c->btree_cache.pinned_nodes_end = *end = BBPOS_MAX;

	for (enum btree_id btree = start.btree;
	     btree < BTREE_ID_NR && !ret;
	     btree++) {
		unsigned depth = (BIT_ULL(btree) & btree_leaf_mask) ? 0 : 1;

		if (!(BIT_ULL(btree) & btree_leaf_mask) &&
		    !(BIT_ULL(btree) & btree_interior_mask))
			continue;

		ret = __for_each_btree_node(trans, iter, btree,
					    btree == start.btree ? start.pos : POS_MIN,
					    0, depth, BTREE_ITER_prefetch, b, ({
			mem_may_pin -= btree_buf_bytes(b);
			if (mem_may_pin <= 0) {
				c->btree_cache.pinned_nodes_end = *end =
					BBPOS(btree, b->key.k.p);
				break;
			}
			bch2_node_pin(c, b);
			0;
		}));
	}

	return ret;
}

static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
						   struct extents_to_bp_state *s)
{
	struct bch_fs *c = trans->c;
	struct progress_indicator_state progress;
	int ret = 0;

	bch2_progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_extents)|BIT_ULL(BTREE_ID_reflink));

	for (enum btree_id btree_id = 0;
	     btree_id < btree_id_nr_alive(c);
	     btree_id++) {
		int level, depth = btree_type_has_ptrs(btree_id) ? 0 : 1;

		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc,
				check_btree_root_to_backpointers(trans, s, btree_id, &level));
		if (ret)
			return ret;

		while (level >= depth) {
			struct btree_iter iter;
			bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0, level,
						  BTREE_ITER_prefetch);

			ret = for_each_btree_key_continue(trans, iter, 0, k, ({
				bch2_progress_update_iter(trans, &progress, &iter, "extents_to_backpointers");
				check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
				bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
			}));
			if (ret)
				return ret;

			--level;
		}
	}

	return 0;
}

enum alloc_sector_counter {
	ALLOC_dirty,
	ALLOC_cached,
	ALLOC_stripe,
	ALLOC_SECTORS_NR
};

static int data_type_to_alloc_counter(enum bch_data_type t)
{
	switch (t) {
	case BCH_DATA_btree:
	case BCH_DATA_user:
		return ALLOC_dirty;
	case BCH_DATA_cached:
		return ALLOC_cached;
	case BCH_DATA_stripe:
	case BCH_DATA_parity:
		return ALLOC_stripe;
	default:
		return -1;
	}
}

static int check_bucket_backpointers_to_extents(struct btree_trans *, struct bch_dev *, struct bpos);

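/*
 * Sum the sectors referenced by a bucket's backpointers and compare against
 * the alloc key's sector counts; buckets that don't match are flagged in
 * the bucket_backpointer_mismatches/empty bitmaps for the extents ->
 * backpointers pass to repair.
 */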
static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct bkey_s_c alloc_k,
					     struct bkey_buf *last_flushed)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert);
	bool need_commit = false;

	if (a->data_type == BCH_DATA_sb ||
	    a->data_type == BCH_DATA_journal ||
	    a->data_type == BCH_DATA_parity)
		return 0;

	u32 sectors[ALLOC_SECTORS_NR];
	memset(sectors, 0, sizeof(sectors));

	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(trans->c, alloc_k.k->p);
	if (!ca)
		return 0;

	struct btree_iter iter;
	struct bkey_s_c bp_k;
	int ret = 0;
	for_each_btree_key_max_norestart(trans, iter, BTREE_ID_backpointers,
				bucket_pos_to_bp_start(ca, alloc_k.k->p),
				bucket_pos_to_bp_end(ca, alloc_k.k->p), 0, bp_k, ret) {
		if (bp_k.k->type != KEY_TYPE_backpointer)
			continue;

		struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(bp_k);

		if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_backpointer_bucket_gen &&
		    (bp.v->bucket_gen != a->gen ||
		     bp.v->pad)) {
			ret = bch2_backpointer_del(trans, bp_k.k->p);
			if (ret)
				break;

			need_commit = true;
			continue;
		}

		if (bp.v->bucket_gen != a->gen)
			continue;

		int alloc_counter = data_type_to_alloc_counter(bp.v->data_type);
		if (alloc_counter < 0)
			continue;

		sectors[alloc_counter] += bp.v->bucket_len;
	}
	bch2_trans_iter_exit(trans, &iter);
	if (ret)
		goto err;

	if (need_commit) {
		ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
		if (ret)
			goto err;
	}

	if (sectors[ALLOC_dirty] != a->dirty_sectors ||
	    sectors[ALLOC_cached] != a->cached_sectors ||
	    sectors[ALLOC_stripe] != a->stripe_sectors) {
		if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_backpointer_bucket_gen) {
			ret = bch2_backpointers_maybe_flush(trans, alloc_k, last_flushed);
			if (ret)
				goto err;
		}

		if (sectors[ALLOC_dirty] > a->dirty_sectors ||
		    sectors[ALLOC_cached] > a->cached_sectors ||
		    sectors[ALLOC_stripe] > a->stripe_sectors) {
			ret = check_bucket_backpointers_to_extents(trans, ca, alloc_k.k->p) ?:
				-BCH_ERR_transaction_restart_nested;
			goto err;
		}

		if (!sectors[ALLOC_dirty] &&
		    !sectors[ALLOC_stripe] &&
		    !sectors[ALLOC_cached])
			__set_bit(alloc_k.k->p.offset, ca->bucket_backpointer_empty);
		else
			__set_bit(alloc_k.k->p.offset, ca->bucket_backpointer_mismatches);
	}
err:
	bch2_dev_put(ca);
	return ret;
}

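/*
 * Does this interior backpointers btree node cover any buckets flagged as
 * having mismatched backpointers? For btree_ptr_v2 keys we can check the
 * bitmaps against the node's key range; plain btree_ptr keys don't record
 * min_key, so we conservatively return true.
 */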
static bool backpointer_node_has_missing(struct bch_fs *c, struct bkey_s_c k)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr_v2: {
		bool ret = false;

		rcu_read_lock();
		struct bpos pos = bkey_s_c_to_btree_ptr_v2(k).v->min_key;
		while (pos.inode <= k.k->p.inode) {
			if (pos.inode >= c->sb.nr_devices)
				break;

			struct bch_dev *ca = bch2_dev_rcu_noerror(c, pos.inode);
			if (!ca)
				goto next;

			struct bpos bucket = bp_pos_to_bucket(ca, pos);
			bucket.offset = find_next_bit(ca->bucket_backpointer_mismatches,
						      ca->mi.nbuckets, bucket.offset);
			if (bucket.offset == ca->mi.nbuckets)
				goto next;

			ret = bpos_le(bucket_pos_to_bp_end(ca, bucket), k.k->p);
			if (ret)
				break;
next:
			pos = SPOS(pos.inode + 1, 0, 0);
		}
		rcu_read_unlock();

		return ret;
	}
	case KEY_TYPE_btree_ptr:
		return true;
	default:
		return false;
	}
}

static int btree_node_get_and_pin(struct btree_trans *trans, struct bkey_i *k,
				  enum btree_id btree, unsigned level)
{
	struct btree_iter iter;
	bch2_trans_node_iter_init(trans, &iter, btree, k->k.p, 0, level, 0);
	struct btree *b = bch2_btree_iter_peek_node(&iter);
	int ret = PTR_ERR_OR_ZERO(b);
	if (ret)
		goto err;

	if (b)
		bch2_node_pin(trans->c, b);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

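/*
 * Two passes over the interior nodes of the backpointers btree: first
 * prefetch every child node that covers a flagged bucket, then pin as many
 * of those nodes as fit in the memory budget, setting *end to the last
 * position pinned so the caller can continue from there.
 */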
static int bch2_pin_backpointer_nodes_with_missing(struct btree_trans *trans,
						   struct bpos start, struct bpos *end)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	struct bkey_buf tmp;
	bch2_bkey_buf_init(&tmp);

	bch2_btree_cache_unpin(c);

	*end = SPOS_MAX;

	s64 mem_may_pin = mem_may_pin_bytes(c);
	struct btree_iter iter;
	bch2_trans_node_iter_init(trans, &iter, BTREE_ID_backpointers, start,
				  0, 1, BTREE_ITER_prefetch);
	ret = for_each_btree_key_continue(trans, iter, 0, k, ({
		if (!backpointer_node_has_missing(c, k))
			continue;

		mem_may_pin -= c->opts.btree_node_size;
		if (mem_may_pin <= 0)
			break;

		bch2_bkey_buf_reassemble(&tmp, c, k);
		struct btree_path *path = btree_iter_path(trans, &iter);

		BUG_ON(path->level != 1);

		bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id, path->level - 1);
	}));
	if (ret)
		return ret;

	struct bpos pinned = SPOS_MAX;
	mem_may_pin = mem_may_pin_bytes(c);
	bch2_trans_node_iter_init(trans, &iter, BTREE_ID_backpointers, start,
				  0, 1, BTREE_ITER_prefetch);
	ret = for_each_btree_key_continue(trans, iter, 0, k, ({
		if (!backpointer_node_has_missing(c, k))
			continue;

		mem_may_pin -= c->opts.btree_node_size;
		if (mem_may_pin <= 0) {
			*end = pinned;
			break;
		}

		bch2_bkey_buf_reassemble(&tmp, c, k);
		struct btree_path *path = btree_iter_path(trans, &iter);

		BUG_ON(path->level != 1);

		int ret2 = btree_node_get_and_pin(trans, tmp.k, path->btree_id, path->level - 1);

		if (!ret2)
			pinned = tmp.k->k.p;

		ret;
	}));

	return ret;
}

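/*
 * Walk every extent and btree node pointer and verify that it has a
 * backpointer: we first scan the alloc btree for buckets whose backpointers
 * don't add up, so that the expensive pass only has to consider pointers
 * into those buckets.
 */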
int bch2_check_extents_to_backpointers(struct bch_fs *c)
{
	int ret = 0;

	/*
	 * Can't allow devices to come/go/resize while we have bucket bitmaps
	 * allocated
	 */
	lockdep_assert_held(&c->state_lock);

	for_each_member_device(c, ca) {
		BUG_ON(ca->bucket_backpointer_mismatches);
		ca->bucket_backpointer_mismatches = kvcalloc(BITS_TO_LONGS(ca->mi.nbuckets),
							     sizeof(unsigned long),
							     GFP_KERNEL);
		ca->bucket_backpointer_empty = kvcalloc(BITS_TO_LONGS(ca->mi.nbuckets),
							sizeof(unsigned long),
							GFP_KERNEL);
		if (!ca->bucket_backpointer_mismatches ||
		    !ca->bucket_backpointer_empty) {
			bch2_dev_put(ca);
			ret = -BCH_ERR_ENOMEM_backpointer_mismatches_bitmap;
			goto err_free_bitmaps;
		}
	}

	struct btree_trans *trans = bch2_trans_get(c);
	struct extents_to_bp_state s = { .bp_start = POS_MIN };

	bch2_bkey_buf_init(&s.last_flushed);
	bkey_init(&s.last_flushed.k->k);

	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc,
				 POS_MIN, BTREE_ITER_prefetch, k, ({
		check_bucket_backpointer_mismatch(trans, k, &s.last_flushed);
	}));
	if (ret)
		goto err;

	u64 nr_buckets = 0, nr_mismatches = 0, nr_empty = 0;
	for_each_member_device(c, ca) {
		nr_buckets	+= ca->mi.nbuckets;
		nr_mismatches	+= bitmap_weight(ca->bucket_backpointer_mismatches, ca->mi.nbuckets);
		nr_empty	+= bitmap_weight(ca->bucket_backpointer_empty, ca->mi.nbuckets);
	}

	if (!nr_mismatches && !nr_empty)
		goto err;

	bch_info(c, "scanning for missing backpointers in %llu/%llu buckets",
		 nr_mismatches + nr_empty, nr_buckets);

	while (1) {
		ret = bch2_pin_backpointer_nodes_with_missing(trans, s.bp_start, &s.bp_end);
		if (ret)
			break;

		if ( bpos_eq(s.bp_start, POS_MIN) &&
		    !bpos_eq(s.bp_end, SPOS_MAX))
			bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
				    __func__, btree_nodes_fit_in_ram(c));

		if (!bpos_eq(s.bp_start, POS_MIN) ||
		    !bpos_eq(s.bp_end, SPOS_MAX)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "check_extents_to_backpointers(): ");
			bch2_bpos_to_text(&buf, s.bp_start);
			prt_str(&buf, "-");
			bch2_bpos_to_text(&buf, s.bp_end);

			bch_verbose(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		ret = bch2_check_extents_to_backpointers_pass(trans, &s);
		if (ret || bpos_eq(s.bp_end, SPOS_MAX))
			break;

		s.bp_start = bpos_successor(s.bp_end);
	}
err:
	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&s.last_flushed, c);
	bch2_btree_cache_unpin(c);
err_free_bitmaps:
	for_each_member_device(c, ca) {
		kvfree(ca->bucket_backpointer_empty);
		ca->bucket_backpointer_empty = NULL;
		kvfree(ca->bucket_backpointer_mismatches);
		ca->bucket_backpointer_mismatches = NULL;
	}

	bch_err_fn(c, ret);
	return ret;
}

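/*
 * Check that a single backpointer (if it falls within the given bbpos
 * range) points to a live extent or btree node.
 */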
static int check_one_backpointer(struct btree_trans *trans,
				 struct bbpos start,
				 struct bbpos end,
				 struct bkey_s_c bp_k,
				 struct bkey_buf *last_flushed)
{
	if (bp_k.k->type != KEY_TYPE_backpointer)
		return 0;

	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(bp_k);
	struct bbpos pos = bp_to_bbpos(*bp.v);

	if (bbpos_cmp(pos, start) < 0 ||
	    bbpos_cmp(pos, end) > 0)
		return 0;

	struct btree_iter iter;
	struct bkey_s_c k = bch2_backpointer_get_key(trans, bp, &iter, 0, last_flushed);
	int ret = bkey_err(k);
	if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
		return 0;
	if (ret)
		return ret;

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int check_bucket_backpointers_to_extents(struct btree_trans *trans,
						struct bch_dev *ca, struct bpos bucket)
{
	u32 restart_count = trans->restart_count;
	struct bkey_buf last_flushed;
	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);

	int ret = for_each_btree_key_max(trans, iter, BTREE_ID_backpointers,
				bucket_pos_to_bp_start(ca, bucket),
				bucket_pos_to_bp_end(ca, bucket),
				0, k,
		check_one_backpointer(trans, BBPOS_MIN, BBPOS_MAX, k, &last_flushed)
	);

	bch2_bkey_buf_exit(&last_flushed, trans->c);
	return ret ?: trans_was_restarted(trans, restart_count);
}

static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
						   struct bbpos start,
						   struct bbpos end)
{
	struct bch_fs *c = trans->c;
	struct bkey_buf last_flushed;
	struct progress_indicator_state progress;

	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);
	bch2_progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_backpointers));

	int ret = for_each_btree_key(trans, iter, BTREE_ID_backpointers,
				     POS_MIN, BTREE_ITER_prefetch, k, ({
		bch2_progress_update_iter(trans, &progress, &iter, "backpointers_to_extents");
		check_one_backpointer(trans, start, end, k, &last_flushed);
	}));

	bch2_bkey_buf_exit(&last_flushed, c);
	return ret;
}

int bch2_check_backpointers_to_extents(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
	int ret;

	while (1) {
		ret = bch2_get_btree_in_memory_pos(trans,
						   BIT_ULL(BTREE_ID_extents)|
						   BIT_ULL(BTREE_ID_reflink),
						   ~0,
						   start, &end);
		if (ret)
			break;

		if (!bbpos_cmp(start, BBPOS_MIN) &&
		     bbpos_cmp(end, BBPOS_MAX))
			bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
				    __func__, btree_nodes_fit_in_ram(c));

		if (bbpos_cmp(start, BBPOS_MIN) ||
		    bbpos_cmp(end, BBPOS_MAX)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "check_backpointers_to_extents(): ");
			bch2_bbpos_to_text(&buf, start);
			prt_str(&buf, "-");
			bch2_bbpos_to_text(&buf, end);

			bch_verbose(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		ret = bch2_check_backpointers_to_extents_pass(trans, start, end);
		if (ret || !bbpos_cmp(end, BBPOS_MAX))
			break;

		start = bbpos_successor(end);
	}
	bch2_trans_put(trans);

	bch2_btree_cache_unpin(c);

	bch_err_fn(c, ret);
	return ret;
}