xref: /linux/fs/bcachefs/backpointers.c (revision ea518afc992032f7570c0a89ac9240b387dc0faf)
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bbpos.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"

#include <linux/mm.h>

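/*
 * Check whether the extent or btree node key @k has a non-cached pointer
 * whose computed backpointer matches @bucket and @bp exactly, i.e. that the
 * key and the backpointer still refer to each other:
 */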
static bool extent_matches_bp(struct bch_fs *c,
			      enum btree_id btree_id, unsigned level,
			      struct bkey_s_c k,
			      struct bpos bucket,
			      struct bch_backpointer bp)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bpos bucket2;
		struct bch_backpointer bp2;

		if (p.ptr.cached)
			continue;

		bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
				      &bucket2, &bp2);
		if (bpos_eq(bucket, bucket2) &&
		    !memcmp(&bp, &bp2, sizeof(bp)))
			return true;
	}

	return false;
}

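/*
 * bkey validation for KEY_TYPE_backpointer: the key's position must
 * round-trip through bp_pos_to_bucket()/bucket_pos_to_bp(), or it doesn't
 * correspond to any device bucket:
 */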
int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
			     enum bkey_invalid_flags flags,
			     struct printbuf *err)
{
	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
	struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
	int ret = 0;

	bkey_fsck_err_on(!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset)),
			 c, err,
			 backpointer_pos_wrong,
			 "backpointer at wrong pos");
fsck_err:
	return ret;
}

void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
{
	prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
	       bch2_btree_id_str(bp->btree_id),
	       bp->level,
	       (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
	       (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
	       bp->bucket_len);
	bch2_bpos_to_text(out, bp->pos);
}

void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	prt_str(out, "bucket=");
	bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
	prt_str(out, " ");

	bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
}

void bch2_backpointer_swab(struct bkey_s k)
{
	struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);

	bp.v->bucket_offset	= swab40(bp.v->bucket_offset);
	bp.v->bucket_len	= swab32(bp.v->bucket_len);
	bch2_bpos_swab(&bp.v->pos);
}

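/*
 * Report a failed backpointer update from
 * bch2_bucket_backpointer_mod_nowritebuffer(): either a backpointer already
 * existed where we were about to insert one, or the backpointer we were
 * asked to delete didn't match. This is only a fatal inconsistency once the
 * extents_to_backpointers recovery pass has run; before that, mismatches are
 * expected and we return 0:
 */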
static noinline int backpointer_mod_err(struct btree_trans *trans,
					struct bch_backpointer bp,
					struct bkey_s_c bp_k,
					struct bkey_s_c orig_k,
					bool insert)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;

	if (insert) {
		prt_printf(&buf, "existing backpointer found when inserting ");
		bch2_backpointer_to_text(&buf, &bp);
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		prt_printf(&buf, "found ");
		bch2_bkey_val_to_text(&buf, c, bp_k);
		prt_newline(&buf);

		prt_printf(&buf, "for ");
		bch2_bkey_val_to_text(&buf, c, orig_k);

		bch_err(c, "%s", buf.buf);
	} else if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
		prt_printf(&buf, "backpointer not found when deleting");
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		prt_printf(&buf, "searching for ");
		bch2_backpointer_to_text(&buf, &bp);
		prt_newline(&buf);

		prt_printf(&buf, "got ");
		bch2_bkey_val_to_text(&buf, c, bp_k);
		prt_newline(&buf);

		prt_printf(&buf, "for ");
		bch2_bkey_val_to_text(&buf, c, orig_k);

		bch_err(c, "%s", buf.buf);
	}

	printbuf_exit(&buf);

	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
		bch2_inconsistent_error(c);
		return -EIO;
	} else {
		return 0;
	}
}

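/*
 * Slow path for backpointer updates, bypassing the btree write buffer: read
 * the existing key at the backpointer's position, check that the insert or
 * delete is consistent with what's already there, then do a regular
 * transactional update:
 */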
int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
				struct bpos bucket,
				struct bch_backpointer bp,
				struct bkey_s_c orig_k,
				bool insert)
{
	struct btree_iter bp_iter;
	struct bkey_s_c k;
	struct bkey_i_backpointer *bp_k;
	int ret;

	bp_k = bch2_trans_kmalloc_nomemzero(trans, sizeof(struct bkey_i_backpointer));
	ret = PTR_ERR_OR_ZERO(bp_k);
	if (ret)
		return ret;

	bkey_backpointer_init(&bp_k->k_i);
	bp_k->k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
	bp_k->v = bp;

	if (!insert) {
		bp_k->k.type = KEY_TYPE_deleted;
		set_bkey_val_u64s(&bp_k->k, 0);
	}

	k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
			       bp_k->k.p,
			       BTREE_ITER_INTENT|
			       BTREE_ITER_SLOTS|
			       BTREE_ITER_WITH_UPDATES);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (insert
	    ? k.k->type
	    : (k.k->type != KEY_TYPE_backpointer ||
	       memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp)))) {
		ret = backpointer_mod_err(trans, bp, k, orig_k, insert);
		if (ret)
			goto err;
	}

	ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &bp_iter);
	return ret;
}

/*
 * Find the next backpointer >= *bp_pos:
 */
int bch2_get_next_backpointer(struct btree_trans *trans,
			      struct bpos bucket, int gen,
			      struct bpos *bp_pos,
			      struct bch_backpointer *bp,
			      unsigned iter_flags)
{
	struct bch_fs *c = trans->c;
	struct bpos bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);
	struct btree_iter alloc_iter = { NULL }, bp_iter = { NULL };
	struct bkey_s_c k;
	int ret = 0;

	if (bpos_ge(*bp_pos, bp_end_pos))
		goto done;

	if (gen >= 0) {
		k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
				       bucket, BTREE_ITER_CACHED|iter_flags);
		ret = bkey_err(k);
		if (ret)
			goto out;

		if (k.k->type != KEY_TYPE_alloc_v4 ||
		    bkey_s_c_to_alloc_v4(k).v->gen != gen)
			goto done;
	}

	*bp_pos = bpos_max(*bp_pos, bucket_pos_to_bp(c, bucket, 0));

	for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
				     *bp_pos, iter_flags, k, ret) {
		if (bpos_ge(k.k->p, bp_end_pos))
			break;

		*bp_pos = k.k->p;
		*bp = *bkey_s_c_to_backpointer(k).v;
		goto out;
	}
done:
	*bp_pos = SPOS_MAX;
out:
	bch2_trans_iter_exit(trans, &bp_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	return ret;
}

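/*
 * Report a backpointer whose target doesn't match it. Only meaningful when
 * the btree write buffer is bypassed; otherwise the backpointer may simply
 * have been deleted out from under us, and we return silently:
 */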
static void backpointer_not_found(struct btree_trans *trans,
				  struct bpos bp_pos,
				  struct bch_backpointer bp,
				  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	struct bpos bucket = bp_pos_to_bucket(c, bp_pos);

	/*
	 * If we're using the btree write buffer, the backpointer we were
	 * looking at may have already been deleted - failure to find what it
	 * pointed to is not an error:
	 */
	if (likely(!bch2_backpointers_no_use_write_buffer))
		return;

	prt_printf(&buf, "backpointer doesn't match %s it points to:\n  ",
		   bp.level ? "btree node" : "extent");
	prt_printf(&buf, "bucket: ");
	bch2_bpos_to_text(&buf, bucket);
	prt_printf(&buf, "\n  ");

	prt_printf(&buf, "backpointer pos: ");
	bch2_bpos_to_text(&buf, bp_pos);
	prt_printf(&buf, "\n  ");

	bch2_backpointer_to_text(&buf, &bp);
	prt_printf(&buf, "\n  ");
	bch2_bkey_val_to_text(&buf, c, k);
	if (c->curr_recovery_pass >= BCH_RECOVERY_PASS_check_extents_to_backpointers)
		bch_err_ratelimited(c, "%s", buf.buf);
	else
		bch2_trans_inconsistent(trans, "%s", buf.buf);

	printbuf_exit(&buf);
}

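/*
 * Given a backpointer, return the extent it points to (or, for a backpointer
 * to an interior btree node, the node's key), after verifying that it still
 * matches the backpointer; returns bkey_s_c_null if it doesn't:
 */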
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
					 struct btree_iter *iter,
					 struct bpos bp_pos,
					 struct bch_backpointer bp,
					 unsigned iter_flags)
{
	if (likely(!bp.level)) {
		struct bch_fs *c = trans->c;
		struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
		struct bkey_s_c k;

		bch2_trans_node_iter_init(trans, iter,
					  bp.btree_id,
					  bp.pos,
					  0, 0,
					  iter_flags);
		k = bch2_btree_iter_peek_slot(iter);
		if (bkey_err(k)) {
			bch2_trans_iter_exit(trans, iter);
			return k;
		}

		if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
			return k;

		bch2_trans_iter_exit(trans, iter);
		backpointer_not_found(trans, bp_pos, bp, k);
		return bkey_s_c_null;
	} else {
		struct btree *b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);

		if (IS_ERR_OR_NULL(b)) {
			bch2_trans_iter_exit(trans, iter);
			return IS_ERR(b) ? bkey_s_c_err(PTR_ERR(b)) : bkey_s_c_null;
		}
		return bkey_i_to_s_c(&b->key);
	}
}

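/*
 * For a backpointer to an interior btree node (bp.level != 0): walk to the
 * node it points to and verify the node's key still matches the backpointer.
 * Returns NULL on mismatch, or
 * -BCH_ERR_backpointer_to_overwritten_btree_node if we raced with an
 * interior node update:
 */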
struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
					struct btree_iter *iter,
					struct bpos bp_pos,
					struct bch_backpointer bp)
{
	struct bch_fs *c = trans->c;
	struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
	struct btree *b;

	BUG_ON(!bp.level);

	bch2_trans_node_iter_init(trans, iter,
				  bp.btree_id,
				  bp.pos,
				  0,
				  bp.level - 1,
				  0);
	b = bch2_btree_iter_peek_node(iter);
	if (IS_ERR_OR_NULL(b))
		goto err;

	BUG_ON(b->c.level != bp.level - 1);

	if (extent_matches_bp(c, bp.btree_id, bp.level,
			      bkey_i_to_s_c(&b->key),
			      bucket, bp))
		return b;

	if (btree_node_will_make_reachable(b)) {
		b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
	} else {
		backpointer_not_found(trans, bp_pos, bp, bkey_i_to_s_c(&b->key));
		b = NULL;
	}
err:
	bch2_trans_iter_exit(trans, iter);
	return b;
}

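/*
 * A backpointer key must point into an existing device and bucket with a
 * live alloc key; delete backpointers that don't:
 */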
static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
					struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter = { NULL };
	struct bkey_s_c alloc_k;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
			backpointer_to_missing_device,
			"backpointer for missing device:\n%s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, bp_iter, 0);
		goto out;
	}

	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
				     bp_pos_to_bucket(c, k.k->p), 0);
	ret = bkey_err(alloc_k);
	if (ret)
		goto out;

	if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
			backpointer_to_missing_alloc,
			"backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
			alloc_iter.pos.inode, alloc_iter.pos.offset,
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, bp_iter, 0);
		goto out;
	}
out:
fsck_err:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
}

/* verify that every backpointer has a corresponding alloc key */
int bch2_check_btree_backpointers(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
			BTREE_ID_backpointers, POS_MIN, 0, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		  bch2_check_btree_backpointer(trans, &iter, k)));
	bch_err_fn(c, ret);
	return ret;
}

static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r)
{
	return bpos_eq(l.k->p, r.k->p) &&
		bkey_bytes(l.k) == bkey_bytes(r.k) &&
		!memcmp(l.v, r.v, bkey_val_bytes(l.k));
}

struct extents_to_bp_state {
	struct bpos	bucket_start;
	struct bpos	bucket_end;
	struct bkey_buf last_flushed;
};

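/*
 * Check that the backpointer for @orig_k's pointer into @bucket exists and
 * matches @bp. A mismatch may just mean the backpointer is still sitting in
 * the btree write buffer, so on the first mismatch for a given key we flush
 * the write buffer (tracking the key in s->last_flushed) and restart; only a
 * repeat mismatch is treated as a missing backpointer:
 */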
static int check_bp_exists(struct btree_trans *trans,
			   struct extents_to_bp_state *s,
			   struct bpos bucket,
			   struct bch_backpointer bp,
			   struct bkey_s_c orig_k)
{
	struct bch_fs *c = trans->c;
	struct btree_iter bp_iter = { NULL };
	struct printbuf buf = PRINTBUF;
	struct bkey_s_c bp_k;
	struct bkey_buf tmp;
	int ret;

	bch2_bkey_buf_init(&tmp);

	if (bpos_lt(bucket, s->bucket_start) ||
	    bpos_gt(bucket, s->bucket_end))
		return 0;

	if (!bch2_dev_bucket_exists(c, bucket))
		goto missing;

	bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
				  bucket_pos_to_bp(c, bucket, bp.bucket_offset),
				  0);
	ret = bkey_err(bp_k);
	if (ret)
		goto err;

	if (bp_k.k->type != KEY_TYPE_backpointer ||
	    memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
		bch2_bkey_buf_reassemble(&tmp, c, orig_k);

		if (!bkey_and_val_eq(orig_k, bkey_i_to_s_c(s->last_flushed.k))) {
			if (bp.level) {
				bch2_trans_unlock(trans);
				bch2_btree_interior_updates_flush(c);
			}

			ret = bch2_btree_write_buffer_flush_sync(trans);
			if (ret)
				goto err;

			bch2_bkey_buf_copy(&s->last_flushed, c, tmp.k);
			ret = -BCH_ERR_transaction_restart_write_buffer_flush;
			goto out;
		}
		goto missing;
	}
out:
err:
fsck_err:
	bch2_trans_iter_exit(trans, &bp_iter);
	bch2_bkey_buf_exit(&tmp, c);
	printbuf_exit(&buf);
	return ret;
missing:
	prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
	       bch2_btree_id_str(bp.btree_id), bp.level);
	bch2_bkey_val_to_text(&buf, c, orig_k);
	prt_printf(&buf, "\nbp pos ");
	bch2_bpos_to_text(&buf, bp_iter.pos);

	if (c->opts.reconstruct_alloc ||
	    fsck_err(c, ptr_to_missing_backpointer, "%s", buf.buf))
		ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true);

	goto out;
}

static int check_extent_to_backpointers(struct btree_trans *trans,
					struct extents_to_bp_state *s,
					enum btree_id btree, unsigned level,
					struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	int ret;

	ptrs = bch2_bkey_ptrs_c(k);
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bpos bucket_pos;
		struct bch_backpointer bp;

		if (p.ptr.cached)
			continue;

		bch2_extent_ptr_to_bp(c, btree, level,
				      k, p, &bucket_pos, &bp);

		ret = check_bp_exists(trans, s, bucket_pos, bp, k);
		if (ret)
			return ret;
	}

	return 0;
}

static int check_btree_root_to_backpointers(struct btree_trans *trans,
					    struct extents_to_bp_state *s,
					    enum btree_id btree_id,
					    int *level)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct btree *b;
	struct bkey_s_c k;
	int ret;
retry:
	bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN,
				  0, bch2_btree_id_root(c, btree_id)->b->c.level, 0);
	b = bch2_btree_iter_peek_node(&iter);
	ret = PTR_ERR_OR_ZERO(b);
	if (ret)
		goto err;

	if (b != btree_node_root(c, b)) {
		bch2_trans_iter_exit(trans, &iter);
		goto retry;
	}

	*level = b->c.level;

	k = bkey_i_to_s_c(&b->key);
	ret = check_extent_to_backpointers(trans, s, btree_id, b->c.level + 1, k);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
{
	return (struct bbpos) {
		.btree	= bp.btree_id,
		.pos	= bp.pos,
	};
}

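/*
 * These passes pin btree nodes in memory: budget at most half of total RAM,
 * expressed as a number of btree nodes:
 */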
static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
{
	struct sysinfo i;
	u64 mem_bytes;

	si_meminfo(&i);
	mem_bytes = i.totalram * i.mem_unit;
	return div_u64(mem_bytes >> 1, c->opts.btree_node_size);
}

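/*
 * Compute the position at which walking the given btrees would exceed our
 * btree node budget: @btree_leaf_mask btrees are counted at the leaf level,
 * @btree_interior_mask btrees from level 1 up. The caller then runs its pass
 * over [start, *end] and repeats:
 */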
static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
					unsigned btree_leaf_mask,
					unsigned btree_interior_mask,
					struct bbpos start, struct bbpos *end)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
	enum btree_id btree;
	int ret = 0;

	for (btree = start.btree; btree < BTREE_ID_NR && !ret; btree++) {
		unsigned depth = ((1U << btree) & btree_leaf_mask) ? 1 : 2;

		if (!((1U << btree) & btree_leaf_mask) &&
		    !((1U << btree) & btree_interior_mask))
			continue;

		bch2_trans_node_iter_init(trans, &iter, btree,
					  btree == start.btree ? start.pos : POS_MIN,
					  0, depth, 0);
		/*
		 * for_each_btree_key_continue() doesn't check the return value
		 * from bch2_btree_iter_advance(), which is needed when
		 * iterating over interior nodes where we'll see keys at
		 * SPOS_MAX:
		 */
		do {
			k = __bch2_btree_iter_peek_and_restart(trans, &iter, 0);
			ret = bkey_err(k);
			if (!k.k || ret)
				break;

			--btree_nodes;
			if (!btree_nodes) {
				*end = BBPOS(btree, k.k->p);
				bch2_trans_iter_exit(trans, &iter);
				return 0;
			}
		} while (bch2_btree_iter_advance(&iter));
		bch2_trans_iter_exit(trans, &iter);
	}

	*end = BBPOS_MAX;
	return ret;
}

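/*
 * One pass of the extents -> backpointers check: for every btree, walk from
 * the root key down one level at a time, checking that each pointer landing
 * in [s->bucket_start, s->bucket_end] has a matching backpointer:
 */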
static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
						   struct extents_to_bp_state *s)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	for (enum btree_id btree_id = 0;
	     btree_id < btree_id_nr_alive(c);
	     btree_id++) {
		int level, depth = btree_type_has_ptrs(btree_id) ? 0 : 1;

		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc,
				check_btree_root_to_backpointers(trans, s, btree_id, &level));
		if (ret)
			return ret;

		while (level >= depth) {
			struct btree_iter iter;
			bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
						  level,
						  BTREE_ITER_PREFETCH);
			while (1) {
				bch2_trans_begin(trans);

				struct bkey_s_c k = bch2_btree_iter_peek(&iter);
				if (!k.k)
					break;
				ret = bkey_err(k) ?:
					check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
					bch2_trans_commit(trans, NULL, NULL,
							  BCH_TRANS_COMMIT_no_enospc);
				if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
					ret = 0;
					continue;
				}
				if (ret)
					break;
				if (bpos_eq(iter.pos, SPOS_MAX))
					break;
				bch2_btree_iter_advance(&iter);
			}
			bch2_trans_iter_exit(trans, &iter);

			if (ret)
				return ret;

			--level;
		}
	}

	return 0;
}

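/* bucket_pos_to_bp(), but safe to call with a bucket on a nonexistent device: */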
static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
					 struct bpos bucket)
{
	return bch2_dev_exists2(c, bucket.inode)
		? bucket_pos_to_bp(c, bucket, 0)
		: bucket;
}

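/*
 * Find how much of the alloc btree we can process in one pass: walk the
 * alloc and backpointers btrees in lockstep, one interior node key at a
 * time, until we've seen as many nodes as fit in our memory budget:
 */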
static int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
					struct bpos start, struct bpos *end)
{
	struct btree_iter alloc_iter;
	struct btree_iter bp_iter;
	struct bkey_s_c alloc_k, bp_k;
	size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
	bool alloc_end = false, bp_end = false;
	int ret = 0;

	bch2_trans_node_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
				  start, 0, 1, 0);
	bch2_trans_node_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
				  bucket_pos_to_bp_safe(trans->c, start), 0, 1, 0);
	while (1) {
		alloc_k = !alloc_end
			? __bch2_btree_iter_peek_and_restart(trans, &alloc_iter, 0)
			: bkey_s_c_null;
		bp_k = !bp_end
			? __bch2_btree_iter_peek_and_restart(trans, &bp_iter, 0)
			: bkey_s_c_null;

		ret = bkey_err(alloc_k) ?: bkey_err(bp_k);
		if ((!alloc_k.k && !bp_k.k) || ret) {
			*end = SPOS_MAX;
			break;
		}

		--btree_nodes;
		if (!btree_nodes) {
			*end = alloc_k.k ? alloc_k.k->p : SPOS_MAX;
			break;
		}

		if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
		    bpos_lt(bucket_pos_to_bp_safe(trans->c, alloc_iter.pos), bp_iter.pos)) {
			if (!bch2_btree_iter_advance(&alloc_iter))
				alloc_end = true;
		} else {
			if (!bch2_btree_iter_advance(&bp_iter))
				bp_end = true;
		}
	}
	bch2_trans_iter_exit(trans, &bp_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	return ret;
}

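/*
 * fsck: verify that every extent and btree node pointer has a backpointer.
 * If the alloc and backpointers btrees won't fit in memory all at once, run
 * in multiple passes over successive bucket ranges:
 */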
int bch2_check_extents_to_backpointers(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct extents_to_bp_state s = { .bucket_start = POS_MIN };
	int ret;

	bch2_bkey_buf_init(&s.last_flushed);
	bkey_init(&s.last_flushed.k->k);

	while (1) {
		ret = bch2_get_alloc_in_memory_pos(trans, s.bucket_start, &s.bucket_end);
		if (ret)
			break;

		if ( bpos_eq(s.bucket_start, POS_MIN) &&
		    !bpos_eq(s.bucket_end, SPOS_MAX))
			bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
				    __func__, btree_nodes_fit_in_ram(c));

		if (!bpos_eq(s.bucket_start, POS_MIN) ||
		    !bpos_eq(s.bucket_end, SPOS_MAX)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "check_extents_to_backpointers(): ");
			bch2_bpos_to_text(&buf, s.bucket_start);
			prt_str(&buf, "-");
			bch2_bpos_to_text(&buf, s.bucket_end);

			bch_verbose(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		ret = bch2_check_extents_to_backpointers_pass(trans, &s);
		if (ret || bpos_eq(s.bucket_end, SPOS_MAX))
			break;

		s.bucket_start = bpos_successor(s.bucket_end);
	}
	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&s.last_flushed, c);

	bch_err_fn(c, ret);
	return ret;
}

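/*
 * Check that one backpointer points to a live extent or btree node. A
 * missing target gets one retry after a write buffer flush (tracked via
 * @last_flushed_pos) before the backpointer is reported and deleted:
 */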
static int check_one_backpointer(struct btree_trans *trans,
				 struct bbpos start,
				 struct bbpos end,
				 struct bkey_s_c_backpointer bp,
				 struct bpos *last_flushed_pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bbpos pos = bp_to_bbpos(*bp.v);
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (bbpos_cmp(pos, start) < 0 ||
	    bbpos_cmp(pos, end) > 0)
		return 0;

	k = bch2_backpointer_get_key(trans, &iter, bp.k->p, *bp.v, 0);
	ret = bkey_err(k);
	if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
		return 0;
	if (ret)
		return ret;

	if (!k.k && !bpos_eq(*last_flushed_pos, bp.k->p)) {
		*last_flushed_pos = bp.k->p;
		ret = bch2_btree_write_buffer_flush_sync(trans) ?:
			-BCH_ERR_transaction_restart_write_buffer_flush;
		goto out;
	}

	if (fsck_err_on(!k.k, c,
			backpointer_to_missing_ptr,
			"backpointer for missing %s\n  %s",
			bp.v->level ? "btree node" : "extent",
			(bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
		ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
		goto out;
	}
out:
fsck_err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}

static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
						   struct bbpos start,
						   struct bbpos end)
{
	struct bpos last_flushed_pos = SPOS_MAX;

	return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
				  POS_MIN, BTREE_ITER_PREFETCH, k,
				  NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		check_one_backpointer(trans, start, end,
				      bkey_s_c_to_backpointer(k),
				      &last_flushed_pos));
}

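/*
 * fsck: the reverse check, verifying that every backpointer points to a live
 * extent or btree node, processing as much of the extents and reflink btrees
 * per pass as fits in memory:
 */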
int bch2_check_backpointers_to_extents(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
	int ret;

	while (1) {
		ret = bch2_get_btree_in_memory_pos(trans,
						   (1U << BTREE_ID_extents)|
						   (1U << BTREE_ID_reflink),
						   ~0,
						   start, &end);
		if (ret)
			break;

		if (!bbpos_cmp(start, BBPOS_MIN) &&
		    bbpos_cmp(end, BBPOS_MAX))
			bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
				    __func__, btree_nodes_fit_in_ram(c));

		if (bbpos_cmp(start, BBPOS_MIN) ||
		    bbpos_cmp(end, BBPOS_MAX)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "check_backpointers_to_extents(): ");
			bch2_bbpos_to_text(&buf, start);
			prt_str(&buf, "-");
			bch2_bbpos_to_text(&buf, end);

			bch_verbose(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		ret = bch2_check_backpointers_to_extents_pass(trans, start, end);
		if (ret || !bbpos_cmp(end, BBPOS_MAX))
			break;

		start = bbpos_successor(end);
	}
	bch2_trans_put(trans);

	bch_err_fn(c, ret);
	return ret;
}