xref: /linux/fs/bcachefs/backpointers.c (revision 7c057d35098613b2936c361aa8289590fef987ba)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "bbpos.h"
4 #include "alloc_background.h"
5 #include "backpointers.h"
6 #include "btree_cache.h"
7 #include "btree_update.h"
8 #include "btree_write_buffer.h"
9 #include "error.h"
10 
11 #include <linux/mm.h>
12 
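/*
 * Check whether the extent (or btree node key) k has a non-cached pointer
 * whose bucket and computed backpointer match the given pair:
 */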
13 static bool extent_matches_bp(struct bch_fs *c,
14 			      enum btree_id btree_id, unsigned level,
15 			      struct bkey_s_c k,
16 			      struct bpos bucket,
17 			      struct bch_backpointer bp)
18 {
19 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
20 	const union bch_extent_entry *entry;
21 	struct extent_ptr_decoded p;
22 
23 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
24 		struct bpos bucket2;
25 		struct bch_backpointer bp2;
26 
27 		if (p.ptr.cached)
28 			continue;
29 
30 		bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
31 				      &bucket2, &bp2);
32 		if (bpos_eq(bucket, bucket2) &&
33 		    !memcmp(&bp, &bp2, sizeof(bp)))
34 			return true;
35 	}
36 
37 	return false;
38 }
39 
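/*
 * Validate a KEY_TYPE_backpointer key: the value must be full sized, and the
 * key's position must be consistent with the bucket it maps to via
 * bp_pos_to_bucket()/bucket_pos_to_bp():
 */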
40 int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
41 			     int rw, struct printbuf *err)
42 {
43 	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
44 	struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
45 
46 	if (bkey_val_bytes(bp.k) < sizeof(*bp.v)) {
47 		prt_str(err, "incorrect value size");
48 		return -BCH_ERR_invalid_bkey;
49 	}
50 
51 	if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
52 		prt_str(err, "backpointer at wrong pos");
53 		return -BCH_ERR_invalid_bkey;
54 	}
55 
56 	return 0;
57 }
58 
59 void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
60 {
61 	prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
62 	       bch2_btree_ids[bp->btree_id],
63 	       bp->level,
64 	       (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
65 	       (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
66 	       bp->bucket_len);
67 	bch2_bpos_to_text(out, bp->pos);
68 }
69 
70 void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
71 {
72 	bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
73 }
74 
75 void bch2_backpointer_swab(struct bkey_s k)
76 {
77 	struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);
78 
79 	bp.v->bucket_offset	= swab32(bp.v->bucket_offset);
80 	bp.v->bucket_len	= swab32(bp.v->bucket_len);
81 	bch2_bpos_swab(&bp.v->pos);
82 }
83 
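/*
 * Backpointers live in two places: a small number inline in the bucket's
 * alloc key, the rest in the backpointers btree. In the bp_offset cursor used
 * by bch2_get_next_backpointer() and related helpers, offsets below
 * BACKPOINTER_OFFSET_MAX refer to the inline backpointers; offsets for
 * backpointers btree entries are biased by BACKPOINTER_OFFSET_MAX:
 */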
84 #define BACKPOINTER_OFFSET_MAX	((1ULL << 40) - 1)
85 
86 static inline int backpointer_cmp(struct bch_backpointer l, struct bch_backpointer r)
87 {
88 	return cmp_int(l.bucket_offset, r.bucket_offset);
89 }
90 
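/*
 * Delete the backpointer at bp_offset for the given bucket: looks in the
 * alloc key's inline backpointer array when bp_offset is below
 * BACKPOINTER_OFFSET_MAX, otherwise in the backpointers btree. Returns
 * -ENOENT if no backpointer matching bp is found:
 */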
91 static int bch2_backpointer_del_by_offset(struct btree_trans *trans,
92 					  struct bpos bucket,
93 					  u64 bp_offset,
94 					  struct bch_backpointer bp)
95 {
96 	struct bch_fs *c = trans->c;
97 	struct btree_iter iter;
98 	struct bkey_s_c k;
99 	int ret;
100 
101 	if (bp_offset < BACKPOINTER_OFFSET_MAX) {
102 		struct bch_backpointer *bps;
103 		struct bkey_i_alloc_v4 *a;
104 		unsigned i, nr;
105 
106 		bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
107 				     bucket,
108 				     BTREE_ITER_INTENT|
109 				     BTREE_ITER_SLOTS|
110 				     BTREE_ITER_WITH_UPDATES);
111 		k = bch2_btree_iter_peek_slot(&iter);
112 		ret = bkey_err(k);
113 		if (ret)
114 			goto err;
115 
116 		if (k.k->type != KEY_TYPE_alloc_v4) {
117 			ret = -ENOENT;
118 			goto err;
119 		}
120 
121 		a = bch2_alloc_to_v4_mut(trans, k);
122 		ret = PTR_ERR_OR_ZERO(a);
123 		if (ret)
124 			goto err;
125 		bps = alloc_v4_backpointers(&a->v);
126 		nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
127 
128 		for (i = 0; i < nr; i++) {
129 			if (bps[i].bucket_offset == bp_offset)
130 				goto found;
131 			if (bps[i].bucket_offset > bp_offset)
132 				break;
133 		}
134 
135 		ret = -ENOENT;
136 		goto err;
137 found:
138 		if (memcmp(&bps[i], &bp, sizeof(bp))) {
139 			ret = -ENOENT;
140 			goto err;
141 		}
142 		array_remove_item(bps, nr, i);
143 		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
144 		set_alloc_v4_u64s(a);
145 		ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
146 	} else {
147 		bp_offset -= BACKPOINTER_OFFSET_MAX;
148 
149 		bch2_trans_iter_init(trans, &iter, BTREE_ID_backpointers,
150 				     bucket_pos_to_bp(c, bucket, bp_offset),
151 				     BTREE_ITER_INTENT|
152 				     BTREE_ITER_SLOTS|
153 				     BTREE_ITER_WITH_UPDATES);
154 		k = bch2_btree_iter_peek_slot(&iter);
155 		ret = bkey_err(k);
156 		if (ret)
157 			goto err;
158 
159 		if (k.k->type != KEY_TYPE_backpointer ||
160 		    memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp))) {
161 			ret = -ENOENT;
162 			goto err;
163 		}
164 
165 		ret = bch2_btree_delete_at(trans, &iter, 0);
166 	}
167 err:
168 	bch2_trans_iter_exit(trans, &iter);
169 	return ret;
170 }
171 
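/*
 * Remove a backpointer from an alloc key's inline backpointer array, which is
 * kept sorted by bucket_offset; returns true if a matching backpointer was
 * found and removed:
 */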
172 bool bch2_bucket_backpointer_del(struct btree_trans *trans,
173 				 struct bkey_i_alloc_v4 *a,
174 				 struct bch_backpointer bp)
175 {
176 	struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
177 	unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
178 
179 	for (i = 0; i < nr; i++) {
180 		int cmp = backpointer_cmp(bps[i], bp) ?:
181 			memcmp(&bps[i], &bp, sizeof(bp));
182 		if (!cmp) {
183 			array_remove_item(bps, nr, i);
184 			SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
185 			set_alloc_v4_u64s(a);
186 			return true;
187 		}
188 		if (cmp >= 0)
189 			break;
190 	}
191 
192 	return false;
193 }
194 
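/*
 * Log when an insert finds an existing backpointer, or a delete can't find
 * the one it expects. This is only a fatal error (-EIO, filesystem flagged
 * inconsistent) once the backpointers fsck pass has completed; before that we
 * return 0 and the caller proceeds with the update:
 */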
195 static noinline int backpointer_mod_err(struct btree_trans *trans,
196 					struct bch_backpointer bp,
197 					struct bkey_s_c bp_k,
198 					struct bkey_s_c orig_k,
199 					bool insert)
200 {
201 	struct bch_fs *c = trans->c;
202 	struct printbuf buf = PRINTBUF;
203 
204 	if (insert) {
205 		prt_printf(&buf, "existing backpointer found when inserting ");
206 		bch2_backpointer_to_text(&buf, &bp);
207 		prt_newline(&buf);
208 		printbuf_indent_add(&buf, 2);
209 
210 		prt_printf(&buf, "found ");
211 		bch2_bkey_val_to_text(&buf, c, bp_k);
212 		prt_newline(&buf);
213 
214 		prt_printf(&buf, "for ");
215 		bch2_bkey_val_to_text(&buf, c, orig_k);
216 
217 		bch_err(c, "%s", buf.buf);
218 	} else if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
219 		prt_printf(&buf, "backpointer not found when deleting");
220 		prt_newline(&buf);
221 		printbuf_indent_add(&buf, 2);
222 
223 		prt_printf(&buf, "searching for ");
224 		bch2_backpointer_to_text(&buf, &bp);
225 		prt_newline(&buf);
226 
227 		prt_printf(&buf, "got ");
228 		bch2_bkey_val_to_text(&buf, c, bp_k);
229 		prt_newline(&buf);
230 
231 		prt_printf(&buf, "for ");
232 		bch2_bkey_val_to_text(&buf, c, orig_k);
233 
234 		bch_err(c, "%s", buf.buf);
235 	}
236 
237 	printbuf_exit(&buf);
238 
239 	if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
240 		bch2_inconsistent_error(c);
241 		return -EIO;
242 	} else {
243 		return 0;
244 	}
245 }
246 
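/*
 * Insert or delete a backpointer key directly in the backpointers btree,
 * bypassing the btree write buffer, after checking that the existing key (or
 * lack of one) matches what the caller expects:
 */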
247 int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
248 				struct bkey_i_alloc_v4 *a,
249 				struct bch_backpointer bp,
250 				struct bkey_s_c orig_k,
251 				bool insert)
252 {
253 	struct bch_fs *c = trans->c;
254 	struct bkey_i_backpointer *bp_k;
255 	struct btree_iter bp_iter;
256 	struct bkey_s_c k;
257 	int ret;
258 
259 	bp_k = bch2_trans_kmalloc_nomemzero(trans, sizeof(struct bkey_i_backpointer));
260 	ret = PTR_ERR_OR_ZERO(bp_k);
261 	if (ret)
262 		return ret;
263 
264 	bkey_backpointer_init(&bp_k->k_i);
265 	bp_k->k.p = bucket_pos_to_bp(c, a->k.p, bp.bucket_offset);
266 	bp_k->v = bp;
267 
268 	if (!insert) {
269 		bp_k->k.type = KEY_TYPE_deleted;
270 		set_bkey_val_u64s(&bp_k->k, 0);
271 	}
272 
273 	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
274 			     bucket_pos_to_bp(c, a->k.p, bp.bucket_offset),
275 			     BTREE_ITER_INTENT|
276 			     BTREE_ITER_SLOTS|
277 			     BTREE_ITER_WITH_UPDATES);
278 	k = bch2_btree_iter_peek_slot(&bp_iter);
279 	ret = bkey_err(k);
280 	if (ret)
281 		goto err;
282 
283 	if (insert
284 	    ? k.k->type
285 	    : (k.k->type != KEY_TYPE_backpointer ||
286 	       memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp)))) {
287 		ret = backpointer_mod_err(trans, bp, k, orig_k, insert);
288 		if (ret)
289 			goto err;
290 	}
291 
292 	ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
293 err:
294 	bch2_trans_iter_exit(trans, &bp_iter);
295 	return ret;
296 }
297 
298 /*
299  * Find the next backpointer >= *bp_offset:
300  */
301 int bch2_get_next_backpointer(struct btree_trans *trans,
302 			      struct bpos bucket, int gen,
303 			      u64 *bp_offset,
304 			      struct bch_backpointer *dst,
305 			      unsigned iter_flags)
306 {
307 	struct bch_fs *c = trans->c;
308 	struct bpos bp_pos, bp_end_pos;
309 	struct btree_iter alloc_iter, bp_iter = { NULL };
310 	struct bkey_s_c k;
311 	struct bkey_s_c_alloc_v4 a;
312 	size_t i;
313 	int ret;
314 
315 	if (*bp_offset == U64_MAX)
316 		return 0;
317 
318 	bp_pos = bucket_pos_to_bp(c, bucket,
319 				  max(*bp_offset, BACKPOINTER_OFFSET_MAX) - BACKPOINTER_OFFSET_MAX);
320 	bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);
321 
322 	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
323 			     bucket, BTREE_ITER_CACHED);
324 	k = bch2_btree_iter_peek_slot(&alloc_iter);
325 	ret = bkey_err(k);
326 	if (ret)
327 		goto out;
328 
329 	if (k.k->type != KEY_TYPE_alloc_v4)
330 		goto done;
331 
332 	a = bkey_s_c_to_alloc_v4(k);
333 	if (gen >= 0 && a.v->gen != gen)
334 		goto done;
335 
336 	for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++) {
337 		if (alloc_v4_backpointers_c(a.v)[i].bucket_offset < *bp_offset)
338 			continue;
339 
340 		*dst = alloc_v4_backpointers_c(a.v)[i];
341 		*bp_offset = dst->bucket_offset;
342 		goto out;
343 	}
344 
345 	for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
346 				     bp_pos, 0, k, ret) {
347 		if (bpos_ge(k.k->p, bp_end_pos))
348 			break;
349 
350 		if (k.k->type != KEY_TYPE_backpointer)
351 			continue;
352 
353 		*dst = *bkey_s_c_to_backpointer(k).v;
354 		*bp_offset = dst->bucket_offset + BACKPOINTER_OFFSET_MAX;
355 		goto out;
356 	}
357 done:
358 	*bp_offset = U64_MAX;
359 out:
360 	bch2_trans_iter_exit(trans, &bp_iter);
361 	bch2_trans_iter_exit(trans, &alloc_iter);
362 	return ret;
363 }
364 
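/*
 * Report a backpointer that doesn't match the extent or btree node it points
 * to. Only meaningful when backpointer updates bypass the btree write buffer
 * (bch2_backpointers_no_use_write_buffer); otherwise the mismatch may simply
 * be an update that hasn't been flushed yet, and we return early:
 */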
365 static void backpointer_not_found(struct btree_trans *trans,
366 				  struct bpos bucket,
367 				  u64 bp_offset,
368 				  struct bch_backpointer bp,
369 				  struct bkey_s_c k,
370 				  const char *thing_it_points_to)
371 {
372 	struct bch_fs *c = trans->c;
373 	struct printbuf buf = PRINTBUF;
374 
375 	if (likely(!bch2_backpointers_no_use_write_buffer))
376 		return;
377 
378 	prt_printf(&buf, "backpointer doesn't match %s it points to:\n  ",
379 		   thing_it_points_to);
380 	prt_printf(&buf, "bucket: ");
381 	bch2_bpos_to_text(&buf, bucket);
382 	prt_printf(&buf, "\n  ");
383 
384 	if (bp_offset >= BACKPOINTER_OFFSET_MAX) {
385 		struct bpos bp_pos =
386 			bucket_pos_to_bp(c, bucket,
387 					bp_offset - BACKPOINTER_OFFSET_MAX);
388 		prt_printf(&buf, "backpointer pos: ");
389 		bch2_bpos_to_text(&buf, bp_pos);
390 		prt_printf(&buf, "\n  ");
391 	}
392 
393 	bch2_backpointer_to_text(&buf, &bp);
394 	prt_printf(&buf, "\n  ");
395 	bch2_bkey_val_to_text(&buf, c, k);
396 	if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
397 		bch_err_ratelimited(c, "%s", buf.buf);
398 	else
399 		bch2_trans_inconsistent(trans, "%s", buf.buf);
400 
401 	printbuf_exit(&buf);
402 }
403 
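/*
 * Return the extent a backpointer points to, with iter pointing at it;
 * returns bkey_s_c_null (with iter exited) if the key no longer matches the
 * backpointer. Backpointers to btree nodes are handed off to
 * bch2_backpointer_get_node() in the bch2_backpointers_no_use_write_buffer
 * debug mode:
 */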
404 struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
405 					 struct btree_iter *iter,
406 					 struct bpos bucket,
407 					 u64 bp_offset,
408 					 struct bch_backpointer bp)
409 {
410 	struct bch_fs *c = trans->c;
411 	struct bkey_s_c k;
412 
413 	bch2_trans_node_iter_init(trans, iter,
414 				  bp.btree_id,
415 				  bp.pos,
416 				  0,
417 				  min(bp.level, c->btree_roots[bp.btree_id].level),
418 				  0);
419 	k = bch2_btree_iter_peek_slot(iter);
420 	if (bkey_err(k)) {
421 		bch2_trans_iter_exit(trans, iter);
422 		return k;
423 	}
424 
425 	if (bp.level == c->btree_roots[bp.btree_id].level + 1)
426 		k = bkey_i_to_s_c(&c->btree_roots[bp.btree_id].key);
427 
428 	if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
429 		return k;
430 
431 	bch2_trans_iter_exit(trans, iter);
432 
433 	if (unlikely(bch2_backpointers_no_use_write_buffer)) {
434 		if (bp.level) {
435 			struct btree *b;
436 
437 			/*
438 			 * If a backpointer for a btree node wasn't found, it may be
439 			 * because it was overwritten by a new btree node that hasn't
440 			 * been written out yet - backpointer_get_node() checks for
441 			 * this:
442 			 */
443 			b = bch2_backpointer_get_node(trans, iter, bucket, bp_offset, bp);
444 			if (!IS_ERR_OR_NULL(b))
445 				return bkey_i_to_s_c(&b->key);
446 
447 			bch2_trans_iter_exit(trans, iter);
448 
449 			if (IS_ERR(b))
450 				return bkey_s_c_err(PTR_ERR(b));
451 			return bkey_s_c_null;
452 		}
453 
454 		backpointer_not_found(trans, bucket, bp_offset, bp, k, "extent");
455 	}
456 
457 	return bkey_s_c_null;
458 }
459 
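/*
 * Return the btree node a backpointer points to, with iter pointing at it:
 * returns -BCH_ERR_backpointer_to_overwritten_btree_node if the node is being
 * rewritten and hasn't been written out yet, or NULL if no matching node was
 * found:
 */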
460 struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
461 					struct btree_iter *iter,
462 					struct bpos bucket,
463 					u64 bp_offset,
464 					struct bch_backpointer bp)
465 {
466 	struct bch_fs *c = trans->c;
467 	struct btree *b;
468 
469 	BUG_ON(!bp.level);
470 
471 	bch2_trans_node_iter_init(trans, iter,
472 				  bp.btree_id,
473 				  bp.pos,
474 				  0,
475 				  bp.level - 1,
476 				  0);
477 	b = bch2_btree_iter_peek_node(iter);
478 	if (IS_ERR(b))
479 		goto err;
480 
481 	if (b && extent_matches_bp(c, bp.btree_id, bp.level,
482 				   bkey_i_to_s_c(&b->key),
483 				   bucket, bp))
484 		return b;
485 
486 	if (b && btree_node_will_make_reachable(b)) {
487 		b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
488 	} else {
489 		backpointer_not_found(trans, bucket, bp_offset, bp,
490 				      bkey_i_to_s_c(&b->key), "btree node");
491 		b = NULL;
492 	}
493 err:
494 	bch2_trans_iter_exit(trans, iter);
495 	return b;
496 }
497 
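/*
 * fsck helper: delete backpointers that reference a nonexistent device, or
 * whose bucket has no alloc_v4 key:
 */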
498 static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
499 					struct bkey_s_c k)
500 {
501 	struct bch_fs *c = trans->c;
502 	struct btree_iter alloc_iter = { NULL };
503 	struct bch_dev *ca;
504 	struct bkey_s_c alloc_k;
505 	struct printbuf buf = PRINTBUF;
506 	int ret = 0;
507 
508 	if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
509 			"backpointer for missing device:\n%s",
510 			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
511 		ret = bch2_btree_delete_at(trans, bp_iter, 0);
512 		goto out;
513 	}
514 
515 	ca = bch_dev_bkey_exists(c, k.k->p.inode);
516 
517 	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
518 			     bp_pos_to_bucket(c, k.k->p), 0);
519 
520 	alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
521 	ret = bkey_err(alloc_k);
522 	if (ret)
523 		goto out;
524 
525 	if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
526 			"backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
527 			alloc_iter.pos.inode, alloc_iter.pos.offset,
528 			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
529 		ret = bch2_btree_delete_at(trans, bp_iter, 0);
530 		goto out;
531 	}
532 out:
533 fsck_err:
534 	bch2_trans_iter_exit(trans, &alloc_iter);
535 	printbuf_exit(&buf);
536 	return ret;
537 }
538 
539 /* verify that every backpointer has a corresponding alloc key */
540 int bch2_check_btree_backpointers(struct bch_fs *c)
541 {
542 	struct btree_iter iter;
543 	struct bkey_s_c k;
544 
545 	return bch2_trans_run(c,
546 		for_each_btree_key_commit(&trans, iter,
547 			BTREE_ID_backpointers, POS_MIN, 0, k,
548 			NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
549 		  bch2_check_btree_backpointer(&trans, &iter, k)));
550 }
551 
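/*
 * Check that the backpointer bp for orig_k exists, first in the bucket's
 * inline backpointer list and then in the backpointers btree; buckets outside
 * [bucket_start, bucket_end] are skipped. On a mismatch we first flush the
 * btree write buffer and retry (tracked via last_flushed_pos); if the
 * backpointer is still missing, fsck recreates it:
 */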
552 static int check_bp_exists(struct btree_trans *trans,
553 			   struct bpos bucket_pos,
554 			   struct bch_backpointer bp,
555 			   struct bkey_s_c orig_k,
556 			   struct bpos bucket_start,
557 			   struct bpos bucket_end,
558 			   struct bpos *last_flushed_pos)
559 {
560 	struct bch_fs *c = trans->c;
561 	struct btree_iter alloc_iter, bp_iter = { NULL };
562 	struct printbuf buf = PRINTBUF;
563 	struct bkey_s_c alloc_k, bp_k;
564 	int ret;
565 
566 	if (bpos_lt(bucket_pos, bucket_start) ||
567 	    bpos_gt(bucket_pos, bucket_end))
568 		return 0;
569 
570 	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, bucket_pos, 0);
571 	alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
572 	ret = bkey_err(alloc_k);
573 	if (ret)
574 		goto err;
575 
576 	if (alloc_k.k->type == KEY_TYPE_alloc_v4) {
577 		struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(alloc_k);
578 		const struct bch_backpointer *bps = alloc_v4_backpointers_c(a.v);
579 		unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(a.v);
580 
581 		for (i = 0; i < nr; i++) {
582 			int cmp = backpointer_cmp(bps[i], bp) ?:
583 				memcmp(&bps[i], &bp, sizeof(bp));
584 			if (!cmp)
585 				goto out;
586 			if (cmp >= 0)
587 				break;
588 		}
589 	} else {
590 		goto missing;
591 	}
592 
593 	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
594 			     bucket_pos_to_bp(c, bucket_pos, bp.bucket_offset),
595 			     0);
596 	bp_k = bch2_btree_iter_peek_slot(&bp_iter);
597 	ret = bkey_err(bp_k);
598 	if (ret)
599 		goto err;
600 
601 	if (bp_k.k->type != KEY_TYPE_backpointer ||
602 	    memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
603 		if (!bpos_eq(*last_flushed_pos, orig_k.k->p)) {
604 			*last_flushed_pos = orig_k.k->p;
605 			ret = bch2_btree_write_buffer_flush_sync(trans) ?:
606 				-BCH_ERR_transaction_restart_write_buffer_flush;
607 			goto out;
608 		}
609 		goto missing;
610 	}
611 out:
612 err:
613 fsck_err:
614 	bch2_trans_iter_exit(trans, &bp_iter);
615 	bch2_trans_iter_exit(trans, &alloc_iter);
616 	printbuf_exit(&buf);
617 	return ret;
618 missing:
619 
620 	prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
621 	       bch2_btree_ids[bp.btree_id], bp.level);
622 	bch2_bkey_val_to_text(&buf, c, orig_k);
623 	prt_printf(&buf, "\nbp pos ");
624 	bch2_bpos_to_text(&buf, bp_iter.pos);
625 
626 	if (c->sb.version < bcachefs_metadata_version_backpointers ||
627 	    c->opts.reconstruct_alloc ||
628 	    fsck_err(c, "%s", buf.buf)) {
629 		struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, alloc_k);
630 
631 		ret   = PTR_ERR_OR_ZERO(a) ?:
632 			bch2_bucket_backpointer_mod(trans, a, bp, orig_k, true);
633 	}
634 
635 	goto out;
636 }
637 
638 static int check_extent_to_backpointers(struct btree_trans *trans,
639 					struct btree_iter *iter,
640 					struct bpos bucket_start,
641 					struct bpos bucket_end,
642 					struct bpos *last_flushed_pos)
643 {
644 	struct bch_fs *c = trans->c;
645 	struct bkey_ptrs_c ptrs;
646 	const union bch_extent_entry *entry;
647 	struct extent_ptr_decoded p;
648 	struct bkey_s_c k;
649 	int ret;
650 
651 	k = bch2_btree_iter_peek_all_levels(iter);
652 	ret = bkey_err(k);
653 	if (ret)
654 		return ret;
655 	if (!k.k)
656 		return 0;
657 
658 	ptrs = bch2_bkey_ptrs_c(k);
659 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
660 		struct bpos bucket_pos;
661 		struct bch_backpointer bp;
662 
663 		if (p.ptr.cached)
664 			continue;
665 
666 		bch2_extent_ptr_to_bp(c, iter->btree_id, iter->path->level,
667 				      k, p, &bucket_pos, &bp);
668 
669 		ret = check_bp_exists(trans, bucket_pos, bp, k,
670 				      bucket_start, bucket_end,
671 				      last_flushed_pos);
672 		if (ret)
673 			return ret;
674 	}
675 
676 	return 0;
677 }
678 
679 static int check_btree_root_to_backpointers(struct btree_trans *trans,
680 					    enum btree_id btree_id,
681 					    struct bpos bucket_start,
682 					    struct bpos bucket_end,
683 					    struct bpos *last_flushed_pos)
684 {
685 	struct bch_fs *c = trans->c;
686 	struct btree_iter iter;
687 	struct btree *b;
688 	struct bkey_s_c k;
689 	struct bkey_ptrs_c ptrs;
690 	struct extent_ptr_decoded p;
691 	const union bch_extent_entry *entry;
692 	int ret;
693 
694 	bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
695 				  c->btree_roots[btree_id].level, 0);
696 	b = bch2_btree_iter_peek_node(&iter);
697 	ret = PTR_ERR_OR_ZERO(b);
698 	if (ret)
699 		goto err;
700 
701 	BUG_ON(b != btree_node_root(c, b));
702 
703 	k = bkey_i_to_s_c(&b->key);
704 	ptrs = bch2_bkey_ptrs_c(k);
705 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
706 		struct bpos bucket_pos;
707 		struct bch_backpointer bp;
708 
709 		if (p.ptr.cached)
710 			continue;
711 
712 		bch2_extent_ptr_to_bp(c, iter.btree_id, iter.path->level + 1,
713 				      k, p, &bucket_pos, &bp);
714 
715 		ret = check_bp_exists(trans, bucket_pos, bp, k,
716 				      bucket_start, bucket_end,
717 				      last_flushed_pos);
718 		if (ret)
719 			goto err;
720 	}
721 err:
722 	bch2_trans_iter_exit(trans, &iter);
723 	return ret;
724 }
725 
726 static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
727 {
728 	return (struct bbpos) {
729 		.btree	= bp.btree_id,
730 		.pos	= bp.pos,
731 	};
732 }
733 
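/*
 * Estimate how many btree nodes fit in half of system memory; used to size
 * the chunks the backpointers fsck passes work through:
 */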
734 static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
735 {
736 	struct sysinfo i;
737 	u64 mem_bytes;
738 
739 	si_meminfo(&i);
740 	mem_bytes = i.totalram * i.mem_unit;
741 	return (mem_bytes >> 1) / btree_bytes(c);
742 }
743 
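/*
 * Walk the selected btrees from start and return in *end the position at
 * which the nodes seen so far would no longer fit in memory, per
 * btree_nodes_fit_in_ram():
 */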
744 int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
745 				 unsigned btree_leaf_mask,
746 				 unsigned btree_interior_mask,
747 				 struct bbpos start, struct bbpos *end)
748 {
749 	struct btree_iter iter;
750 	struct bkey_s_c k;
751 	size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
752 	enum btree_id btree;
753 	int ret = 0;
754 
755 	for (btree = start.btree; btree < BTREE_ID_NR && !ret; btree++) {
756 		unsigned depth = ((1U << btree) & btree_leaf_mask) ? 1 : 2;
757 
758 		if (!((1U << btree) & btree_leaf_mask) &&
759 		    !((1U << btree) & btree_interior_mask))
760 			continue;
761 
762 		bch2_trans_node_iter_init(trans, &iter, btree,
763 					  btree == start.btree ? start.pos : POS_MIN,
764 					  0, depth, 0);
765 		/*
766 		 * for_each_btree_key_continue() doesn't check the return value
767 		 * from bch2_btree_iter_advance(), which is needed when
768 		 * iterating over interior nodes where we'll see keys at
769 		 * SPOS_MAX:
770 		 */
771 		do {
772 			k = __bch2_btree_iter_peek_and_restart(trans, &iter, 0);
773 			ret = bkey_err(k);
774 			if (!k.k || ret)
775 				break;
776 
777 			--btree_nodes;
778 			if (!btree_nodes) {
779 				*end = BBPOS(btree, k.k->p);
780 				bch2_trans_iter_exit(trans, &iter);
781 				return 0;
782 			}
783 		} while (bch2_btree_iter_advance(&iter));
784 		bch2_trans_iter_exit(trans, &iter);
785 	}
786 
787 	*end = BBPOS_MAX;
788 	return ret;
789 }
790 
791 static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
792 						   struct bpos bucket_start,
793 						   struct bpos bucket_end)
794 {
795 	struct btree_iter iter;
796 	enum btree_id btree_id;
797 	struct bpos last_flushed_pos = SPOS_MAX;
798 	int ret = 0;
799 
800 	for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
801 		unsigned depth = btree_type_has_ptrs(btree_id) ? 0 : 1;
802 
803 		bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
804 					  depth,
805 					  BTREE_ITER_ALL_LEVELS|
806 					  BTREE_ITER_PREFETCH);
807 
808 		do {
809 			ret = commit_do(trans, NULL, NULL,
810 					BTREE_INSERT_LAZY_RW|
811 					BTREE_INSERT_NOFAIL,
812 					check_extent_to_backpointers(trans, &iter,
813 								bucket_start, bucket_end,
814 								&last_flushed_pos));
815 			if (ret)
816 				break;
817 		} while (!bch2_btree_iter_advance(&iter));
818 
819 		bch2_trans_iter_exit(trans, &iter);
820 
821 		if (ret)
822 			break;
823 
824 		ret = commit_do(trans, NULL, NULL,
825 				BTREE_INSERT_LAZY_RW|
826 				BTREE_INSERT_NOFAIL,
827 				check_btree_root_to_backpointers(trans, btree_id,
828 							bucket_start, bucket_end,
829 							&last_flushed_pos));
830 		if (ret)
831 			break;
832 	}
833 	return ret;
834 }
835 
836 static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
837 					 struct bpos bucket)
838 {
839 	return bch2_dev_exists2(c, bucket.inode)
840 		? bucket_pos_to_bp(c, bucket, 0)
841 		: bucket;
842 }
843 
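/*
 * Advance the alloc and backpointers btrees in lockstep from start and return
 * in *end the alloc position at which the nodes seen so far would no longer
 * fit in memory:
 */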
844 int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
845 				 struct bpos start, struct bpos *end)
846 {
847 	struct btree_iter alloc_iter;
848 	struct btree_iter bp_iter;
849 	struct bkey_s_c alloc_k, bp_k;
850 	size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
851 	bool alloc_end = false, bp_end = false;
852 	int ret = 0;
853 
854 	bch2_trans_node_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
855 				  start, 0, 1, 0);
856 	bch2_trans_node_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
857 				  bucket_pos_to_bp_safe(trans->c, start), 0, 1, 0);
858 	while (1) {
859 		alloc_k = !alloc_end
860 			? __bch2_btree_iter_peek_and_restart(trans, &alloc_iter, 0)
861 			: bkey_s_c_null;
862 		bp_k = !bp_end
863 			? __bch2_btree_iter_peek_and_restart(trans, &bp_iter, 0)
864 			: bkey_s_c_null;
865 
866 		ret = bkey_err(alloc_k) ?: bkey_err(bp_k);
867 		if ((!alloc_k.k && !bp_k.k) || ret) {
868 			*end = SPOS_MAX;
869 			break;
870 		}
871 
872 		--btree_nodes;
873 		if (!btree_nodes) {
874 			*end = alloc_k.k->p;
875 			break;
876 		}
877 
878 		if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
879 		    bpos_lt(bucket_pos_to_bp_safe(trans->c, alloc_iter.pos), bp_iter.pos)) {
880 			if (!bch2_btree_iter_advance(&alloc_iter))
881 				alloc_end = true;
882 		} else {
883 			if (!bch2_btree_iter_advance(&bp_iter))
884 				bp_end = true;
885 		}
886 	}
887 	bch2_trans_iter_exit(trans, &bp_iter);
888 	bch2_trans_iter_exit(trans, &alloc_iter);
889 	return ret;
890 }
891 
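/*
 * Verify that every extent has corresponding backpointers, in chunks of alloc
 * and backpointer info sized to fit in memory:
 */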
892 int bch2_check_extents_to_backpointers(struct bch_fs *c)
893 {
894 	struct btree_trans trans;
895 	struct bpos start = POS_MIN, end;
896 	int ret;
897 
898 	bch2_trans_init(&trans, c, 0, 0);
899 	while (1) {
900 		ret = bch2_get_alloc_in_memory_pos(&trans, start, &end);
901 		if (ret)
902 			break;
903 
904 		if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
905 			bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
906 				    __func__, btree_nodes_fit_in_ram(c));
907 
908 		if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
909 			struct printbuf buf = PRINTBUF;
910 
911 			prt_str(&buf, "check_extents_to_backpointers(): ");
912 			bch2_bpos_to_text(&buf, start);
913 			prt_str(&buf, "-");
914 			bch2_bpos_to_text(&buf, end);
915 
916 			bch_verbose(c, "%s", buf.buf);
917 			printbuf_exit(&buf);
918 		}
919 
920 		ret = bch2_check_extents_to_backpointers_pass(&trans, start, end);
921 		if (ret || bpos_eq(end, SPOS_MAX))
922 			break;
923 
924 		start = bpos_successor(end);
925 	}
926 	bch2_trans_exit(&trans);
927 
928 	return ret;
929 }
930 
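/*
 * Check that the backpointer at *bp_offset points to an extent or btree node
 * that still exists; if not, fsck deletes it. Backpointers whose targets fall
 * outside [start, end] (the range currently held in memory) are skipped:
 */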
931 static int check_one_backpointer(struct btree_trans *trans,
932 				 struct bpos bucket,
933 				 u64 *bp_offset,
934 				 struct bbpos start,
935 				 struct bbpos end)
936 {
937 	struct btree_iter iter;
938 	struct bch_backpointer bp;
939 	struct bbpos pos;
940 	struct bkey_s_c k;
941 	struct printbuf buf = PRINTBUF;
942 	int ret;
943 
944 	ret = bch2_get_next_backpointer(trans, bucket, -1, bp_offset, &bp, 0);
945 	if (ret || *bp_offset == U64_MAX)
946 		return ret;
947 
948 	pos = bp_to_bbpos(bp);
949 	if (bbpos_cmp(pos, start) < 0 ||
950 	    bbpos_cmp(pos, end) > 0)
951 		return 0;
952 
953 	k = bch2_backpointer_get_key(trans, &iter, bucket, *bp_offset, bp);
954 	ret = bkey_err(k);
955 	if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
956 		return 0;
957 	if (ret)
958 		return ret;
959 
960 	if (fsck_err_on(!k.k, trans->c,
961 			"%s backpointer points to missing extent\n%s",
962 			*bp_offset < BACKPOINTER_OFFSET_MAX ? "alloc" : "btree",
963 			(bch2_backpointer_to_text(&buf, &bp), buf.buf))) {
964 		ret = bch2_backpointer_del_by_offset(trans, bucket, *bp_offset, bp);
965 		if (ret == -ENOENT)
966 			bch_err(trans->c, "backpointer at %llu not found", *bp_offset);
967 	}
968 
969 	bch2_trans_iter_exit(trans, &iter);
970 fsck_err:
971 	printbuf_exit(&buf);
972 	return ret;
973 }
974 
975 static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
976 						   struct bbpos start,
977 						   struct bbpos end)
978 {
979 	struct btree_iter iter;
980 	struct bkey_s_c k;
981 	int ret = 0;
982 
983 	for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
984 			   BTREE_ITER_PREFETCH, k, ret) {
985 		u64 bp_offset = 0;
986 
987 		while (!(ret = commit_do(trans, NULL, NULL,
988 					 BTREE_INSERT_LAZY_RW|
989 					 BTREE_INSERT_NOFAIL,
990 				check_one_backpointer(trans, iter.pos, &bp_offset, start, end))) &&
991 		       bp_offset < U64_MAX)
992 			bp_offset++;
993 
994 		if (ret)
995 			break;
996 	}
997 	bch2_trans_iter_exit(trans, &iter);
998 	return ret < 0 ? ret : 0;
999 }
1000 
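/*
 * Verify that every backpointer points to a live extent or btree node, in
 * chunks of btree nodes sized to fit in memory:
 */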
1001 int bch2_check_backpointers_to_extents(struct bch_fs *c)
1002 {
1003 	struct btree_trans trans;
1004 	struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
1005 	int ret;
1006 
1007 	bch2_trans_init(&trans, c, 0, 0);
1008 	while (1) {
1009 		ret = bch2_get_btree_in_memory_pos(&trans,
1010 						   (1U << BTREE_ID_extents)|
1011 						   (1U << BTREE_ID_reflink),
1012 						   ~0,
1013 						   start, &end);
1014 		if (ret)
1015 			break;
1016 
1017 		if (!bbpos_cmp(start, BBPOS_MIN) &&
1018 		    bbpos_cmp(end, BBPOS_MAX))
1019 			bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
1020 				    __func__, btree_nodes_fit_in_ram(c));
1021 
1022 		if (bbpos_cmp(start, BBPOS_MIN) ||
1023 		    bbpos_cmp(end, BBPOS_MAX)) {
1024 			struct printbuf buf = PRINTBUF;
1025 
1026 			prt_str(&buf, "check_backpointers_to_extents(): ");
1027 			bch2_bbpos_to_text(&buf, start);
1028 			prt_str(&buf, "-");
1029 			bch2_bbpos_to_text(&buf, end);
1030 
1031 			bch_verbose(c, "%s", buf.buf);
1032 			printbuf_exit(&buf);
1033 		}
1034 
1035 		ret = bch2_check_backpointers_to_extents_pass(&trans, start, end);
1036 		if (ret || !bbpos_cmp(end, BBPOS_MAX))
1037 			break;
1038 
1039 		start = bbpos_successor(end);
1040 	}
1041 	bch2_trans_exit(&trans);
1042 
1043 	return ret;
1044 }
1045