// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "scrub/repair.h"

/*
 * Set us up to scrub reference count btrees.
 */
int
xchk_setup_ag_refcountbt(
	struct xfs_scrub	*sc)
{
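	/*
	 * The drain fsgate lets scrub wait for chains of deferred ops
	 * targeting this AG to finish before we cross-reference
	 * metadata, so that we don't trip over transient states.
	 */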
	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	if (xchk_could_repair(sc)) {
		int		error;

		error = xrep_setup_ag_refcountbt(sc);
		if (error)
			return error;
	}

	return xchk_setup_ag_btree(sc, false);
}

/* Reference count btree scrubber. */

/*
 * Confirming Reference Counts via Reverse Mappings
 *
 * We want to count the reverse mappings overlapping a refcount record
 * (bno, len, refcount), allowing for the possibility that some of the
 * overlap may come from smaller adjoining reverse mappings, while some
 * comes from single extents which overlap the range entirely.  The
 * outer loop is as follows:
 *
 * 1. For all reverse mappings overlapping the refcount extent,
 *    a. If a given rmap completely overlaps, mark it as seen.
 *    b. Otherwise, record the fragment (in agbno order) for later
 *       processing.
 *
 * Once we've seen all the rmaps, we know that for all blocks in the
 * refcount record we want to find $refcount owners and we've already
 * visited $seen extents that overlap all the blocks.  Therefore, we
 * need to find ($refcount - $seen) owners for every block in the
 * extent; call that quantity $target_nr.  Proceed as follows:
 *
 * 2. Pull the first $target_nr fragments from the list; all of them
 *    should start at or before the start of the extent.
 *    Call this subset of fragments the working set.
 * 3. Until there are no more unprocessed fragments,
 *    a. Find the shortest fragments in the set and remove them.
 *    b. Note the block number of the end of these fragments.
 *    c. Pull the same number of fragments from the list.  All of these
 *       fragments should start at the block number recorded in the
 *       previous step.
 *    d. Put those fragments in the set.
 * 4. Check that there are $target_nr fragments remaining in the list,
 *    and that they all end at or beyond the end of the refcount extent.
 *
 * If the refcount is correct, all the check conditions in the algorithm
 * should always hold true.  If not, the refcount is incorrect.
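 *
 * Worked example with hypothetical numbers: consider a record covering
 * agblocks [10, 20) with refcount == 3, and one rmap [9, 21) that
 * overlaps the whole record, so $seen == 1 and $target_nr == 2.
 * Suppose the fragments, in agbno order, are [8, 14), [10, 16),
 * [14, 20) and [16, 22).  Step 2 pulls the two fragments starting at
 * or before block 10.  Step 3 retires [8, 14) and pulls in [14, 20),
 * then retires [10, 16) and pulls in [16, 22).  The remaining two
 * fragments both end at or beyond block 20, so the record passes and
 * $seen becomes 3.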
 */
struct xchk_refcnt_frag {
	struct list_head	list;
	struct xfs_rmap_irec	rm;
};

struct xchk_refcnt_check {
	struct xfs_scrub	*sc;
	struct list_head	fragments;

	/* refcount extent we're examining */
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;

	/* number of owners seen */
	xfs_nlink_t		seen;
};

/*
 * Decide if the given rmap is large enough that we can redeem it
 * towards refcount verification now, or if it's a fragment, in
 * which case we'll hang onto it in the hopes that we'll later
 * discover that we've collected exactly the correct number of
 * fragments as the refcountbt says we should have.
 */
STATIC int
xchk_refcountbt_rmap_check(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xchk_refcnt_check	*refchk = priv;
	struct xchk_refcnt_frag		*frag;
	xfs_agblock_t			rm_last;
	xfs_agblock_t			rc_last;
	int				error = 0;

	if (xchk_should_terminate(refchk->sc, &error))
		return error;

	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
	rc_last = refchk->bno + refchk->len - 1;

	/* Confirm that a single-owner refc extent is a CoW stage. */
	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
		xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
		return 0;
	}

	if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
		/*
		 * The rmap overlaps the refcount record, so we can confirm
		 * one refcount owner seen.
		 */
		refchk->seen++;
	} else {
		/*
		 * This rmap covers only part of the refcount record, so
		 * save the fragment for later processing.  If the rmapbt
		 * is healthy each rmap_irec we see will be in agbno order
		 * so we don't need insertion sort here.
		 */
		frag = kmalloc(sizeof(struct xchk_refcnt_frag),
				XCHK_GFP_FLAGS);
		if (!frag)
			return -ENOMEM;
		memcpy(&frag->rm, rec, sizeof(frag->rm));
		list_add_tail(&frag->list, &refchk->fragments);
	}

	return 0;
}

/*
 * Given a bunch of rmap fragments, iterate through them, keeping
 * a running tally of the refcount.  If this ever deviates from
 * what we expect (which is the refcountbt's refcount minus the
 * number of extents that totally covered the refcountbt extent),
 * we have a refcountbt error.
 */
STATIC void
xchk_refcountbt_process_rmap_fragments(
	struct xchk_refcnt_check	*refchk)
{
	struct list_head		worklist;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	xfs_agblock_t			bno;
	xfs_agblock_t			rbno;
	xfs_agblock_t			next_rbno;
	xfs_nlink_t			nr;
	xfs_nlink_t			target_nr;

	target_nr = refchk->refcount - refchk->seen;
	if (target_nr == 0)
		return;

	/*
	 * There are (refchk->refcount - refchk->seen) references we
	 * haven't found yet.  Pull that many off the fragment list and
	 * figure out where the smallest rmap ends (and therefore the
	 * next rmap should start).  All the rmaps we pull off should
	 * start at or before the beginning of the refcount record's
	 * range.
	 */
	INIT_LIST_HEAD(&worklist);
	rbno = NULLAGBLOCK;

	/* Make sure the fragments actually /are/ in agbno order. */
	bno = 0;
	list_for_each_entry(frag, &refchk->fragments, list) {
		if (frag->rm.rm_startblock < bno)
			goto done;
		bno = frag->rm.rm_startblock;
	}

	/*
	 * Find all the rmaps that start at or before the refc extent,
	 * and put them on the worklist.
	 */
	nr = 0;
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
			break;
		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
		if (bno < rbno)
			rbno = bno;
		list_move_tail(&frag->list, &worklist);
		nr++;
	}

	/*
	 * We should have found exactly $target_nr rmap fragments starting
	 * at or before the refcount extent.
	 */
	if (nr != target_nr)
		goto done;

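	/*
	 * Walk forward through the fragments.  Each pass retires the
	 * worklist fragments ending at rbno and replaces them with
	 * fragments starting at rbno, which keeps every block between
	 * the start of the record and rbno covered by exactly
	 * $target_nr fragments.
	 */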
	while (!list_empty(&refchk->fragments)) {
		/* Discard any fragments ending at rbno from the worklist. */
		nr = 0;
		next_rbno = NULLAGBLOCK;
		list_for_each_entry_safe(frag, n, &worklist, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (bno != rbno) {
				if (bno < next_rbno)
					next_rbno = bno;
				continue;
			}
			list_del(&frag->list);
			kfree(frag);
			nr++;
		}

		/* Try to add nr rmaps starting at rbno to the worklist. */
		list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (frag->rm.rm_startblock != rbno)
				goto done;
			list_move_tail(&frag->list, &worklist);
			if (next_rbno > bno)
				next_rbno = bno;
			nr--;
			if (nr == 0)
				break;
		}

		/*
		 * If we get here and nr > 0, this means that we added fewer
		 * items to the worklist than we discarded because the fragment
		 * list ran out of items.  Therefore, we cannot maintain the
		 * required refcount.  Something is wrong, so we're done.
		 */
		if (nr)
			goto done;

		rbno = next_rbno;
	}

	/*
	 * Make sure the last extent we processed ends at or beyond
	 * the end of the refcount extent.
	 */
	if (rbno < refchk->bno + refchk->len)
		goto done;

	/* Actually record us having seen the remaining refcount. */
	refchk->seen = refchk->refcount;
done:
	/* Delete fragments and work list. */
	list_for_each_entry_safe(frag, n, &worklist, list) {
		list_del(&frag->list);
		kfree(frag);
	}
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		list_del(&frag->list);
		kfree(frag);
	}
}

/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
xchk_refcountbt_xref_rmap(
	struct xfs_scrub		*sc,
	const struct xfs_refcount_irec	*irec)
{
	struct xchk_refcnt_check	refchk = {
		.sc			= sc,
		.bno			= irec->rc_startblock,
		.len			= irec->rc_blockcount,
		.refcount		= irec->rc_refcount,
		.seen			= 0,
	};
	struct xfs_rmap_irec		low;
	struct xfs_rmap_irec		high;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Cross-reference with the rmapbt to confirm the refcount. */
	memset(&low, 0, sizeof(low));
	low.rm_startblock = irec->rc_startblock;
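	/*
	 * Filling the high key with 0xFF sets every field other than
	 * rm_startblock to its maximum, so the range query returns all
	 * rmaps overlapping the refcount record.
	 */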
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = irec->rc_startblock + irec->rc_blockcount - 1;

	INIT_LIST_HEAD(&refchk.fragments);
	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
			&xchk_refcountbt_rmap_check, &refchk);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		goto out_free;

	xchk_refcountbt_process_rmap_fragments(&refchk);
	if (irec->rc_refcount != refchk.seen) {
		trace_xchk_refcount_incorrect(sc->sa.pag, irec, refchk.seen);
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	}

out_free:
	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
		list_del(&frag->list);
		kfree(frag);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_refcountbt_xref(
	struct xfs_scrub		*sc,
	const struct xfs_refcount_irec	*irec)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, irec->rc_startblock, irec->rc_blockcount);
	xchk_xref_is_not_inode_chunk(sc, irec->rc_startblock,
			irec->rc_blockcount);
	xchk_refcountbt_xref_rmap(sc, irec);
}

struct xchk_refcbt_records {
	/* Previous refcount record. */
	struct xfs_refcount_irec prev_rec;

	/* The next AG block where we aren't expecting shared extents. */
	xfs_agblock_t		next_unshared_agbno;

	/* Number of CoW blocks we expect. */
	xfs_agblock_t		cow_blocks;

	/* Was the last record a shared or CoW staging extent? */
	enum xfs_refc_domain	prev_domain;
};

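/*
 * Check that each rmap begins at or after the previous one ends.
 * Overlapping rmaps here mean the blocks are shared, which the gap in
 * the refcount records says cannot happen; cancelling the query makes
 * the caller flag the cross-reference as corrupt.
 */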
STATIC int
xchk_refcountbt_rmap_check_gap(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	xfs_agblock_t			*next_bno = priv;

	if (*next_bno != NULLAGBLOCK && rec->rm_startblock < *next_bno)
		return -ECANCELED;

	*next_bno = rec->rm_startblock + rec->rm_blockcount;
	return 0;
}

/*
 * Make sure that a gap in the reference count records does not correspond to
 * overlapping records (i.e. shared extents) in the reverse mappings.
 */
static inline void
xchk_refcountbt_xref_gaps(
	struct xfs_scrub	*sc,
	struct xchk_refcbt_records *rrc,
	xfs_agblock_t		bno)
{
	struct xfs_rmap_irec	low;
	struct xfs_rmap_irec	high;
	xfs_agblock_t		next_bno = NULLAGBLOCK;
	int			error;

	if (bno <= rrc->next_unshared_agbno || !sc->sa.rmap_cur ||
	    xchk_skip_xref(sc->sm))
		return;

	memset(&low, 0, sizeof(low));
	low.rm_startblock = rrc->next_unshared_agbno;
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno - 1;

	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
			xchk_refcountbt_rmap_check_gap, &next_bno);
	if (error == -ECANCELED)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	else
		xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur);
}

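/*
 * Decide if two adjacent refcount records should have been merged into
 * one.  For example (hypothetical values), records (startblock 10,
 * blockcount 4, refcount 2) and (14, 6, 2) in the same domain should
 * have been stored as a single record (10, 10, 2).
 */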
static inline bool
xchk_refcount_mergeable(
	struct xchk_refcbt_records	*rrc,
	const struct xfs_refcount_irec	*r2)
{
	const struct xfs_refcount_irec	*r1 = &rrc->prev_rec;

	/* Ignore if prev_rec is not yet initialized. */
	if (r1->rc_blockcount == 0)
		return false;

	if (r1->rc_domain != r2->rc_domain)
		return false;
	if (r1->rc_startblock + r1->rc_blockcount != r2->rc_startblock)
		return false;
	if (r1->rc_refcount != r2->rc_refcount)
		return false;
	if ((unsigned long long)r1->rc_blockcount + r2->rc_blockcount >
			MAXREFCEXTLEN)
		return false;

	return true;
}

/* Flag failures for records that could be merged. */
STATIC void
xchk_refcountbt_check_mergeable(
	struct xchk_btree		*bs,
	struct xchk_refcbt_records	*rrc,
	const struct xfs_refcount_irec	*irec)
{
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	if (xchk_refcount_mergeable(rrc, irec))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	memcpy(&rrc->prev_rec, irec, sizeof(struct xfs_refcount_irec));
}

/* Scrub a refcountbt record. */
STATIC int
xchk_refcountbt_rec(
	struct xchk_btree	*bs,
	const union xfs_btree_rec *rec)
{
	struct xfs_refcount_irec irec;
	struct xchk_refcbt_records *rrc = bs->private;

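	/* Pull the record into memory and reject garbage field values. */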
	xfs_refcount_btrec_to_irec(rec, &irec);
	if (xfs_refcount_check_irec(to_perag(bs->cur->bc_group), &irec) !=
			NULL) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	if (irec.rc_domain == XFS_REFC_DOMAIN_COW)
		rrc->cow_blocks += irec.rc_blockcount;

	/* Shared records always come before CoW records. */
	if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED &&
	    rrc->prev_domain == XFS_REFC_DOMAIN_COW)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	rrc->prev_domain = irec.rc_domain;

	xchk_refcountbt_check_mergeable(bs, rrc, &irec);
	xchk_refcountbt_xref(bs->sc, &irec);

	/*
	 * If this is a record for a shared extent, check that all blocks
	 * between the previous record and this one have at most one reverse
	 * mapping.
	 */
	if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED) {
		xchk_refcountbt_xref_gaps(bs->sc, rrc, irec.rc_startblock);
		rrc->next_unshared_agbno = irec.rc_startblock +
					   irec.rc_blockcount;
	}

	return 0;
}

/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
xchk_refcount_xref_rmap(
	struct xfs_scrub	*sc,
	xfs_filblks_t		cow_blocks)
{
	xfs_extlen_t		refcbt_blocks = 0;
	xfs_filblks_t		blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many refcbt blocks as the rmap knows about. */
	error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
	if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
		return;
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_REFC, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != refcbt_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

	/* Check that we saw as many cow blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_COW, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != cow_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* Scrub the refcount btree for some AG. */
int
xchk_refcountbt(
	struct xfs_scrub	*sc)
{
	struct xchk_refcbt_records rrc = {
		.cow_blocks		= 0,
		.next_unshared_agbno	= 0,
		.prev_domain		= XFS_REFC_DOMAIN_SHARED,
	};
	int			error;

	error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
			&XFS_RMAP_OINFO_REFC, &rrc);
	if (error)
		return error;

	/*
	 * Check that all blocks between the last refcount > 1 record and the
	 * end of the AG have at most one reverse mapping.
	 */
	xchk_refcountbt_xref_gaps(sc, &rrc, sc->mp->m_sb.sb_agblocks);

	xchk_refcount_xref_rmap(sc, rrc.cow_blocks);

	return 0;
}

/* xref check that a cow staging extent is marked in the refcountbt. */
void
xchk_xref_is_cow_staging(
	struct xfs_scrub		*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	struct xfs_refcount_irec	rc;
	int				has_refcount;
	int				error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	/* Find the CoW staging extent. */
	error = xfs_refcount_lookup_le(sc->sa.refc_cur, XFS_REFC_DOMAIN_COW,
			agbno, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	/* CoW lookup returned a shared extent record? */
	if (rc.rc_domain != XFS_REFC_DOMAIN_COW)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);

	/* Must be at least as long as what was passed in */
	if (rc.rc_blockcount < len)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/*
 * xref check that the extent is not shared.  Only file data blocks
 * can have multiple owners.
 */
void
xchk_xref_is_not_shared(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_refcount_has_records(sc->sa.refc_cur,
			XFS_REFC_DOMAIN_SHARED, agbno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/* xref check that the extent is not being used for CoW staging. */
void
xchk_xref_is_not_cow_staging(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_refcount_has_records(sc->sa.refc_cur, XFS_REFC_DOMAIN_COW,
			agbno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
634