// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "scrub/repair.h"

/*
 * Set us up to scrub reference count btrees.
 */
int
xchk_setup_ag_refcountbt(
	struct xfs_scrub	*sc)
{
	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	if (xchk_could_repair(sc)) {
		int		error;

		error = xrep_setup_ag_refcountbt(sc);
		if (error)
			return error;
	}

	return xchk_setup_ag_btree(sc, false);
}

/* Reference count btree scrubber. */

/*
 * Confirming Reference Counts via Reverse Mappings
 *
 * We want to count the reverse mappings overlapping a refcount record
 * (bno, len, refcount), allowing for the possibility that some of the
 * overlap may come from smaller adjoining reverse mappings, while some
 * comes from single extents which overlap the range entirely.  The
 * outer loop is as follows:
 *
 * 1. For all reverse mappings overlapping the refcount extent,
 *    a. If a given rmap completely overlaps, mark it as seen.
 *    b. Otherwise, record the fragment (in agbno order) for later
 *       processing.
 *
 * Once we've seen all the rmaps, we know that for all blocks in the
 * refcount record we want to find $refcount owners and we've already
 * visited $seen extents that overlap all the blocks.  Therefore, we
 * need to find ($refcount - $seen) owners for every block in the
 * extent; call that quantity $target_nr.  Proceed as follows:
 *
 * 2. Pull the first $target_nr fragments from the list; all of them
 *    should start at or before the start of the extent.
 *    Call this subset of fragments the working set.
 * 3. Until there are no more unprocessed fragments,
 *    a. Find the shortest fragments in the set and remove them.
 *    b. Note the block number of the end of these fragments.
 *    c. Pull the same number of fragments from the list.  All of these
 *       fragments should start at the block number recorded in the
 *       previous step.
 *    d. Put those fragments in the set.
 * 4. Check that there are $target_nr fragments remaining in the list,
 *    and that they all end at or beyond the end of the refcount extent.
 *
 * If the refcount is correct, all the check conditions in the algorithm
 * should always hold true.  If not, the refcount is incorrect.
 */
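
/*
 * Worked example of the algorithm above (all numbers invented for
 * illustration; they do not come from any real filesystem): suppose
 * the refcount record is (bno=10, len=4, refcount=3), i.e. blocks
 * 10-13 should each have three owners, and the rmapbt contains five
 * overlapping records:
 *
 *	(rm_startblock= 8, rm_blockcount=8)	covers 8-15 entirely,
 *						so step 1a sets $seen = 1
 *	(rm_startblock= 9, rm_blockcount=3)	partial; saved as fragment
 *	(rm_startblock=10, rm_blockcount=2)	partial; saved as fragment
 *	(rm_startblock=12, rm_blockcount=4)	partial; saved as fragment
 *	(rm_startblock=12, rm_blockcount=2)	partial; saved as fragment
 *
 * $target_nr = $refcount - $seen = 2, so step 2 moves the first two
 * fragments, (9, 3) and (10, 2), to the working set; both start at or
 * before bno 10.  Both members of the set end at block 12, so step 3
 * discards them and pulls the next two fragments, (12, 4) and (12, 2),
 * which start exactly at block 12 as required.  The fragment list is
 * now empty, and the two surviving fragments end at blocks 16 and 14,
 * both at or beyond the end of the record (block 14), so the check
 * passes and $seen is bumped up to $refcount.
 */
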
struct xchk_refcnt_frag {
	struct list_head	list;
	struct xfs_rmap_irec	rm;
};

struct xchk_refcnt_check {
	struct xfs_scrub	*sc;
	struct list_head	fragments;

	/* refcount extent we're examining */
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;

	/* number of owners seen */
	xfs_nlink_t		seen;
};

/*
 * Decide if the given rmap is large enough that we can redeem it
 * towards refcount verification now, or if it's a fragment, in
 * which case we'll hang onto it in the hopes that we'll later
 * discover that we've collected exactly the correct number of
 * fragments as the refcountbt says we should have.
 */
STATIC int
xchk_refcountbt_rmap_check(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xchk_refcnt_check	*refchk = priv;
	struct xchk_refcnt_frag	*frag;
	xfs_agblock_t			rm_last;
	xfs_agblock_t			rc_last;
	int				error = 0;

	if (xchk_should_terminate(refchk->sc, &error))
		return error;

	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
	rc_last = refchk->bno + refchk->len - 1;

	/*
	 * Confirm that a single-owner refc extent is a CoW stage;
	 * shared extents always have at least two owners.
	 */
	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
		xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
		return 0;
	}

	if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
		/*
		 * The rmap overlaps the refcount record, so we can confirm
		 * one refcount owner seen.
		 */
		refchk->seen++;
	} else {
		/*
		 * This rmap covers only part of the refcount record, so
		 * save the fragment for later processing.  If the rmapbt
		 * is healthy each rmap_irec we see will be in agbno order
		 * so we don't need insertion sort here.
		 */
		frag = kmalloc(sizeof(struct xchk_refcnt_frag),
				XCHK_GFP_FLAGS);
		if (!frag)
			return -ENOMEM;
		memcpy(&frag->rm, rec, sizeof(frag->rm));
		list_add_tail(&frag->list, &refchk->fragments);
	}

	return 0;
}
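
/*
 * Illustrative case for the CoW check above (invented numbers): if the
 * refcount record is (bno=50, len=2, refcount=1), the only legitimate
 * owner of those blocks is XFS_RMAP_OWN_COW, because a refcount of 1 can
 * only describe a CoW staging extent.  Finding an overlapping rmap owned
 * by a regular inode would set the xref corruption flag instead.
 */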

/*
 * Given a bunch of rmap fragments, iterate through them, keeping
 * a running tally of the refcount.  If this ever deviates from
 * what we expect (which is the refcountbt's refcount minus the
 * number of extents that totally covered the refcountbt extent),
 * we have a refcountbt error.
 */
STATIC void
xchk_refcountbt_process_rmap_fragments(
	struct xchk_refcnt_check	*refchk)
{
	struct list_head		worklist;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	xfs_agblock_t			bno;
	xfs_agblock_t			rbno;
	xfs_agblock_t			next_rbno;
	xfs_nlink_t			nr;
	xfs_nlink_t			target_nr;

	target_nr = refchk->refcount - refchk->seen;
	if (target_nr == 0)
		return;

	/*
	 * There are (refchk->refcount - refchk->seen) references we
	 * haven't found yet.  Pull that many off the fragment list and
	 * figure out where the smallest rmap ends (and therefore the
	 * next rmap should start).  All the rmaps we pull off should
	 * start at or before the beginning of the refcount record's
	 * range.
	 */
	INIT_LIST_HEAD(&worklist);
	rbno = NULLAGBLOCK;

	/* Make sure the fragments actually /are/ in agbno order. */
	bno = 0;
	list_for_each_entry(frag, &refchk->fragments, list) {
		if (frag->rm.rm_startblock < bno)
			goto done;
		bno = frag->rm.rm_startblock;
	}

	/*
	 * Find all the rmaps that start at or before the refc extent,
	 * and put them on the worklist.
	 */
	nr = 0;
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
			break;
		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
		if (bno < rbno)
			rbno = bno;
		list_move_tail(&frag->list, &worklist);
		nr++;
	}

	/*
	 * We should have found exactly $target_nr rmap fragments starting
	 * at or before the refcount extent.
	 */
	if (nr != target_nr)
		goto done;

	while (!list_empty(&refchk->fragments)) {
		/* Discard any fragments ending at rbno from the worklist. */
		nr = 0;
		next_rbno = NULLAGBLOCK;
		list_for_each_entry_safe(frag, n, &worklist, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (bno != rbno) {
				if (bno < next_rbno)
					next_rbno = bno;
				continue;
			}
			list_del(&frag->list);
			kfree(frag);
			nr++;
		}

		/* Try to add nr rmaps starting at rbno to the worklist. */
		list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (frag->rm.rm_startblock != rbno)
				goto done;
			list_move_tail(&frag->list, &worklist);
			if (next_rbno > bno)
				next_rbno = bno;
			nr--;
			if (nr == 0)
				break;
		}

		/*
		 * If we get here and nr > 0, this means that we added fewer
		 * items to the worklist than we discarded because the fragment
		 * list ran out of items.  Therefore, we cannot maintain the
		 * required refcount.  Something is wrong, so we're done.
		 */
		if (nr)
			goto done;

		rbno = next_rbno;
	}

	/*
	 * Make sure the last extent we processed ends at or beyond
	 * the end of the refcount extent.
	 */
	if (rbno < refchk->bno + refchk->len)
		goto done;

	/* Actually record us having seen the remaining refcount. */
	refchk->seen = refchk->refcount;
done:
	/* Delete fragments and work list. */
	list_for_each_entry_safe(frag, n, &worklist, list) {
		list_del(&frag->list);
		kfree(frag);
	}
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		list_del(&frag->list);
		kfree(frag);
	}
}

/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
xchk_refcountbt_xref_rmap(
	struct xfs_scrub		*sc,
	const struct xfs_refcount_irec	*irec)
{
	struct xchk_refcnt_check	refchk = {
		.sc			= sc,
		.bno			= irec->rc_startblock,
		.len			= irec->rc_blockcount,
		.refcount		= irec->rc_refcount,
		.seen			= 0,
	};
	struct xfs_rmap_irec		low;
	struct xfs_rmap_irec		high;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Cross-reference with the rmapbt to confirm the refcount. */
	memset(&low, 0, sizeof(low));
	low.rm_startblock = irec->rc_startblock;
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = irec->rc_startblock + irec->rc_blockcount - 1;

	INIT_LIST_HEAD(&refchk.fragments);
	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
			&xchk_refcountbt_rmap_check, &refchk);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		goto out_free;

	xchk_refcountbt_process_rmap_fragments(&refchk);
	if (irec->rc_refcount != refchk.seen) {
		trace_xchk_refcount_incorrect(sc->sa.pag, irec, refchk.seen);
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	}

out_free:
	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
		list_del(&frag->list);
		kfree(frag);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_refcountbt_xref(
	struct xfs_scrub		*sc,
	const struct xfs_refcount_irec	*irec)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, irec->rc_startblock, irec->rc_blockcount);
	xchk_xref_is_not_inode_chunk(sc, irec->rc_startblock,
			irec->rc_blockcount);
	xchk_refcountbt_xref_rmap(sc, irec);
}

struct xchk_refcbt_records {
	/* Previous refcount record. */
	struct xfs_refcount_irec prev_rec;

	/* The next AG block where we aren't expecting shared extents. */
	xfs_agblock_t		next_unshared_agbno;

	/* Number of CoW blocks we expect. */
	xfs_agblock_t		cow_blocks;

	/* Was the last record a shared or CoW staging extent? */
	enum xfs_refc_domain	prev_domain;
};

STATIC int
xchk_refcountbt_rmap_check_gap(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	xfs_agblock_t			*next_bno = priv;

	if (*next_bno != NULLAGBLOCK && rec->rm_startblock < *next_bno)
		return -ECANCELED;

	*next_bno = rec->rm_startblock + rec->rm_blockcount;
	return 0;
}
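
/*
 * Example of the overlap detection above (invented numbers): scanning
 * rmaps (rm_startblock=20, rm_blockcount=5) and then (23, 4), the first
 * call sets *next_bno to 25, and the second call sees rm_startblock 23
 * < 25.  Blocks 23-24 therefore have two owners, so -ECANCELED is
 * returned to make the caller flag a cross-referencing corruption.
 */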

/*
 * Make sure that a gap in the reference count records does not correspond to
 * overlapping records (i.e. shared extents) in the reverse mappings.
 */
static inline void
xchk_refcountbt_xref_gaps(
	struct xfs_scrub	*sc,
	struct xchk_refcbt_records *rrc,
	xfs_agblock_t		bno)
{
	struct xfs_rmap_irec	low;
	struct xfs_rmap_irec	high;
	xfs_agblock_t		next_bno = NULLAGBLOCK;
	int			error;

	if (bno <= rrc->next_unshared_agbno || !sc->sa.rmap_cur ||
	    xchk_skip_xref(sc->sm))
		return;

	memset(&low, 0, sizeof(low));
	low.rm_startblock = rrc->next_unshared_agbno;
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno - 1;

	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
			xchk_refcountbt_rmap_check_gap, &next_bno);
	if (error == -ECANCELED)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	else
		xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur);
}

static inline bool
xchk_refcount_mergeable(
	struct xchk_refcbt_records	*rrc,
	const struct xfs_refcount_irec	*r2)
{
	const struct xfs_refcount_irec	*r1 = &rrc->prev_rec;

	/* Ignore if prev_rec is not yet initialized. */
	if (r1->rc_blockcount == 0)
		return false;

	if (r1->rc_domain != r2->rc_domain)
		return false;
	if (r1->rc_startblock + r1->rc_blockcount != r2->rc_startblock)
		return false;
	if (r1->rc_refcount != r2->rc_refcount)
		return false;
	if ((unsigned long long)r1->rc_blockcount + r2->rc_blockcount >
			MAXREFCEXTLEN)
		return false;

	return true;
}
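
/*
 * For instance (invented numbers), adjacent records (rc_startblock=100,
 * rc_blockcount=4, rc_refcount=2) and (104, 6, 2) in the same domain
 * should have been written as a single record (100, 10, 2).  Finding
 * both in the btree means the tree was not properly maintained, so the
 * caller below flags corruption.
 */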

/* Flag failures for records that could be merged. */
STATIC void
xchk_refcountbt_check_mergeable(
	struct xchk_btree		*bs,
	struct xchk_refcbt_records	*rrc,
	const struct xfs_refcount_irec	*irec)
{
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	if (xchk_refcount_mergeable(rrc, irec))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	memcpy(&rrc->prev_rec, irec, sizeof(struct xfs_refcount_irec));
}

/* Scrub a refcountbt record. */
STATIC int
xchk_refcountbt_rec(
	struct xchk_btree	*bs,
	const union xfs_btree_rec *rec)
{
	struct xfs_refcount_irec irec;
	struct xchk_refcbt_records *rrc = bs->private;

	xfs_refcount_btrec_to_irec(rec, &irec);
	if (xfs_refcount_check_irec(bs->cur->bc_ag.pag, &irec) != NULL) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	if (irec.rc_domain == XFS_REFC_DOMAIN_COW)
		rrc->cow_blocks += irec.rc_blockcount;

	/* Shared records always come before CoW records. */
	if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED &&
	    rrc->prev_domain == XFS_REFC_DOMAIN_COW)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	rrc->prev_domain = irec.rc_domain;

	xchk_refcountbt_check_mergeable(bs, rrc, &irec);
	xchk_refcountbt_xref(bs->sc, &irec);

	/*
	 * If this is a record for a shared extent, check that all blocks
	 * between the previous record and this one have at most one reverse
	 * mapping.
	 */
	if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED) {
		xchk_refcountbt_xref_gaps(bs->sc, rrc, irec.rc_startblock);
		rrc->next_unshared_agbno = irec.rc_startblock +
				irec.rc_blockcount;
	}

	return 0;
}

/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
xchk_refcount_xref_rmap(
	struct xfs_scrub	*sc,
	xfs_filblks_t		cow_blocks)
{
	xfs_extlen_t		refcbt_blocks = 0;
	xfs_filblks_t		blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many refcbt blocks as the rmap knows about. */
	error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
	if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
		return;
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_REFC, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != refcbt_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

	/* Check that we saw as many cow blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_COW, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != cow_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* Scrub the refcount btree for some AG. */
int
xchk_refcountbt(
	struct xfs_scrub	*sc)
{
	struct xchk_refcbt_records rrc = {
		.cow_blocks		= 0,
		.next_unshared_agbno	= 0,
		.prev_domain		= XFS_REFC_DOMAIN_SHARED,
	};
	int			error;

	error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
			&XFS_RMAP_OINFO_REFC, &rrc);
	if (error)
		return error;

	/*
	 * Check that all blocks between the last refcount > 1 record and the
	 * end of the AG have at most one reverse mapping.
	 */
	xchk_refcountbt_xref_gaps(sc, &rrc, sc->mp->m_sb.sb_agblocks);

	xchk_refcount_xref_rmap(sc, rrc.cow_blocks);

	return 0;
}

/* xref check that a cow staging extent is marked in the refcountbt. */
void
xchk_xref_is_cow_staging(
	struct xfs_scrub		*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	struct xfs_refcount_irec	rc;
	int				has_refcount;
	int				error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	/* Find the CoW staging extent. */
	error = xfs_refcount_lookup_le(sc->sa.refc_cur, XFS_REFC_DOMAIN_COW,
			agbno, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	/* CoW lookup returned a shared extent record? */
	if (rc.rc_domain != XFS_REFC_DOMAIN_COW)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);

	/* Must be at least as long as what was passed in */
	if (rc.rc_blockcount < len)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
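
/*
 * Illustration of the check above (invented numbers): for a caller
 * asking about the CoW staging extent (agbno=200, len=8), a CoW-domain
 * record such as (rc_startblock=200, rc_blockcount=16, rc_refcount=1)
 * passes both tests, while a record of only 4 blocks would trip the
 * rc_blockcount < len test and flag corruption.
 */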

/*
 * xref check that the extent is not shared.  Only file data blocks
 * can have multiple owners.
 */
void
xchk_xref_is_not_shared(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_refcount_has_records(sc->sa.refc_cur,
			XFS_REFC_DOMAIN_SHARED, agbno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/* xref check that the extent is not being used for CoW staging. */
void
xchk_xref_is_not_cow_staging(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_refcount_has_records(sc->sa.refc_cur, XFS_REFC_DOMAIN_COW,
			agbno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}