// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_ag.h"
#include "xfs_bit.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/bitmap.h"
#include "scrub/agb_bitmap.h"

/*
 * Set us up to scrub reverse mapping btrees.
 */
int
xchk_setup_ag_rmapbt(
	struct xfs_scrub	*sc)
{
	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	return xchk_setup_ag_btree(sc, false);
}

/* Reverse-mapping scrubber. */

struct xchk_rmap {
	/*
	 * The furthest-reaching of the rmapbt records that we've already
	 * processed.  This enables us to detect overlapping records for
	 * space allocations that cannot be shared.
	 */
	struct xfs_rmap_irec	overlap_rec;

	/*
	 * The previous rmapbt record, so that we can check for two records
	 * that could be one.
	 */
	struct xfs_rmap_irec	prev_rec;

	/* Bitmaps containing all blocks for each type of AG metadata. */
	struct xagb_bitmap	fs_owned;
	struct xagb_bitmap	log_owned;
	struct xagb_bitmap	ag_owned;
	struct xagb_bitmap	inobt_owned;
	struct xagb_bitmap	refcbt_owned;

	/* Did we complete the AG space metadata bitmaps? */
	bool			bitmaps_complete;
};

/* Cross-reference a rmap against the refcount btree. */
STATIC void
xchk_rmapbt_xref_refc(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	bool			non_inode;
	bool			is_bmbt;
	bool			is_attr;
	bool			is_unwritten;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
	is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;

	/* If this is shared, must be a data fork extent. */
	error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
			irec->rm_blockcount, &fbno, &flen, false);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_rmapbt_xref(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t		agbno = irec->rm_startblock;
	xfs_extlen_t		len = irec->rm_blockcount;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_INODES)
		xchk_xref_is_inode_chunk(sc, agbno, len);
	else
		xchk_xref_is_not_inode_chunk(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_COW)
		xchk_xref_is_cow_staging(sc, irec->rm_startblock,
				irec->rm_blockcount);
	else
		xchk_rmapbt_xref_refc(sc, irec);
}
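
/*
 * Summary example for the cross-checks above: an rmap record for a
 * non-inode owner, an attr fork, a bmbt block, or an unwritten extent can
 * never describe shared space.  If xfs_refcount_find_shared() nevertheless
 * reports a nonzero shared length (flen) for those blocks, the rmapbt and
 * the refcount btree disagree, so the cross-reference is flagged
 * (XCORRUPT) rather than the record itself.
 */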

/*
 * Check for bogus UNWRITTEN flags in the rmapbt node block keys.
 *
 * In reverse mapping records, the file mapping extent state
 * (XFS_RMAP_OFF_UNWRITTEN) is a record attribute, not a key field.  It is not
 * involved in lookups in any way.  In older kernels, the functions that
 * convert rmapbt records to keys forgot to filter out the extent state bit,
 * even though the key comparison functions have filtered the flag correctly.
 * If we spot an rmap key with the unwritten bit set in rm_offset, we should
 * mark the btree as needing optimization to rebuild the btree without those
 * flags.
 */
STATIC void
xchk_rmapbt_check_unwritten_in_keyflags(
	struct xchk_btree	*bs)
{
	struct xfs_scrub	*sc = bs->sc;
	struct xfs_btree_cur	*cur = bs->cur;
	struct xfs_btree_block	*keyblock;
	union xfs_btree_key	*lkey, *hkey;
	__be64			badflag = cpu_to_be64(XFS_RMAP_OFF_UNWRITTEN);
	unsigned int		level;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)
		return;

	for (level = 1; level < cur->bc_nlevels; level++) {
		struct xfs_buf	*bp;
		unsigned int	ptr;

		/* Only check the first time we've seen this node block. */
		if (cur->bc_levels[level].ptr > 1)
			continue;

		keyblock = xfs_btree_get_block(cur, level, &bp);
		for (ptr = 1; ptr <= be16_to_cpu(keyblock->bb_numrecs); ptr++) {
			lkey = xfs_btree_key_addr(cur, ptr, keyblock);

			if (lkey->rmap.rm_offset & badflag) {
				xchk_btree_set_preen(sc, cur, level);
				break;
			}

			hkey = xfs_btree_high_key_addr(cur, ptr, keyblock);
			if (hkey->rmap.rm_offset & badflag) {
				xchk_btree_set_preen(sc, cur, level);
				break;
			}
		}
	}
}

static inline bool
xchk_rmapbt_is_shareable(
	struct xfs_scrub		*sc,
	const struct xfs_rmap_irec	*irec)
{
	if (!xfs_has_reflink(sc->mp))
		return false;
	if (XFS_RMAP_NON_INODE_OWNER(irec->rm_owner))
		return false;
	if (irec->rm_flags & (XFS_RMAP_BMBT_BLOCK | XFS_RMAP_ATTR_FORK |
			      XFS_RMAP_UNWRITTEN))
		return false;
	return true;
}

/* Flag failures for records that overlap but cannot. */
STATIC void
xchk_rmapbt_check_overlapping(
	struct xchk_btree		*bs,
	struct xchk_rmap		*cr,
	const struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t			pnext, inext;

	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	/* No previous record? */
	if (cr->overlap_rec.rm_blockcount == 0)
		goto set_prev;

	/* Do overlap_rec and irec overlap? */
	pnext = cr->overlap_rec.rm_startblock + cr->overlap_rec.rm_blockcount;
	if (pnext <= irec->rm_startblock)
		goto set_prev;

	/* Overlap is only allowed if both records are data fork mappings. */
	if (!xchk_rmapbt_is_shareable(bs->sc, &cr->overlap_rec) ||
	    !xchk_rmapbt_is_shareable(bs->sc, irec))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	/* Save whichever rmap record extends furthest. */
	inext = irec->rm_startblock + irec->rm_blockcount;
	if (pnext > inext)
		return;

set_prev:
	memcpy(&cr->overlap_rec, irec, sizeof(struct xfs_rmap_irec));
}
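
/*
 * Example of the overlap rule above: on a reflink filesystem, two written
 * data fork mappings may legitimately overlap because the underlying
 * extent is shared.  If either record fails xchk_rmapbt_is_shareable()
 * (non-inode owner, attr fork, bmbt block, or unwritten extent), the
 * space cannot be shared, so overlapping records mean the rmapbt is
 * corrupt.
 */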

/* Decide if two reverse-mapping records can be merged. */
static inline bool
xchk_rmap_mergeable(
	struct xchk_rmap		*cr,
	const struct xfs_rmap_irec	*r2)
{
	const struct xfs_rmap_irec	*r1 = &cr->prev_rec;

	/* Ignore if prev_rec is not yet initialized. */
	if (cr->prev_rec.rm_blockcount == 0)
		return false;

	if (r1->rm_owner != r2->rm_owner)
		return false;
	if (r1->rm_startblock + r1->rm_blockcount != r2->rm_startblock)
		return false;
	if ((unsigned long long)r1->rm_blockcount + r2->rm_blockcount >
	    XFS_RMAP_LEN_MAX)
		return false;
	if (XFS_RMAP_NON_INODE_OWNER(r2->rm_owner))
		return true;
	/* must be an inode owner below here */
	if (r1->rm_flags != r2->rm_flags)
		return false;
	if (r1->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return true;
	return r1->rm_offset + r1->rm_blockcount == r2->rm_offset;
}

/* Flag failures for records that could be merged. */
STATIC void
xchk_rmapbt_check_mergeable(
	struct xchk_btree		*bs,
	struct xchk_rmap		*cr,
	const struct xfs_rmap_irec	*irec)
{
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	if (xchk_rmap_mergeable(cr, irec))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	memcpy(&cr->prev_rec, irec, sizeof(struct xfs_rmap_irec));
}
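
/*
 * Worked example for the merge check above, using made-up values: records
 * (owner 131, startblock 100, blockcount 8, offset 0) and (owner 131,
 * startblock 108, blockcount 4, offset 8) with identical flags pass every
 * test in xchk_rmap_mergeable(), which means they should have been stored
 * as one record covering agbnos 100-111.  Seeing both in the btree is
 * therefore flagged as corruption.
 */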

/* Compare an rmap for AG metadata against the metadata walk. */
STATIC int
xchk_rmapbt_mark_bitmap(
	struct xchk_btree		*bs,
	struct xchk_rmap		*cr,
	const struct xfs_rmap_irec	*irec)
{
	struct xfs_scrub		*sc = bs->sc;
	struct xagb_bitmap		*bmp = NULL;
	xfs_extlen_t			fsbcount = irec->rm_blockcount;

	/*
	 * Skip corrupt records.  It is essential that we detect records in
	 * the btree that cannot overlap but do, flag those as CORRUPT, and
	 * skip the bitmap comparison to avoid generating false XCORRUPT
	 * reports.
	 */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/*
	 * If the AG metadata walk didn't complete, there's no point in
	 * comparing against partial results.
	 */
	if (!cr->bitmaps_complete)
		return 0;

	switch (irec->rm_owner) {
	case XFS_RMAP_OWN_FS:
		bmp = &cr->fs_owned;
		break;
	case XFS_RMAP_OWN_LOG:
		bmp = &cr->log_owned;
		break;
	case XFS_RMAP_OWN_AG:
		bmp = &cr->ag_owned;
		break;
	case XFS_RMAP_OWN_INOBT:
		bmp = &cr->inobt_owned;
		break;
	case XFS_RMAP_OWN_REFC:
		bmp = &cr->refcbt_owned;
		break;
	}

	if (!bmp)
		return 0;

	if (xagb_bitmap_test(bmp, irec->rm_startblock, &fsbcount)) {
		/*
		 * The start of this reverse mapping corresponds to a set
		 * region in the bitmap.  If the mapping covers more area than
		 * the set region, then it covers space that wasn't found by
		 * the AG metadata walk.
		 */
		if (fsbcount < irec->rm_blockcount)
			xchk_btree_xref_set_corrupt(bs->sc,
					bs->sc->sa.rmap_cur, 0);
	} else {
		/*
		 * The start of this reverse mapping does not correspond to a
		 * completely set region in the bitmap.  The region wasn't
		 * fully set by walking the AG metadata, so this is a
		 * cross-referencing corruption.
		 */
		xchk_btree_xref_set_corrupt(bs->sc, bs->sc->sa.rmap_cur, 0);
	}

	/* Unset the region so that we can detect missing rmap records. */
	return xagb_bitmap_clear(bmp, irec->rm_startblock, irec->rm_blockcount);
}
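
/*
 * Example of the bitmap comparison above: suppose the metadata walk set
 * agbnos 10-15 in ag_owned, but an OWN_AG rmap record covers agbnos 10-20.
 * xagb_bitmap_test() reports that only the first 6 blocks of the mapping
 * are set, so fsbcount (6) is less than rm_blockcount (11) and the record
 * is flagged as a cross-referencing corruption.  Either way, the record's
 * range is then cleared from the bitmap so that any bits still set after
 * the full btree walk point at metadata with no rmap record at all.
 */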

/* Scrub an rmapbt record. */
STATIC int
xchk_rmapbt_rec(
	struct xchk_btree	*bs,
	const union xfs_btree_rec *rec)
{
	struct xchk_rmap	*cr = bs->private;
	struct xfs_rmap_irec	irec;

	if (xfs_rmap_btrec_to_irec(rec, &irec) != NULL ||
	    xfs_rmap_check_irec(bs->cur, &irec) != NULL) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	xchk_rmapbt_check_unwritten_in_keyflags(bs);
	xchk_rmapbt_check_mergeable(bs, cr, &irec);
	xchk_rmapbt_check_overlapping(bs, cr, &irec);
	xchk_rmapbt_xref(bs->sc, &irec);

	return xchk_rmapbt_mark_bitmap(bs, cr, &irec);
}

/* Add an AGFL block to the rmap list. */
STATIC int
xchk_rmapbt_walk_agfl(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xagb_bitmap	*bitmap = priv;

	return xagb_bitmap_set(bitmap, agbno, 1);
}

/*
 * Set up bitmaps mapping all the AG metadata to compare with the rmapbt
 * records.
 *
 * Grab our own btree cursors here if the scrub setup function didn't give us a
 * btree cursor due to reports of poor health.  We need to find out if the
 * rmapbt disagrees with primary metadata btrees to tag the rmapbt as being
 * XCORRUPT.
 */
STATIC int
xchk_rmapbt_walk_ag_metadata(
	struct xfs_scrub	*sc,
	struct xchk_rmap	*cr)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*agfl_bp;
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_btree_cur	*cur;
	int			error;

	/* OWN_FS: AG headers */
	error = xagb_bitmap_set(&cr->fs_owned, XFS_SB_BLOCK(mp),
			XFS_AGFL_BLOCK(mp) - XFS_SB_BLOCK(mp) + 1);
	if (error)
		goto out;

	/* OWN_LOG: Internal log */
	if (xfs_ag_contains_log(mp, sc->sa.pag->pag_agno)) {
		error = xagb_bitmap_set(&cr->log_owned,
				XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart),
				mp->m_sb.sb_logblocks);
		if (error)
			goto out;
	}

	/* OWN_AG: bnobt, cntbt, rmapbt, and AGFL */
	cur = sc->sa.bno_cur;
	if (!cur)
		cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
				sc->sa.pag);
	error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur);
	if (cur != sc->sa.bno_cur)
		xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	cur = sc->sa.cnt_cur;
	if (!cur)
		cur = xfs_cntbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
				sc->sa.pag);
	error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur);
	if (cur != sc->sa.cnt_cur)
		xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	error = xagb_bitmap_set_btblocks(&cr->ag_owned, sc->sa.rmap_cur);
	if (error)
		goto out;

	error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
	if (error)
		goto out;

	error = xfs_agfl_walk(sc->mp, agf, agfl_bp, xchk_rmapbt_walk_agfl,
			&cr->ag_owned);
	xfs_trans_brelse(sc->tp, agfl_bp);
	if (error)
		goto out;

	/* OWN_INOBT: inobt, finobt */
	cur = sc->sa.ino_cur;
	if (!cur)
		cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, sc->sa.agi_bp);
	error = xagb_bitmap_set_btblocks(&cr->inobt_owned, cur);
	if (cur != sc->sa.ino_cur)
		xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (xfs_has_finobt(sc->mp)) {
		cur = sc->sa.fino_cur;
		if (!cur)
			cur = xfs_finobt_init_cursor(sc->sa.pag, sc->tp,
					sc->sa.agi_bp);
		error = xagb_bitmap_set_btblocks(&cr->inobt_owned, cur);
		if (cur != sc->sa.fino_cur)
			xfs_btree_del_cursor(cur, error);
		if (error)
			goto out;
	}

	/* OWN_REFC: refcountbt */
	if (xfs_has_reflink(sc->mp)) {
		cur = sc->sa.refc_cur;
		if (!cur)
			cur = xfs_refcountbt_init_cursor(sc->mp, sc->tp,
					sc->sa.agf_bp, sc->sa.pag);
		error = xagb_bitmap_set_btblocks(&cr->refcbt_owned, cur);
		if (cur != sc->sa.refc_cur)
			xfs_btree_del_cursor(cur, error);
		if (error)
			goto out;
	}

out:
	/*
	 * If there's an error, set XFAIL and disable the bitmap
	 * cross-referencing checks, but proceed with the scrub anyway.
	 */
	if (error)
		xchk_btree_xref_process_error(sc, sc->sa.rmap_cur,
				sc->sa.rmap_cur->bc_nlevels - 1, &error);
	else
		cr->bitmaps_complete = true;
	return 0;
}

/*
 * Check for set regions in the bitmaps; if there are any, the rmap records do
 * not describe all the AG metadata.
 */
STATIC void
xchk_rmapbt_check_bitmaps(
	struct xfs_scrub	*sc,
	struct xchk_rmap	*cr)
{
	struct xfs_btree_cur	*cur = sc->sa.rmap_cur;
	unsigned int		level;

	if (sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
				XFS_SCRUB_OFLAG_XFAIL))
		return;
	if (!cur)
		return;
	level = cur->bc_nlevels - 1;

	/*
	 * Any bitmap with bits still set indicates that the reverse mapping
	 * doesn't cover the entire primary structure.
	 */
	if (xagb_bitmap_hweight(&cr->fs_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);

	if (xagb_bitmap_hweight(&cr->log_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);

	if (xagb_bitmap_hweight(&cr->ag_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);

	if (xagb_bitmap_hweight(&cr->inobt_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);

	if (xagb_bitmap_hweight(&cr->refcbt_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);
}
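
/*
 * Rough sketch of how userspace reaches this scrubber (illustrative only;
 * the authoritative interface is the scrub ioctl definition in xfs_fs.h):
 *
 *	struct xfs_scrub_metadata sm = {
 *		.sm_type = XFS_SCRUB_TYPE_RMAPBT,
 *		.sm_agno = agno,
 *	};
 *
 *	error = ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm);
 *
 * On return, sm.sm_flags carries the OFLAG bits (CORRUPT, XCORRUPT, PREEN,
 * XFAIL) that the checks in this file set.
 */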

/* Scrub the rmap btree for some AG. */
int
xchk_rmapbt(
	struct xfs_scrub	*sc)
{
	struct xchk_rmap	*cr;
	int			error;

	cr = kzalloc(sizeof(struct xchk_rmap), XCHK_GFP_FLAGS);
	if (!cr)
		return -ENOMEM;

	xagb_bitmap_init(&cr->fs_owned);
	xagb_bitmap_init(&cr->log_owned);
	xagb_bitmap_init(&cr->ag_owned);
	xagb_bitmap_init(&cr->inobt_owned);
	xagb_bitmap_init(&cr->refcbt_owned);

	error = xchk_rmapbt_walk_ag_metadata(sc, cr);
	if (error)
		goto out;

	error = xchk_btree(sc, sc->sa.rmap_cur, xchk_rmapbt_rec,
			&XFS_RMAP_OINFO_AG, cr);
	if (error)
		goto out;

	xchk_rmapbt_check_bitmaps(sc, cr);

out:
	xagb_bitmap_destroy(&cr->refcbt_owned);
	xagb_bitmap_destroy(&cr->inobt_owned);
	xagb_bitmap_destroy(&cr->ag_owned);
	xagb_bitmap_destroy(&cr->log_owned);
	xagb_bitmap_destroy(&cr->fs_owned);
	kfree(cr);
	return error;
}

/* xref check that the extent is owned only by a given owner */
void
xchk_xref_is_only_owned_by(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo)
{
	struct xfs_rmap_matches		res;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_count_owners(sc->sa.rmap_cur, bno, len, oinfo, &res);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (res.matches != 1)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	if (res.bad_non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	if (res.non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* xref check that the extent is not owned by a given owner */
void
xchk_xref_is_not_owned_by(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo)
{
	struct xfs_rmap_matches		res;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_count_owners(sc->sa.rmap_cur, bno, len, oinfo, &res);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (res.matches != 0)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	if (res.bad_non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* xref check that the extent has no reverse mapping at all */
void
xchk_xref_has_no_owner(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_has_records(sc->sa.rmap_cur, bno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}