// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "xfs_ag.h"

/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and the inode,
 * try again after forcing logged inode cores out to disk.
 */
int
xchk_setup_ag_iallocbt(
	struct xfs_scrub	*sc)
{
	return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER);
}

/* Inode btree scrubber. */

struct xchk_iallocbt {
	/* Number of inodes we see while scanning inobt. */
	unsigned long long	inodes;

	/* Expected next startino, for big block filesystems. */
	xfs_agino_t		next_startino;

	/* Expected end of the current inode cluster. */
	xfs_agino_t		next_cluster_ino;
};

/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is a finobt, make sure
 * we have a record or not depending on freecount.
 */
static inline void
xchk_iallocbt_chunk_xref_other(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino)
{
	struct xfs_btree_cur		**pcur;
	bool				has_irec;
	int				error;

	if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
		pcur = &sc->sa.ino_cur;
	else
		pcur = &sc->sa.fino_cur;
	if (!(*pcur))
		return;
	error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
	if (!xchk_should_check_xref(sc, &error, pcur))
		return;
	if ((irec->ir_freecount > 0 && !has_irec) ||
	    (irec->ir_freecount == 0 && has_irec))
		xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_iallocbt_chunk_xref(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_iallocbt_chunk_xref_other(sc, irec, agino);
	xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
	xchk_xref_is_not_shared(sc, agbno, len);
}

/* Is this chunk worth checking? */
STATIC bool
xchk_iallocbt_chunk(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_extlen_t			len)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_perag		*pag = bs->cur->bc_ag.pag;
	xfs_agblock_t			bno;

	bno = XFS_AGINO_TO_AGBNO(mp, agino);

	if (!xfs_verify_agbext(pag, bno, len))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

	return true;
}

/* Count the number of free inodes. */
static unsigned int
xchk_iallocbt_freecount(
	xfs_inofree_t			freemask)
{
	BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
	return hweight64(freemask);
}
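/*
 * Worked example of the freecount math: each inobt record covers
 * XFS_INODES_PER_CHUNK (64) inodes and ir_free carries one bit per inode,
 * so the free inode count is simply the population count of the mask.  A
 * wholly free chunk has ir_free == XFS_INOBT_ALL_FREE and hweight64()
 * returns 64; ir_free == 0xf would mean only the first four inodes of the
 * chunk are free.
 */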
/*
 * Check that an inode's allocation status matches ir_free in the inobt
 * record.  First we try querying the in-core inode state, and if the inode
 * isn't loaded we examine the on-disk inode directly.
 *
 * Since there can be 1:M and M:1 mappings between inobt records and inode
 * clusters, we pass in the inode location information as an inobt record;
 * the index of an inode cluster within the inobt record (as well as the
 * cluster buffer itself); and the index of the inode within the cluster.
 *
 * @irec is the inobt record.
 * @irec_ino is the inode offset from the start of the record.
 * @dip is the on-disk inode.
 */
STATIC int
xchk_iallocbt_check_cluster_ifree(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			irec_ino,
	struct xfs_dinode		*dip)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_ino_t			fsino;
	xfs_agino_t			agino;
	bool				irec_free;
	bool				ino_inuse;
	bool				freemask_ok;
	int				error = 0;

	if (xchk_should_terminate(bs->sc, &error))
		return error;

	/*
	 * Given an inobt record and the offset of an inode from the start of
	 * the record, compute which fs inode we're talking about.
	 */
	agino = irec->ir_startino + irec_ino;
	fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.pag->pag_agno, agino);
	irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));

	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
	    (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino,
			&ino_inuse);
	if (error == -ENODATA) {
		/* Not cached, just read the disk buffer */
		freemask_ok = irec_free ^ !!(dip->di_mode);
		if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
			return -EDEADLOCK;
	} else if (error < 0) {
		/*
		 * Inode is only half assembled, or there was an IO error,
		 * or the verifier failed, so don't bother trying to check.
		 * The inode scrubber can deal with this.
		 */
		goto out;
	} else {
		/* Inode is all there. */
		freemask_ok = irec_free ^ ino_inuse;
	}
	if (!freemask_ok)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
	return 0;
}
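/*
 * The freemask_ok test above is an XOR truth table: exactly one of "the
 * record says this inode is free" and "the inode is actually in use" must
 * hold.  A set free bit paired with an unused inode passes (1 ^ 0), as does
 * a clear bit paired with an in-use inode (0 ^ 1); the other two
 * combinations mean the inobt and the inode disagree, so we mark the btree
 * record corrupt.
 */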
/*
 * Check that the holemask and freemask of a hypothetical inode cluster match
 * what's actually on disk.  If sparse inodes are enabled, the cluster does
 * not actually have to map to inodes if the corresponding holemask bit is set.
 *
 * @cluster_base is the first inode in the cluster within the @irec.
 */
STATIC int
xchk_iallocbt_check_cluster(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			cluster_base)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_buf			*cluster_bp;
	unsigned int			nr_inodes;
	xfs_agnumber_t			agno = bs->cur->bc_ag.pag->pag_agno;
	xfs_agblock_t			agbno;
	unsigned int			cluster_index;
	uint16_t			cluster_mask = 0;
	uint16_t			ir_holemask;
	int				error = 0;

	nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
			M_IGEO(mp)->inodes_per_cluster);

	/* Map this inode cluster */
	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);

	/* Compute a bitmask for this cluster that can be used for holemask. */
	for (cluster_index = 0;
	     cluster_index < nr_inodes;
	     cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
		cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
				XFS_INODES_PER_HOLEMASK_BIT);

	/*
	 * Map the first inode of this cluster to a buffer and offset.
	 * Be careful about inobt records that don't align with the start of
	 * the inode buffer when block sizes are large enough to hold multiple
	 * inode chunks.  When this happens, cluster_base will be zero but
	 * ir_startino can be large enough to make im_boffset nonzero.
	 */
	ir_holemask = (irec->ir_holemask & cluster_mask);
	imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
	imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
	imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
			mp->m_sb.sb_inodelog;

	if (imap.im_boffset != 0 && cluster_base != 0) {
		ASSERT(imap.im_boffset == 0 || cluster_base == 0);
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
			imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
			cluster_mask, ir_holemask,
			XFS_INO_TO_OFFSET(mp, irec->ir_startino +
					  cluster_base));

	/* The whole cluster must be a hole or not a hole. */
	if (ir_holemask != cluster_mask && ir_holemask != 0) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	/* If any part of this is a hole, skip it. */
	if (ir_holemask) {
		xchk_xref_is_not_owned_by(bs->sc, agbno,
				M_IGEO(mp)->blocks_per_cluster,
				&XFS_RMAP_OINFO_INODES);
		return 0;
	}

	xchk_xref_is_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster,
			&XFS_RMAP_OINFO_INODES);

	/* Grab the inode cluster buffer. */
	error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &cluster_bp);
	if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
		return error;

	/* Check free status of each inode within this cluster. */
	for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
		struct xfs_dinode	*dip;

		if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			break;
		}

		dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
		error = xchk_iallocbt_check_cluster_ifree(bs, irec,
				cluster_base + cluster_index, dip);
		if (error)
			break;
		imap.im_boffset += mp->m_sb.sb_inodesize;
	}

	xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
	return error;
}
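/*
 * Worked example of the cluster_mask computation: the 16-bit holemask
 * covers a 64-inode chunk, so each holemask bit stands for
 * XFS_INODES_PER_HOLEMASK_BIT (4) inodes.  Assuming a geometry with 8
 * inodes per cluster, the cluster at cluster_base 16 spans inodes 16-23 of
 * the chunk, so the loop sets holemask bits 16/4 = 4 and 20/4 = 5, giving
 * cluster_mask = 0x0030.  ir_holemask for that cluster must then be either
 * 0x0030 (entirely a hole) or 0 (entirely allocated).
 */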
/*
 * For all the inode clusters that could map to this inobt record, make sure
 * that the holemask makes sense and that the allocation status of each inode
 * matches the freemask.
 */
STATIC int
xchk_iallocbt_check_clusters(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	unsigned int			cluster_base;
	int				error = 0;

	/*
	 * For the common case where this inobt record maps to multiple inode
	 * clusters this will call _check_cluster for each cluster.
	 *
	 * For the case that multiple inobt records map to a single cluster,
	 * this will call _check_cluster once.
	 */
	for (cluster_base = 0;
	     cluster_base < XFS_INODES_PER_CHUNK;
	     cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) {
		error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
		if (error)
			break;
	}

	return error;
}

/*
 * Make sure this inode btree record is aligned properly.  Because a fs block
 * contains multiple inodes, we check that the inobt record is aligned to the
 * correct inode, not just the correct block on disk.  This results in a finer
 * grained corruption check.
 */
STATIC void
xchk_iallocbt_rec_alignment(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_mount		*mp = bs->sc->mp;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_ino_geometry		*igeo = M_IGEO(mp);

	/*
	 * finobt records have different positioning requirements than inobt
	 * records: each finobt record must have a corresponding inobt record.
	 * That is checked in the xref function, so for now we only catch the
	 * obvious case where the record isn't at all aligned properly.
	 *
	 * Note that if a fs block contains more than a single chunk of inodes,
	 * we will have finobt records only for those chunks containing free
	 * inodes, and therefore expect chunk alignment of finobt records.
	 * Otherwise, we expect that the finobt record is aligned to the
	 * cluster alignment as told by the superblock.
	 */
	if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
		unsigned int	imask;

		imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
				igeo->cluster_align_inodes) - 1;
		if (irec->ir_startino & imask)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (iabt->next_startino != NULLAGINO) {
		/*
		 * We're midway through a cluster of inodes that is mapped by
		 * multiple inobt records.  Did we get the record for the next
		 * irec in the sequence?
		 */
		if (irec->ir_startino != iabt->next_startino) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			return;
		}

		iabt->next_startino += XFS_INODES_PER_CHUNK;

		/* Are we done with the cluster? */
		if (iabt->next_startino >= iabt->next_cluster_ino) {
			iabt->next_startino = NULLAGINO;
			iabt->next_cluster_ino = NULLAGINO;
		}
		return;
	}

	/* inobt records must be aligned to cluster and inoalignment size. */
	if (irec->ir_startino & (igeo->cluster_align_inodes - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (irec->ir_startino & (igeo->inodes_per_cluster - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (igeo->inodes_per_cluster <= XFS_INODES_PER_CHUNK)
		return;

	/*
	 * If this is the start of an inode cluster that can be mapped by
	 * multiple inobt records, the next inobt record must follow exactly
	 * after this one.
	 */
	iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
	iabt->next_cluster_ino = irec->ir_startino + igeo->inodes_per_cluster;
}
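/*
 * Example of the next_startino bookkeeping above: assuming a big-block
 * geometry with 128 inodes per cluster, a cluster starting at agino 512 is
 * mapped by two 64-inode inobt records.  When we see the record at startino
 * 512 we set next_startino = 576 and next_cluster_ino = 640; the following
 * record must start at exactly 576, after which both fields reset to
 * NULLAGINO.
 */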
/* Scrub an inobt/finobt record. */
STATIC int
xchk_iallocbt_rec(
	struct xchk_btree		*bs,
	const union xfs_btree_rec	*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_perag		*pag = bs->cur->bc_ag.pag;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_inobt_rec_incore	irec;
	uint64_t			holes;
	xfs_agino_t			agino;
	xfs_extlen_t			len;
	int				holecount;
	int				i;
	int				error = 0;
	unsigned int			real_freecount;
	uint16_t			holemask;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);

	if (irec.ir_count > XFS_INODES_PER_CHUNK ||
	    irec.ir_freecount > XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	real_freecount = irec.ir_freecount +
			(XFS_INODES_PER_CHUNK - irec.ir_count);
	if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	agino = irec.ir_startino;
	/* Record has to be properly aligned within the AG. */
	if (!xfs_verify_agino(pag, agino) ||
	    !xfs_verify_agino(pag, agino + XFS_INODES_PER_CHUNK - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	xchk_iallocbt_rec_alignment(bs, &irec);
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	iabt->inodes += irec.ir_count;

	/* Handle non-sparse inodes */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		len = XFS_B_TO_FSB(mp,
				XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
		if (irec.ir_count != XFS_INODES_PER_CHUNK)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

		if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			goto out;
		goto check_clusters;
	}

	/* Check each chunk of a sparse inode cluster. */
	holemask = irec.ir_holemask;
	holecount = 0;
	len = XFS_B_TO_FSB(mp,
			XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
	holes = ~xfs_inobt_irec_to_allocmask(&irec);
	if ((holes & irec.ir_free) != holes ||
	    irec.ir_freecount > irec.ir_count)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
		if (holemask & 1)
			holecount += XFS_INODES_PER_HOLEMASK_BIT;
		else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			break;
		holemask >>= 1;
		agino += XFS_INODES_PER_HOLEMASK_BIT;
	}

	if (holecount > XFS_INODES_PER_CHUNK ||
	    holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

check_clusters:
	error = xchk_iallocbt_check_clusters(bs, &irec);
	if (error)
		goto out;

out:
	return error;
}
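/*
 * Example of the freecount sanity checks above: a sparse record with
 * ir_count = 32 tracks only half of the 64-inode chunk, so the 32 untracked
 * inodes always count as free.  With ir_freecount = 10, real_freecount is
 * 10 + (64 - 32) = 42, and hweight64(ir_free) must also be 42 because the
 * hole bits are required to be set in ir_free.  Likewise the holemask walk
 * must find holecount = 32 so that holecount + ir_count == 64.
 */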
/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 * Don't bother if we're missing btree cursors, as we're already corrupt.
 */
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
	struct xfs_scrub	*sc,
	int			which)
{
	xfs_filblks_t		blocks;
	xfs_extlen_t		inobt_blocks = 0;
	xfs_extlen_t		finobt_blocks = 0;
	int			error;

	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
	    (xfs_has_finobt(sc->mp) && !sc->sa.fino_cur) ||
	    xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inobt blocks as the rmap says. */
	error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
	if (!xchk_process_error(sc, 0, 0, &error))
		return;

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
		if (!xchk_process_error(sc, 0, 0, &error))
			return;
	}

	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INOBT, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inobt_blocks + finobt_blocks)
		xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}

/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */
STATIC void
xchk_iallocbt_xref_rmap_inodes(
	struct xfs_scrub	*sc,
	int			which,
	unsigned long long	inodes)
{
	xfs_filblks_t		blocks;
	xfs_filblks_t		inode_blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inode blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INODES, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
	if (blocks != inode_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* Scrub the inode btrees for some AG. */
STATIC int
xchk_iallocbt(
	struct xfs_scrub	*sc,
	xfs_btnum_t		which)
{
	struct xfs_btree_cur	*cur;
	struct xchk_iallocbt	iabt = {
		.inodes		= 0,
		.next_startino	= NULLAGINO,
		.next_cluster_ino = NULLAGINO,
	};
	int			error;

	cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
	error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
			&iabt);
	if (error)
		return error;

	xchk_iallocbt_xref_rmap_btreeblks(sc, which);

	/*
	 * If we're scrubbing the inode btree, inode_blocks is the number of
	 * blocks pointed to by all the inode chunk records.  Therefore, we
	 * should compare to the number of inode chunk blocks that the rmap
	 * knows about.  We can't do this for the finobt since it only points
	 * to inode chunks with free inodes.
	 */
	if (which == XFS_BTNUM_INO)
		xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);

	return error;
}

int
xchk_inobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_INO);
}

int
xchk_finobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}
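/*
 * Example of the block count comparison above: if the scan counted
 * iabt.inodes = 6400 inodes on a filesystem with 512-byte inodes and
 * 4096-byte blocks, the chunk records cover XFS_B_TO_FSB(6400 * 512) = 800
 * blocks, and the rmap must attribute exactly 800 blocks to
 * XFS_RMAP_OINFO_INODES.
 */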
/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xchk_xref_inode_check(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	struct xfs_btree_cur	**icur,
	bool			should_have_inodes)
{
	bool			has_inodes;
	int			error;

	if (!(*icur) || xchk_skip_xref(sc->sm))
		return;

	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
	if (!xchk_should_check_xref(sc, &error, icur))
		return;
	if (has_inodes != should_have_inodes)
		xchk_btree_xref_set_corrupt(sc, *icur, 0);
}

/* xref check that the extent is not covered by inodes */
void
xchk_xref_is_not_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}

/* xref check that the extent is covered by inodes */
void
xchk_xref_is_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}
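/*
 * These two helpers are the entry points other scrubbers use to
 * cross-reference an extent against the inode btrees: a scrubber that
 * believes an extent is free space, for example, can call
 * xchk_xref_is_not_inode_chunk() to assert that neither the inobt nor the
 * finobt claims it, while one that believes the extent holds inode chunks
 * can call xchk_xref_is_inode_chunk() (inobt only, since the finobt omits
 * chunks with no free inodes).
 */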