// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_exchmaps.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtgroup.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_bmap_util.h"
#include "xfs_rtrefcount_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/health.h"
#include "scrub/tempfile.h"

/* Common code for the metadata scrubbers. */

/*
 * Handling operational errors.
 *
 * The *_process_error() family of functions are used to process error return
 * codes from functions called as part of a scrub operation.
 *
 * If there's no error, we return true to tell the caller that it's ok
 * to move on to the next check in its list.
 *
 * For non-verifier errors (e.g. ENOMEM) we return false to tell the
 * caller that something bad happened, and we preserve *error so that
 * the caller can return the *error up the stack to userspace.
 *
 * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
 * OFLAG_CORRUPT in sm_flags and the *error is cleared.  In other words,
 * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
 * not via return codes.  We return false to tell the caller that
 * something bad happened.  Since the error has been cleared, the caller
 * will (presumably) return that zero and scrubbing will move on to
 * whatever's next.
 *
 * ftrace can be used to record the precise metadata location and the
 * approximate code location of the failed operation.
 */

/* Check for operational errors. */
static bool
__xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
	case -ECHRNG:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(
				sc->ip ? sc->ip : XFS_I(file_inode(sc->file)),
				sc->sm, *error);
		break;
	case -ECANCELED:
		/*
		 * ECANCELED here means that the caller set one of the scrub
		 * outcome flags (corrupt, xfail, xcorrupt) and wants to exit
		 * quickly.  Set error to zero and do not continue.
		 */
		trace_xchk_op_error(sc, agno, bno, *error, ret_ip);
		*error = 0;
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_op_error(sc, agno, bno, *error, ret_ip);
		break;
	}
	return false;
}

bool
xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_process_rt_error(
	struct xfs_scrub	*sc,
	xfs_rgnumber_t		rgno,
	xfs_rgblock_t		rgbno,
	int			*error)
{
	return __xchk_process_error(sc, rgno, rgbno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_xref_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/* Check for operational errors for a file offset. */
static bool
__xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
	case -ECHRNG:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -ECANCELED:
		/*
		 * ECANCELED here means that the caller set one of the scrub
		 * outcome flags (corrupt, xfail, xcorrupt) and wants to exit
		 * quickly.  Set error to zero and do not continue.
		 */
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		*error = 0;
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		break;
	}
	return false;
}

bool
xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_fblock_xref_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}
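
/*
 * Illustrative usage sketch (xfs_foo_lookup() is a placeholder name, not a
 * real helper): callers typically feed a libxfs return code through
 * xchk_process_error() so that verifier failures are recorded in sm_flags
 * while fatal errors propagate back to userspace:
 *
 *	error = xfs_foo_lookup(sc->sa.bno_cur, agbno, &rec);
 *	if (!xchk_process_error(sc, agno, agbno, &error))
 *		return error;
 *	(continue with the next check)
 */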

/*
 * Handling scrub corruption/optimization/warning checks.
 *
 * The *_set_{corrupt,preen,warning}() family of functions are used to
 * record the presence of metadata that is incorrect (corrupt), could be
 * optimized somehow (preen), or should be flagged for administrative
 * review but is not incorrect (warn).
 *
 * ftrace can be used to record the precise metadata location and
 * approximate code location of the failed check.
 */

/* Record a block which could be optimized. */
void
xchk_block_set_preen(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_block_preen(sc, xfs_buf_daddr(bp), __return_address);
}

/*
 * Record an inode which could be optimized.  The trace data will
 * include the block given by bp if bp is given; otherwise it will use
 * the block location of the inode record itself.
 */
void
xchk_ino_set_preen(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_ino_preen(sc, ino, __return_address);
}

/* Record something being wrong with the filesystem primary superblock. */
void
xchk_set_corrupt(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fs_error(sc, 0, __return_address);
}

/* Record a corrupt block. */
void
xchk_block_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}

#ifdef CONFIG_XFS_QUOTA
/* Record a corrupt quota counter. */
void
xchk_qcheck_set_corrupt(
	struct xfs_scrub	*sc,
	unsigned int		dqtype,
	xfs_dqid_t		id)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_qcheck_error(sc, dqtype, id, __return_address);
}
#endif

/* Record a corruption while cross-referencing. */
void
xchk_block_xref_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}

/*
 * Record a corrupt inode.  The trace data will include the block given
 * by bp if bp is given; otherwise it will use the block location of the
 * inode record itself.
 */
void
xchk_ino_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record a corruption while cross-referencing with an inode. */
void
xchk_ino_xref_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record corruption in a block indexed by a file fork. */
void
xchk_fblock_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/* Record a corruption while cross-referencing a fork block. */
void
xchk_fblock_xref_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/*
 * Warn about inodes that need administrative review but are not
 * incorrect.
 */
void
xchk_ino_set_warning(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_ino_warning(sc, ino, __return_address);
}

/* Warn about a block indexed by a file fork that needs review. */
void
xchk_fblock_set_warning(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
}
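
/*
 * Illustrative usage sketch: a scrubber that spots an inconsistent ondisk
 * value flags it and keeps going rather than returning an error; the AGF
 * length check below is just one example of the pattern:
 *
 *	if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_agblocks)
 *		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
 */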

/* Signal an incomplete scrub. */
void
xchk_set_incomplete(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
	trace_xchk_incomplete(sc, __return_address);
}

/*
 * rmap scrubbing -- compute the number of blocks with a given owner,
 * at least according to the reverse mapping data.
 */

struct xchk_rmap_ownedby_info {
	const struct xfs_owner_info	*oinfo;
	xfs_filblks_t			*blocks;
};

STATIC int
xchk_count_rmap_ownedby_irec(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xchk_rmap_ownedby_info	*sroi = priv;
	bool				irec_attr;
	bool				oinfo_attr;

	irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
	oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;

	if (rec->rm_owner != sroi->oinfo->oi_owner)
		return 0;

	if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
		(*sroi->blocks) += rec->rm_blockcount;

	return 0;
}

/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an rmapbt cursor.
 */
int
xchk_count_rmap_ownedby_ag(
	struct xfs_scrub		*sc,
	struct xfs_btree_cur		*cur,
	const struct xfs_owner_info	*oinfo,
	xfs_filblks_t			*blocks)
{
	struct xchk_rmap_ownedby_info	sroi = {
		.oinfo			= oinfo,
		.blocks			= blocks,
	};

	*blocks = 0;
	return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
			&sroi);
}

/*
 * AG scrubbing
 *
 * These helpers facilitate locking an allocation group's header
 * buffers, setting up cursors for all btrees that are present, and
 * cleaning everything up once we're through.
 */

/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
	struct xfs_scrub	*sc,
	unsigned int		type)
{
	/* Return all AG header read failures when scanning btrees. */
	if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
		return true;
	/*
	 * If we're scanning a given type of AG header, we only want to
	 * see read failures from that specific header.  We'd like the
	 * other headers to cross-check them, but this isn't required.
	 */
	if (sc->sm->sm_type == type)
		return true;
	return false;
}

/*
 * Grab the AG header buffers for the attached perag structure.
 *
 * The headers should be released by xchk_ag_free, but as a fail safe we attach
 * all the buffers we grab to the scrub transaction so they'll all be freed
 * when we cancel it.
 */
static inline int
xchk_perag_read_headers(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	int			error;

	error = xfs_ialloc_read_agi(sa->pag, sc->tp, 0, &sa->agi_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		return error;

	error = xfs_alloc_read_agf(sa->pag, sc->tp, 0, &sa->agf_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		return error;

	return 0;
}

/*
 * Grab the AG headers for the attached perag structure and wait for pending
 * intents to drain.
 */
int
xchk_perag_drain_and_lock(
	struct xfs_scrub	*sc)
{
	struct xchk_ag		*sa = &sc->sa;
	int			error = 0;

	ASSERT(sa->pag != NULL);
	ASSERT(sa->agi_bp == NULL);
	ASSERT(sa->agf_bp == NULL);

	do {
		if (xchk_should_terminate(sc, &error))
			return error;

		error = xchk_perag_read_headers(sc, sa);
		if (error)
			return error;

		/*
		 * If we've grabbed an inode for scrubbing then we assume that
		 * holding its ILOCK will suffice to coordinate with any intent
		 * chains involving this inode.
		 */
		if (sc->ip)
			return 0;

		/*
		 * Decide if this AG is quiet enough for all metadata to be
		 * consistent with each other.  XFS allows the AG header buffer
		 * locks to cycle across transaction rolls while processing
		 * chains of deferred ops, which means that there could be
		 * other threads in the middle of processing a chain of
		 * deferred ops.  For regular operations we are careful about
		 * ordering operations to prevent collisions between threads
		 * (which is why we don't need a per-AG lock), but scrub and
		 * repair have to serialize against chained operations.
		 *
		 * We just locked all the AG header buffers; now take a look
		 * to see if there are any intents in progress.  If there are,
		 * drop the AG headers and wait for the intents to drain.
		 * Since we hold all the AG header locks for the duration of
		 * the scrub, this is the only time we have to sample the
		 * intents counter; any threads increasing it after this point
		 * can't possibly be in the middle of a chain of AG metadata
		 * updates.
		 *
		 * Obviously, this should be slanted against scrub and in favor
		 * of runtime threads.
		 */
		if (!xfs_group_intent_busy(pag_group(sa->pag)))
			return 0;

		if (sa->agf_bp) {
			xfs_trans_brelse(sc->tp, sa->agf_bp);
			sa->agf_bp = NULL;
		}

		if (sa->agi_bp) {
			xfs_trans_brelse(sc->tp, sa->agi_bp);
			sa->agi_bp = NULL;
		}

		if (!(sc->flags & XCHK_FSGATES_DRAIN))
			return -ECHRNG;
		error = xfs_group_intent_drain(pag_group(sa->pag));
		if (error == -ERESTARTSYS)
			error = -EINTR;
	} while (!error);

	return error;
}

/*
 * Grab the per-AG structure, grab all AG header buffers, and wait until there
 * aren't any pending intents.  Returns -ENOENT if we can't grab the perag
 * structure.
 */
int
xchk_ag_read_headers(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	ASSERT(!sa->pag);
	sa->pag = xfs_perag_get(mp, agno);
	if (!sa->pag)
		return -ENOENT;

	return xchk_perag_drain_and_lock(sc);
}

/* Release all the AG btree cursors. */
void
xchk_ag_btcur_free(
	struct xchk_ag		*sa)
{
	if (sa->refc_cur)
		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
	if (sa->rmap_cur)
		xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
	if (sa->fino_cur)
		xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
	if (sa->ino_cur)
		xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
	if (sa->cnt_cur)
		xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
	if (sa->bno_cur)
		xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

	sa->refc_cur = NULL;
	sa->rmap_cur = NULL;
	sa->fino_cur = NULL;
	sa->ino_cur = NULL;
	sa->bno_cur = NULL;
	sa->cnt_cur = NULL;
}
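
/*
 * Rough lifecycle sketch for the AG scrub context (typical usage, not a
 * requirement imposed by these helpers): xchk_ag_init() during setup grabs
 * the perag, the header buffers, and the cursors below; the scrubber then
 * cross-references against sc->sa.*_cur; xchk_ag_btcur_free() drops the
 * cursors before the scrub transaction goes away; and xchk_ag_free()
 * releases whatever is left during teardown.
 */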

/* Initialize all the btree cursors for an AG. */
void
xchk_ag_btcur_init(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	if (sa->agf_bp) {
		/* Set up a bnobt cursor for cross-referencing. */
		sa->bno_cur = xfs_bnobt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->bno_cur,
				XFS_SCRUB_TYPE_BNOBT);

		/* Set up a cntbt cursor for cross-referencing. */
		sa->cnt_cur = xfs_cntbt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->cnt_cur,
				XFS_SCRUB_TYPE_CNTBT);

		/* Set up a rmapbt cursor for cross-referencing. */
		if (xfs_has_rmapbt(mp)) {
			sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp,
					sa->agf_bp, sa->pag);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->rmap_cur,
					XFS_SCRUB_TYPE_RMAPBT);
		}

		/* Set up a refcountbt cursor for cross-referencing. */
		if (xfs_has_reflink(mp)) {
			sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
					sa->agf_bp, sa->pag);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->refc_cur,
					XFS_SCRUB_TYPE_REFCNTBT);
		}
	}

	if (sa->agi_bp) {
		/* Set up an inobt cursor for cross-referencing. */
		sa->ino_cur = xfs_inobt_init_cursor(sa->pag, sc->tp,
				sa->agi_bp);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->ino_cur,
				XFS_SCRUB_TYPE_INOBT);

		/* Set up a finobt cursor for cross-referencing. */
		if (xfs_has_finobt(mp)) {
			sa->fino_cur = xfs_finobt_init_cursor(sa->pag, sc->tp,
					sa->agi_bp);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->fino_cur,
					XFS_SCRUB_TYPE_FINOBT);
		}
	}
}

/* Release the AG header context and btree cursors. */
void
xchk_ag_free(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	xchk_ag_btcur_free(sa);
	xrep_reset_perag_resv(sc);
	if (sa->agf_bp) {
		xfs_trans_brelse(sc->tp, sa->agf_bp);
		sa->agf_bp = NULL;
	}
	if (sa->agi_bp) {
		xfs_trans_brelse(sc->tp, sa->agi_bp);
		sa->agi_bp = NULL;
	}
	if (sa->pag) {
		xfs_perag_put(sa->pag);
		sa->pag = NULL;
	}
}

/*
 * For scrub, grab the perag structure, the AGI, and the AGF headers, in that
 * order.  Locking order requires us to get the AGI before the AGF.  We use the
 * transaction to avoid deadlocking on crosslinked metadata buffers; either the
 * caller passes one in (bmap scrub) or we have to create a transaction
 * ourselves.  Returns ENOENT if the perag struct cannot be grabbed.
 */
int
xchk_ag_init(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int			error;

	error = xchk_ag_read_headers(sc, agno, sa);
	if (error)
		return error;

	xchk_ag_btcur_init(sc, sa);
	return 0;
}

#ifdef CONFIG_XFS_RT
/*
 * For scrubbing a realtime group, grab all the in-core resources we'll need to
 * check the metadata, which means taking the ILOCK of the realtime group's
 * metadata inodes.  Callers must not join these inodes to the transaction with
 * non-zero lockflags or concurrency problems will result.  The @rtglock_flags
 * argument takes XFS_RTGLOCK_* flags.
 */
int
xchk_rtgroup_init(
	struct xfs_scrub	*sc,
	xfs_rgnumber_t		rgno,
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg == NULL);
	ASSERT(sr->rtlock_flags == 0);

	sr->rtg = xfs_rtgroup_get(sc->mp, rgno);
	if (!sr->rtg)
		return -ENOENT;
	return 0;
}
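
/*
 * Rough order of operations for a realtime group scrubber, following the
 * constraints spelled out in the comments below: xchk_rtgroup_init() and
 * xchk_rtgroup_lock() during setup, xchk_rtgroup_btcur_free() before the
 * scrub transaction is committed or cancelled, then xchk_rtgroup_unlock()
 * and xchk_rtgroup_free() afterwards.
 */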

/* Lock all the rt group metadata inode ILOCKs and wait for intents. */
int
xchk_rtgroup_lock(
	struct xfs_scrub	*sc,
	struct xchk_rt		*sr,
	unsigned int		rtglock_flags)
{
	int			error = 0;

	ASSERT(sr->rtg != NULL);

	/*
	 * If we're /only/ locking the rtbitmap in shared mode, then we're
	 * obviously not trying to compare records in two metadata inodes.
	 * There's no need to drain intents here because the caller (most
	 * likely the rgsuper scanner) doesn't need that level of consistency.
	 */
	if (rtglock_flags == XFS_RTGLOCK_BITMAP_SHARED) {
		xfs_rtgroup_lock(sr->rtg, rtglock_flags);
		sr->rtlock_flags = rtglock_flags;
		return 0;
	}

	do {
		if (xchk_should_terminate(sc, &error))
			return error;

		xfs_rtgroup_lock(sr->rtg, rtglock_flags);

		/*
		 * If we've grabbed a non-metadata file for scrubbing, we
		 * assume that holding its ILOCK will suffice to coordinate
		 * with any rt intent chains involving this inode.
		 */
		if (sc->ip && !xfs_is_internal_inode(sc->ip))
			break;

		/*
		 * Decide if the rt group is quiet enough for all metadata to
		 * be consistent with each other.  Regular file IO doesn't get
		 * to lock all the rt inodes at the same time, which means that
		 * there could be other threads in the middle of processing a
		 * chain of deferred ops.
		 *
		 * We just locked all the metadata inodes for this rt group;
		 * now take a look to see if there are any intents in progress.
		 * If there are, drop the rt group inode locks and wait for the
		 * intents to drain.  Since we hold the rt group inode locks
		 * for the duration of the scrub, this is the only time we have
		 * to sample the intents counter; any threads increasing it
		 * after this point can't possibly be in the middle of a chain
		 * of rt metadata updates.
		 *
		 * Obviously, this should be slanted against scrub and in favor
		 * of runtime threads.
		 */
		if (!xfs_group_intent_busy(rtg_group(sr->rtg)))
			break;

		xfs_rtgroup_unlock(sr->rtg, rtglock_flags);

		if (!(sc->flags & XCHK_FSGATES_DRAIN))
			return -ECHRNG;
		error = xfs_group_intent_drain(rtg_group(sr->rtg));
		if (error) {
			if (error == -ERESTARTSYS)
				error = -EINTR;
			return error;
		}
	} while (1);

	sr->rtlock_flags = rtglock_flags;

	if (xfs_has_rtrmapbt(sc->mp) && (rtglock_flags & XFS_RTGLOCK_RMAP))
		sr->rmap_cur = xfs_rtrmapbt_init_cursor(sc->tp, sr->rtg);

	if (xfs_has_rtreflink(sc->mp) && (rtglock_flags & XFS_RTGLOCK_REFCOUNT))
		sr->refc_cur = xfs_rtrefcountbt_init_cursor(sc->tp, sr->rtg);

	return 0;
}

/*
 * Free all the btree cursors and other incore data relating to the realtime
 * group.  This has to be done /before/ committing (or cancelling) the scrub
 * transaction.
 */
void
xchk_rtgroup_btcur_free(
	struct xchk_rt		*sr)
{
	if (sr->rmap_cur)
		xfs_btree_del_cursor(sr->rmap_cur, XFS_BTREE_ERROR);
	if (sr->refc_cur)
		xfs_btree_del_cursor(sr->refc_cur, XFS_BTREE_ERROR);

	sr->refc_cur = NULL;
	sr->rmap_cur = NULL;
}

/*
 * Unlock the realtime group.  This must be done /after/ committing (or
 * cancelling) the scrub transaction.
 */
void
xchk_rtgroup_unlock(
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg != NULL);

	if (sr->rtlock_flags) {
		xfs_rtgroup_unlock(sr->rtg, sr->rtlock_flags);
		sr->rtlock_flags = 0;
	}
}

/*
 * Unlock the realtime group and release its resources.  This must be done
 * /after/ committing (or cancelling) the scrub transaction.
 */
void
xchk_rtgroup_free(
	struct xfs_scrub	*sc,
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg != NULL);

	xchk_rtgroup_unlock(sr);

	xfs_rtgroup_put(sr->rtg);
	sr->rtg = NULL;
}
#endif /* CONFIG_XFS_RT */

/* Per-scrubber setup functions */

void
xchk_trans_cancel(
	struct xfs_scrub	*sc)
{
	xfs_trans_cancel(sc->tp);
	sc->tp = NULL;
}

int
xchk_trans_alloc_empty(
	struct xfs_scrub	*sc)
{
	return xfs_trans_alloc_empty(sc->mp, &sc->tp);
}

/*
 * Grab an empty transaction so that we can re-grab locked buffers if
 * one of our btrees turns out to be cyclic.
 *
 * If we're going to repair something, we need to ask for the largest possible
 * log reservation so that we can handle the worst case scenario for metadata
 * updates while rebuilding a metadata item.  We also need to reserve as many
 * blocks in the head transaction as we think we're going to need to rebuild
 * the metadata object.
 */
int
xchk_trans_alloc(
	struct xfs_scrub	*sc,
	uint			resblks)
{
	if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
		return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
				resblks, 0, 0, &sc->tp);

	return xchk_trans_alloc_empty(sc);
}

/* Set us up with a transaction and an empty context. */
int
xchk_setup_fs(
	struct xfs_scrub	*sc)
{
	uint			resblks;

	resblks = xrep_calc_ag_resblks(sc);
	return xchk_trans_alloc(sc, resblks);
}

/* Set us up with a transaction and an empty context to repair rt metadata. */
int
xchk_setup_rt(
	struct xfs_scrub	*sc)
{
	return xchk_trans_alloc(sc, xrep_calc_rtgroup_resblks(sc));
}

/* Set us up with AG headers and btree cursors. */
int
xchk_setup_ag_btree(
	struct xfs_scrub	*sc,
	bool			force_log)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	/*
	 * If the caller asks us to checkpoint the log, do so.  This
	 * expensive operation should be performed infrequently and only
	 * as a last resort.  Any caller that sets force_log should
	 * document why they need to do so.
	 */
	if (force_log) {
		error = xchk_checkpoint_log(mp);
		if (error)
			return error;
	}

	error = xchk_setup_fs(sc);
	if (error)
		return error;

	return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}

/* Push everything out of the log onto disk. */
int
xchk_checkpoint_log(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	xfs_ail_push_all_sync(mp->m_ail);
	return 0;
}

/* Verify that an inode is allocated ondisk, then return its cached inode. */
int
xchk_iget(
	struct xfs_scrub	*sc,
	xfs_ino_t		inum,
	struct xfs_inode	**ipp)
{
	ASSERT(sc->tp != NULL);

	return xfs_iget(sc->mp, sc->tp, inum, XCHK_IGET_FLAGS, 0, ipp);
}

/*
 * Try to grab an inode in a manner that avoids races with physical inode
 * allocation.  If we can't, return the locked AGI buffer so that the caller
 * can single-step the loading process to see where things went wrong.
 * Callers must have a valid scrub transaction.
 *
 * If the iget succeeds, return 0, a NULL AGI, and the inode.
 *
 * If the iget fails, return the error, the locked AGI, and a NULL inode.  This
 * can include -EINVAL and -ENOENT for invalid inode numbers or inodes that are
 * no longer allocated; or any other corruption or runtime error.
 *
 * If the AGI read fails, return the error, a NULL AGI, and NULL inode.
 *
 * If a fatal signal is pending, return -EINTR, a NULL AGI, and a NULL inode.
 */
int
xchk_iget_agi(
	struct xfs_scrub	*sc,
	xfs_ino_t		inum,
	struct xfs_buf		**agi_bpp,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_trans	*tp = sc->tp;
	struct xfs_perag	*pag;
	int			error;

	ASSERT(sc->tp != NULL);

again:
	*agi_bpp = NULL;
	*ipp = NULL;
	error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	/*
	 * Attach the AGI buffer to the scrub transaction to avoid deadlocks
	 * in the iget cache miss path.
	 */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	error = xfs_ialloc_read_agi(pag, tp, 0, agi_bpp);
	xfs_perag_put(pag);
	if (error)
		return error;

	error = xfs_iget(mp, tp, inum, XFS_IGET_NORETRY | XCHK_IGET_FLAGS, 0,
			ipp);
	if (error == -EAGAIN) {
		/*
		 * The inode may be in core but temporarily unavailable and may
		 * require the AGI buffer before it can be returned.  Drop the
		 * AGI buffer and retry the lookup.
		 *
		 * Incore lookup will fail with EAGAIN on a cache hit if the
		 * inode is queued to the inactivation list.  The inactivation
		 * worker may remove the inode from the unlinked list and hence
		 * needs the AGI.
		 *
		 * Hence xchk_iget_agi() needs to drop the AGI lock on EAGAIN
		 * to allow inodegc to make progress and move the inode to
		 * IRECLAIMABLE state where xfs_iget will be able to return it
		 * again if it can lock the inode.
		 */
		xfs_trans_brelse(tp, *agi_bpp);
		delay(1);
		goto again;
	}
	if (error)
		return error;

	/* We got the inode, so we can release the AGI. */
	ASSERT(*ipp != NULL);
	xfs_trans_brelse(tp, *agi_bpp);
	*agi_bpp = NULL;
	return 0;
}

#ifdef CONFIG_XFS_QUOTA
/*
 * Try to attach dquots to this inode if we think we might want to repair it.
 * Callers must not hold any ILOCKs.  If the dquots are broken and cannot be
 * attached, a quotacheck will be scheduled.
 */
int
xchk_ino_dqattach(
	struct xfs_scrub	*sc)
{
	ASSERT(sc->tp != NULL);
	ASSERT(sc->ip != NULL);

	if (!xchk_could_repair(sc))
		return 0;

	return xrep_ino_dqattach(sc);
}
#endif

/* Install an inode that we opened by handle for scrubbing. */
int
xchk_install_handle_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
		xchk_irele(sc, ip);
		return -ENOENT;
	}

	sc->ip = ip;
	return 0;
}

/*
 * Install an already-referenced inode for scrubbing.  Get our own reference to
 * the inode to make disposal simpler.  The inode must not be in I_FREEING or
 * I_WILL_FREE state!
 */
int
xchk_install_live_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (!igrab(VFS_I(ip))) {
		xchk_ino_set_corrupt(sc, ip->i_ino);
		return -EFSCORRUPTED;
	}

	sc->ip = ip;
	return 0;
}

/*
 * In preparation to scrub metadata structures that hang off of an inode,
 * grab either the inode referenced in the scrub control structure or the
 * inode passed in.  If the inumber does not reference an allocated inode
 * record, the function returns ENOENT to end the scrub early.  The inode
 * is not locked.
 */
int
xchk_iget_for_scrubbing(
	struct xfs_scrub	*sc)
{
	struct xfs_imap		imap;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag;
	struct xfs_buf		*agi_bp;
	struct xfs_inode	*ip_in = XFS_I(file_inode(sc->file));
	struct xfs_inode	*ip = NULL;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, sc->sm->sm_ino);
	int			error;

	ASSERT(sc->tp == NULL);

	/* We want to scan the inode we already had opened. */
	if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino)
		return xchk_install_live_inode(sc, ip_in);

	/*
	 * On pre-metadir filesystems, reject internal metadata files.  For
	 * metadir filesystems, limited scrubbing of any file in the metadata
	 * directory tree by handle is allowed, because that is the only way to
	 * validate the lack of parent pointers in the sb-root metadata inodes.
	 */
	if (!xfs_has_metadir(mp) && xfs_is_sb_inum(mp, sc->sm->sm_ino))
		return -ENOENT;
	/* Reject obviously bad inode numbers. */
	if (!xfs_verify_ino(sc->mp, sc->sm->sm_ino))
		return -ENOENT;

	/* Try a safe untrusted iget. */
	error = xchk_iget_safe(sc, sc->sm->sm_ino, &ip);
	if (!error)
		return xchk_install_handle_inode(sc, ip);
	if (error == -ENOENT)
		return error;
	if (error != -EINVAL)
		goto out_error;

	/*
	 * EINVAL with IGET_UNTRUSTED probably means one of several things:
	 * userspace gave us an inode number that doesn't correspond to fs
	 * space; the inode btree lacks a record for this inode; or there is a
	 * record, and it says this inode is free.
	 *
	 * We want to look up this inode in the inobt to distinguish two
	 * scenarios: (1) the inobt says the inode is free, in which case
	 * there's nothing to do; and (2) the inobt says the inode is
	 * allocated, but loading it failed due to corruption.
	 *
	 * Allocate a transaction and grab the AGI to prevent inobt activity
	 * in this AG.  Retry the iget in case someone allocated a new inode
	 * after the first iget failed.
	 */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out_error;

	error = xchk_iget_agi(sc, sc->sm->sm_ino, &agi_bp, &ip);
	if (error == 0) {
		/* Actually got the inode, so install it. */
		xchk_trans_cancel(sc);
		return xchk_install_handle_inode(sc, ip);
	}
	if (error == -ENOENT)
		goto out_gone;
	if (error != -EINVAL)
		goto out_cancel;

	/* Ensure that we have protected against inode allocation/freeing. */
	if (agi_bp == NULL) {
		ASSERT(agi_bp != NULL);
		error = -ECANCELED;
		goto out_cancel;
	}

	/*
	 * Untrusted iget failed a second time.  Let's try an inobt lookup.
	 * If the inobt thinks the inode neither exists inside the filesystem
	 * nor is allocated, return ENOENT to signal that the check can be
	 * skipped.
	 *
	 * If the lookup returns corruption, we'll mark this inode corrupt and
	 * exit to userspace.  There's little chance of fixing anything until
	 * the inobt is straightened out, but there's nothing we can do here.
	 *
	 * If the lookup encounters any other error, exit to userspace.
	 *
	 * If the lookup succeeds, something else must be very wrong in the fs
	 * such that setting up the incore inode failed in some strange way.
	 * Treat those as corruptions.
	 */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sc->sm->sm_ino));
	if (!pag) {
		error = -EFSCORRUPTED;
		goto out_cancel;
	}

	error = xfs_imap(pag, sc->tp, sc->sm->sm_ino, &imap,
			XFS_IGET_UNTRUSTED);
	xfs_perag_put(pag);
	if (error == -EINVAL || error == -ENOENT)
		goto out_gone;
	if (!error)
		error = -EFSCORRUPTED;

out_cancel:
	xchk_trans_cancel(sc);
out_error:
	trace_xchk_op_error(sc, agno, XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
			error, __return_address);
	return error;
out_gone:
	/* The file is gone, so there's nothing to check. */
	xchk_trans_cancel(sc);
	return -ENOENT;
}

/* Release an inode, possibly dropping it in the process. */
void
xchk_irele(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (sc->tp) {
		/*
		 * If we are in a transaction, we /cannot/ drop the inode
		 * ourselves, because the VFS will trigger writeback, which
		 * can require a transaction.  Clear DONTCACHE to force the
		 * inode to the LRU, where someone else can take care of
		 * dropping it.
		 *
		 * Note that when we grabbed our reference to the inode, it
		 * could have had an active ref and DONTCACHE set if a sysadmin
		 * is trying to coerce a change in file access mode.  icache
		 * hits do not clear DONTCACHE, so we must do it here.
		 */
		spin_lock(&VFS_I(ip)->i_lock);
		VFS_I(ip)->i_state &= ~I_DONTCACHE;
		spin_unlock(&VFS_I(ip)->i_lock);
	}

	xfs_irele(ip);
}

/*
 * Set us up to scrub metadata mapped by a file's fork.  Callers must not use
 * this to operate on user-accessible regular file data because the MMAPLOCK is
 * not taken.
 */
int
xchk_setup_inode_contents(
	struct xfs_scrub	*sc,
	unsigned int		resblks)
{
	int			error;

	error = xchk_iget_for_scrubbing(sc);
	if (error)
		return error;

	error = xrep_tempfile_adjust_directory_tree(sc);
	if (error)
		return error;

	/* Lock the inode so the VFS cannot touch this file. */
	xchk_ilock(sc, XFS_IOLOCK_EXCL);

	error = xchk_trans_alloc(sc, resblks);
	if (error)
		goto out;

	error = xchk_ino_dqattach(sc);
	if (error)
		goto out;

	xchk_ilock(sc, XFS_ILOCK_EXCL);
out:
	/* scrub teardown will unlock and release the inode for us */
	return error;
}

void
xchk_ilock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	xfs_ilock(sc->ip, ilock_flags);
	sc->ilock_flags |= ilock_flags;
}

bool
xchk_ilock_nowait(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	if (xfs_ilock_nowait(sc->ip, ilock_flags)) {
		sc->ilock_flags |= ilock_flags;
		return true;
	}

	return false;
}

void
xchk_iunlock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	sc->ilock_flags &= ~ilock_flags;
	xfs_iunlock(sc->ip, ilock_flags);
}

/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check.
 */
bool
xchk_should_check_xref(
	struct xfs_scrub	*sc,
	int			*error,
	struct xfs_btree_cur	**curpp)
{
	/* No point in xref if we already know we're corrupt. */
	if (xchk_skip_xref(sc->sm))
		return false;

	if (*error == 0)
		return true;

	if (curpp) {
		/* If we've already given up on xref, just bail out. */
		if (!*curpp)
			return false;

		/* xref error, delete cursor and bail out. */
		xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
		*curpp = NULL;
	}

	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
	trace_xchk_xref_error(sc, *error, __return_address);

	/*
	 * Errors encountered during cross-referencing with another
	 * data structure should not cause this scrubber to abort.
	 */
	*error = 0;
	return false;
}

/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xchk_buffer_recheck(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	if (bp->b_ops == NULL) {
		xchk_block_set_corrupt(sc, bp);
		return;
	}
	if (bp->b_ops->verify_struct == NULL) {
		xchk_set_incomplete(sc);
		return;
	}
	fa = bp->b_ops->verify_struct(bp);
	if (!fa)
		return;
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), fa);
}

static inline int
xchk_metadata_inode_subtype(
	struct xfs_scrub	*sc,
	unsigned int		scrub_type)
{
	struct xfs_scrub_subord	*sub;
	int			error;

	sub = xchk_scrub_create_subord(sc, scrub_type);
	error = sub->sc.ops->scrub(&sub->sc);
	xchk_scrub_free_subord(sub);
	return error;
}

/*
 * Scrub the attr/data forks of a metadata inode.  The metadata inode must be
 * pointed to by sc->ip and the ILOCK must be held.
 */
int
xchk_metadata_inode_forks(
	struct xfs_scrub	*sc)
{
	bool			shared;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/* Check the inode record. */
	error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_INODE);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Metadata inodes don't live on the rt device. */
	if (sc->ip->i_diflags & XFS_DIFLAG_REALTIME) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* They should never participate in reflink. */
	if (xfs_is_reflink_inode(sc->ip)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* Invoke the data fork scrubber. */
	error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_BMBTD);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Look for incorrect shared blocks. */
	if (xfs_has_reflink(sc->mp)) {
		error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
				&shared);
		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
				&error))
			return error;
		if (shared)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
	}

	/*
	 * Metadata files can only have extended attributes on metadir
	 * filesystems, either for parent pointers or for actual xattr data.
	 */
	if (xfs_inode_hasattr(sc->ip)) {
		if (!xfs_has_metadir(sc->mp)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			return 0;
		}

		error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_BMBTA);
		if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			return error;
	}

	return 0;
}

/*
 * Enable filesystem hooks (i.e. runtime code patching) before starting a scrub
 * operation.  Callers must not hold any locks that intersect with the CPU
 * hotplug lock (e.g. writeback locks) because code patching must halt the CPUs
 * to change kernel code.
 */
void
xchk_fsgates_enable(
	struct xfs_scrub	*sc,
	unsigned int		scrub_fsgates)
{
	ASSERT(!(scrub_fsgates & ~XCHK_FSGATES_ALL));
	ASSERT(!(sc->flags & scrub_fsgates));

	trace_xchk_fsgates_enable(sc, scrub_fsgates);

	if (scrub_fsgates & XCHK_FSGATES_DRAIN)
		xfs_defer_drain_wait_enable();

	if (scrub_fsgates & XCHK_FSGATES_QUOTA)
		xfs_dqtrx_hook_enable();

	if (scrub_fsgates & XCHK_FSGATES_DIRENTS)
		xfs_dir_hook_enable();

	if (scrub_fsgates & XCHK_FSGATES_RMAP)
		xfs_rmap_hook_enable();

	sc->flags |= scrub_fsgates;
}
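
/*
 * Usage sketch (illustrative; the exact condition varies by scrubber):
 * setup functions that want the intent drain described in
 * xchk_perag_drain_and_lock() enable the gate before taking any locks,
 * typically only on a second pass after the first attempt bailed out with
 * -ECHRNG, e.g.:
 *
 *	if (sc->flags & XCHK_TRY_HARDER)
 *		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
 */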

/*
 * Decide if this is a cached inode that's also allocated.  The caller
 * must hold a reference to an AG and the AGI buffer lock to prevent inodes
 * from being allocated or freed.
 *
 * Look up an inode by number in the given file system.  If the inode number
 * is invalid, return -EINVAL.  If the inode is not in cache, return -ENODATA.
 * If the inode is being reclaimed, return -ENODATA because we know the inode
 * cache cannot be updating the ondisk metadata.
 *
 * Otherwise, the incore inode is the one we want, and it is either live,
 * somewhere in the inactivation machinery, or reclaimable.  The inode is
 * allocated if i_mode is nonzero.  In all three cases, the cached inode will
 * be more up to date than the ondisk inode buffer, so we must use the incore
 * i_mode.
 */
int
xchk_inode_is_allocated(
	struct xfs_scrub	*sc,
	xfs_agino_t		agino,
	bool			*inuse)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag = sc->sa.pag;
	xfs_ino_t		ino;
	struct xfs_inode	*ip;
	int			error;

	/* caller must hold perag reference */
	if (pag == NULL) {
		ASSERT(pag != NULL);
		return -EINVAL;
	}

	/* caller must have AGI buffer */
	if (sc->sa.agi_bp == NULL) {
		ASSERT(sc->sa.agi_bp != NULL);
		return -EINVAL;
	}

	/* reject inode numbers outside existing AGs */
	ino = xfs_agino_to_ino(pag, agino);
	if (!xfs_verify_ino(mp, ino))
		return -EINVAL;

	error = -ENODATA;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
	if (!ip) {
		/* cache miss */
		goto out_rcu;
	}

	/*
	 * If the inode number doesn't match, the incore inode got reused
	 * during an RCU grace period and the radix tree hasn't been updated.
	 * This isn't the inode we want.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	trace_xchk_inode_is_allocated(ip);

	/*
	 * We have an incore inode that matches the inode we want, and the
	 * caller holds the perag structure and the AGI buffer.  Let's check
	 * our assumptions below:
	 */

#ifdef DEBUG
	/*
	 * (1) If the incore inode is live (i.e. referenced from the dcache),
	 * it will not be INEW, nor will it be in the inactivation or reclaim
	 * machinery.  The ondisk inode had better be allocated.  This is the
	 * most trivial case.
	 */
	if (!(ip->i_flags & (XFS_NEED_INACTIVE | XFS_INEW | XFS_IRECLAIMABLE |
			     XFS_INACTIVATING))) {
		/* live inode */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}

	/*
	 * If the incore inode is INEW, there are several possibilities:
	 *
	 * (2) For a file that is being created, note that we allocate the
	 * ondisk inode before allocating, initializing, and adding the incore
	 * inode to the radix tree.
	 *
	 * (3) If the incore inode is being recycled, the inode has to be
	 * allocated because we don't allow freed inodes to be recycled.
	 * Recycling doesn't touch i_mode.
	 */
	if (ip->i_flags & XFS_INEW) {
		/* created on disk already or recycling */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}

	/*
	 * (4) If the inode is queued for inactivation (NEED_INACTIVE) but
	 * inactivation has not started (!INACTIVATING), it is still allocated.
	 */
	if ((ip->i_flags & XFS_NEED_INACTIVE) &&
	    !(ip->i_flags & XFS_INACTIVATING)) {
		/* definitely before difree */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}
#endif

	/*
	 * If the incore inode is undergoing inactivation (INACTIVATING), there
	 * are two possibilities:
	 *
	 * (5) It is before the point where it would get freed ondisk, in which
	 * case i_mode is still nonzero.
	 *
	 * (6) It has already been freed, in which case i_mode is zero.
	 *
	 * We don't take the ILOCK here, but difree and dialloc update the AGI,
	 * and we've taken the AGI buffer lock, which prevents that from
	 * happening.
	 */

	/*
	 * (7) Inodes undergoing inactivation (INACTIVATING) or queued for
	 * reclaim (IRECLAIMABLE) could be allocated or free.  i_mode still
	 * reflects the ondisk state.
	 */

	/*
	 * (8) If the inode is in IFLUSHING, it's safe to query i_mode because
	 * the flush code uses i_mode to format the ondisk inode.
	 */

	/*
	 * (9) If the inode is in IRECLAIM and was reachable via the radix
	 * tree, it still has the same i_mode as it did before it entered
	 * reclaim.  The inode object is still alive because we hold the RCU
	 * read lock.
	 */

	*inuse = VFS_I(ip)->i_mode != 0;
	error = 0;

out_skip:
	spin_unlock(&ip->i_flags_lock);
out_rcu:
	rcu_read_unlock();
	return error;
}

/* Is this inode a root directory for either tree? */
bool
xchk_inode_is_dirtree_root(const struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	return ip == mp->m_rootip ||
	       (xfs_has_metadir(mp) && ip == mp->m_metadirip);
}

/* Does the superblock point down to this inode? */
bool
xchk_inode_is_sb_rooted(const struct xfs_inode *ip)
{
	return xchk_inode_is_dirtree_root(ip) ||
	       xfs_is_sb_inum(ip->i_mount, ip->i_ino);
}

/* What is the root directory inumber for this inode? */
xfs_ino_t
xchk_inode_rootdir_inum(const struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (xfs_is_metadir_inode(ip))
		return mp->m_metadirip->i_ino;
	return mp->m_rootip->i_ino;
}

static int
xchk_meta_btree_count_blocks(
	struct xfs_scrub	*sc,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_btree_cur	*cur;
	int			error;

	if (!sc->sr.rtg) {
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	switch (sc->ip->i_metatype) {
	case XFS_METAFILE_RTRMAP:
		cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg);
		break;
	case XFS_METAFILE_RTREFCOUNT:
		cur = xfs_rtrefcountbt_init_cursor(sc->tp, sc->sr.rtg);
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	error = xfs_btree_count_blocks(cur, count);
	xfs_btree_del_cursor(cur, error);
	if (!error) {
		*nextents = 0;
		(*count)--;	/* don't count the btree iroot */
	}
	return error;
}

/* Count the blocks used by a file, even if it's a metadata inode. */
int
xchk_inode_count_blocks(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(sc->ip, whichfork);

	if (!ifp) {
		*nextents = 0;
		*count = 0;
		return 0;
	}

	if (ifp->if_format == XFS_DINODE_FMT_META_BTREE) {
		ASSERT(whichfork == XFS_DATA_FORK);
		return xchk_meta_btree_count_blocks(sc, nextents, count);
	}

	return xfs_bmap_count_blocks(sc->tp, sc->ip, whichfork, nextents,
			count);
}