// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_exchmaps.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtgroup.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_bmap_util.h"
#include "xfs_rtrefcount_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/health.h"
#include "scrub/tempfile.h"

/* Common code for the metadata scrubbers. */

/*
 * Handling operational errors.
 *
 * The *_process_error() family of functions are used to process error return
 * codes from functions called as part of a scrub operation.
 *
 * If there's no error, we return true to tell the caller that it's ok
 * to move on to the next check in its list.
 *
 * For non-verifier errors (e.g. ENOMEM) we return false to tell the
 * caller that something bad happened, and we preserve *error so that
 * the caller can return the *error up the stack to userspace.
 *
 * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
 * OFLAG_CORRUPT in sm_flags and the *error is cleared. In other words,
 * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
 * not via return codes. We return false to tell the caller that
 * something bad happened. Since the error has been cleared, the caller
 * will (presumably) return that zero and scrubbing will move on to
 * whatever's next.
 *
 * ftrace can be used to record the precise metadata location and the
 * approximate code location of the failed operation.
 */

/* Check for operational errors. */
static bool
__xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
	case -ECHRNG:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(
				sc->ip ? sc->ip : XFS_I(file_inode(sc->file)),
				sc->sm, *error);
		break;
	case -ECANCELED:
		/*
		 * ECANCELED here means that the caller set one of the scrub
		 * outcome flags (corrupt, xfail, xcorrupt) and wants to exit
		 * quickly. Set error to zero and do not continue.
		 */
		trace_xchk_op_error(sc, agno, bno, *error, ret_ip);
		*error = 0;
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_op_error(sc, agno, bno, *error, ret_ip);
		break;
	}
	return false;
}

bool
xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_process_rt_error(
	struct xfs_scrub	*sc,
	xfs_rgnumber_t		rgno,
	xfs_rgblock_t		rgbno,
	int			*error)
{
	return __xchk_process_error(sc, rgno, rgbno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_xref_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/* Check for operational errors for a file offset. */
static bool
__xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
	case -ECHRNG:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -ECANCELED:
		/*
		 * ECANCELED here means that the caller set one of the scrub
		 * outcome flags (corrupt, xfail, xcorrupt) and wants to exit
		 * quickly. Set error to zero and do not continue.
		 */
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		*error = 0;
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		break;
	}
	return false;
}

bool
xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_fblock_xref_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}
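
/*
 * Usage sketch for the helpers above: a scrubber typically filters the
 * return value of each low-level operation through xchk_process_error()
 * (or the fblock/xref variants) before moving on. Here, agbno and
 * xchk_foo_check_something() are made-up names:
 *
 *	error = xchk_foo_check_something(sc, agbno);
 *	if (!xchk_process_error(sc, agno, agbno, &error))
 *		return error;
 *
 * A false return means either a runtime error (*error is preserved and
 * should be passed up to userspace) or a verifier failure (*error was
 * cleared and XFS_SCRUB_OFLAG_CORRUPT was set in sm_flags).
 */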

/*
 * Handling scrub corruption/optimization/warning checks.
 *
 * The *_set_{corrupt,preen,warning}() family of functions are used to
 * record the presence of metadata that is incorrect (corrupt), could be
 * optimized somehow (preen), or should be flagged for administrative
 * review but is not incorrect (warn).
 *
 * ftrace can be used to record the precise metadata location and
 * approximate code location of the failed check.
 */

/* Record a block which could be optimized. */
void
xchk_block_set_preen(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_block_preen(sc, xfs_buf_daddr(bp), __return_address);
}

/*
 * Record an inode which could be optimized. The trace data will include
 * the block location of the inode record itself.
 */
void
xchk_ino_set_preen(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_ino_preen(sc, ino, __return_address);
}

/* Record something being wrong with the filesystem primary superblock. */
void
xchk_set_corrupt(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fs_error(sc, 0, __return_address);
}

/* Record a corrupt block. */
void
xchk_block_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}

#ifdef CONFIG_XFS_QUOTA
/* Record a corrupt quota counter. */
void
xchk_qcheck_set_corrupt(
	struct xfs_scrub	*sc,
	unsigned int		dqtype,
	xfs_dqid_t		id)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_qcheck_error(sc, dqtype, id, __return_address);
}
#endif

/* Record a corruption while cross-referencing. */
void
xchk_block_xref_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}

/*
 * Record a corrupt inode. The trace data will include the block location
 * of the inode record itself.
 */
void
xchk_ino_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record a corruption while cross-referencing with an inode. */
void
xchk_ino_xref_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record corruption in a block indexed by a file fork. */
void
xchk_fblock_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/* Record a corruption while cross-referencing a fork block. */
void
xchk_fblock_xref_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/*
 * Warn about inodes that need administrative review but are not
 * incorrect.
 */
void
xchk_ino_set_warning(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_ino_warning(sc, ino, __return_address);
}

/* Warn about a block indexed by a file fork that needs review. */
void
xchk_fblock_set_warning(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
}

/* Signal an incomplete scrub. */
void
xchk_set_incomplete(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
	trace_xchk_incomplete(sc, __return_address);
}

/*
 * rmap scrubbing -- compute the number of blocks with a given owner,
 * at least according to the reverse mapping data.
 */

struct xchk_rmap_ownedby_info {
	const struct xfs_owner_info	*oinfo;
	xfs_filblks_t			*blocks;
};

STATIC int
xchk_count_rmap_ownedby_irec(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xchk_rmap_ownedby_info	*sroi = priv;
	bool				irec_attr;
	bool				oinfo_attr;

	irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
	oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;

	if (rec->rm_owner != sroi->oinfo->oi_owner)
		return 0;

	if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
		(*sroi->blocks) += rec->rm_blockcount;

	return 0;
}

/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an rmapbt cursor.
 */
int
xchk_count_rmap_ownedby_ag(
	struct xfs_scrub		*sc,
	struct xfs_btree_cur		*cur,
	const struct xfs_owner_info	*oinfo,
	xfs_filblks_t			*blocks)
{
	struct xchk_rmap_ownedby_info	sroi = {
		.oinfo			= oinfo,
		.blocks			= blocks,
	};

	*blocks = 0;
	return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
			&sroi);
}

/*
 * AG scrubbing
 *
 * These helpers facilitate locking an allocation group's header
 * buffers, setting up cursors for all btrees that are present, and
 * cleaning everything up once we're through.
 */

/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
	struct xfs_scrub	*sc,
	unsigned int		type)
{
	/* Return all AG header read failures when scanning btrees. */
	if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
		return true;
	/*
	 * If we're scanning a given type of AG header, we only want to
	 * see read failures from that specific header. We'd like the
	 * other headers to cross-check them, but this isn't required.
	 */
	if (sc->sm->sm_type == type)
		return true;
	return false;
}

/*
 * Grab the AG header buffers for the attached perag structure.
 *
 * The headers should be released by xchk_ag_free, but as a fail safe we attach
 * all the buffers we grab to the scrub transaction so they'll all be freed
 * when we cancel it.
 */
static inline int
xchk_perag_read_headers(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	int			error;

	error = xfs_ialloc_read_agi(sa->pag, sc->tp, 0, &sa->agi_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		return error;

	error = xfs_alloc_read_agf(sa->pag, sc->tp, 0, &sa->agf_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		return error;

	return 0;
}

/*
 * Grab the AG headers for the attached perag structure and wait for pending
 * intents to drain.
 */
int
xchk_perag_drain_and_lock(
	struct xfs_scrub	*sc)
{
	struct xchk_ag		*sa = &sc->sa;
	int			error = 0;

	ASSERT(sa->pag != NULL);
	ASSERT(sa->agi_bp == NULL);
	ASSERT(sa->agf_bp == NULL);

	do {
		if (xchk_should_terminate(sc, &error))
			return error;

		error = xchk_perag_read_headers(sc, sa);
		if (error)
			return error;

		/*
		 * If we've grabbed an inode for scrubbing then we assume that
		 * holding its ILOCK will suffice to coordinate with any intent
		 * chains involving this inode.
		 */
		if (sc->ip)
			return 0;

		/*
		 * Decide if this AG is quiet enough for all metadata to be
		 * consistent with each other. XFS allows the AG header buffer
		 * locks to cycle across transaction rolls while processing
		 * chains of deferred ops, which means that there could be
		 * other threads in the middle of processing a chain of
		 * deferred ops. For regular operations we are careful about
		 * ordering operations to prevent collisions between threads
		 * (which is why we don't need a per-AG lock), but scrub and
		 * repair have to serialize against chained operations.
		 *
		 * We just locked all the AG header buffers; now take a look
		 * to see if there are any intents in progress. If there are,
		 * drop the AG headers and wait for the intents to drain.
		 * Since we hold all the AG header locks for the duration of
		 * the scrub, this is the only time we have to sample the
		 * intents counter; any threads increasing it after this point
		 * can't possibly be in the middle of a chain of AG metadata
		 * updates.
		 *
		 * Obviously, this should be slanted against scrub and in favor
		 * of runtime threads.
		 */
		if (!xfs_group_intent_busy(pag_group(sa->pag)))
			return 0;

		if (sa->agf_bp) {
			xfs_trans_brelse(sc->tp, sa->agf_bp);
			sa->agf_bp = NULL;
		}

		if (sa->agi_bp) {
			xfs_trans_brelse(sc->tp, sa->agi_bp);
			sa->agi_bp = NULL;
		}

		if (!(sc->flags & XCHK_FSGATES_DRAIN))
			return -ECHRNG;
		error = xfs_group_intent_drain(pag_group(sa->pag));
		if (error == -ERESTARTSYS)
			error = -EINTR;
	} while (!error);

	return error;
}

/*
 * Grab the per-AG structure, grab all AG header buffers, and wait until there
 * aren't any pending intents. Returns -ENOENT if we can't grab the perag
 * structure.
 */
int
xchk_ag_read_headers(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	ASSERT(!sa->pag);
	sa->pag = xfs_perag_get(mp, agno);
	if (!sa->pag)
		return -ENOENT;

	return xchk_perag_drain_and_lock(sc);
}

/* Release all the AG btree cursors. */
void
xchk_ag_btcur_free(
	struct xchk_ag		*sa)
{
	if (sa->refc_cur)
		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
	if (sa->rmap_cur)
		xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
	if (sa->fino_cur)
		xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
	if (sa->ino_cur)
		xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
	if (sa->cnt_cur)
		xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
	if (sa->bno_cur)
		xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

	sa->refc_cur = NULL;
	sa->rmap_cur = NULL;
	sa->fino_cur = NULL;
	sa->ino_cur = NULL;
	sa->bno_cur = NULL;
	sa->cnt_cur = NULL;
}

/* Initialize all the btree cursors for an AG. */
void
xchk_ag_btcur_init(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	if (sa->agf_bp) {
		/* Set up a bnobt cursor for cross-referencing. */
		sa->bno_cur = xfs_bnobt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->bno_cur,
				XFS_SCRUB_TYPE_BNOBT);

		/* Set up a cntbt cursor for cross-referencing. */
		sa->cnt_cur = xfs_cntbt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->cnt_cur,
				XFS_SCRUB_TYPE_CNTBT);

		/* Set up a rmapbt cursor for cross-referencing. */
		if (xfs_has_rmapbt(mp)) {
			sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp,
					sa->agf_bp, sa->pag);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->rmap_cur,
					XFS_SCRUB_TYPE_RMAPBT);
		}

		/* Set up a refcountbt cursor for cross-referencing. */
		if (xfs_has_reflink(mp)) {
			sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
					sa->agf_bp, sa->pag);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->refc_cur,
					XFS_SCRUB_TYPE_REFCNTBT);
		}
	}

	if (sa->agi_bp) {
		/* Set up an inobt cursor for cross-referencing. */
		sa->ino_cur = xfs_inobt_init_cursor(sa->pag, sc->tp,
				sa->agi_bp);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->ino_cur,
				XFS_SCRUB_TYPE_INOBT);

		/* Set up a finobt cursor for cross-referencing. */
		if (xfs_has_finobt(mp)) {
			sa->fino_cur = xfs_finobt_init_cursor(sa->pag, sc->tp,
					sa->agi_bp);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->fino_cur,
					XFS_SCRUB_TYPE_FINOBT);
		}
	}
}

/* Release the AG header context and btree cursors. */
void
xchk_ag_free(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	xchk_ag_btcur_free(sa);
	xrep_reset_perag_resv(sc);
	if (sa->agf_bp) {
		xfs_trans_brelse(sc->tp, sa->agf_bp);
		sa->agf_bp = NULL;
	}
	if (sa->agi_bp) {
		xfs_trans_brelse(sc->tp, sa->agi_bp);
		sa->agi_bp = NULL;
	}
	if (sa->pag) {
		xfs_perag_put(sa->pag);
		sa->pag = NULL;
	}
}

/*
 * For scrub, grab the perag structure, the AGI, and the AGF headers, in that
 * order. Locking order requires us to get the AGI before the AGF. We use the
 * transaction to avoid deadlocking on crosslinked metadata buffers; either the
 * caller passes one in (bmap scrub) or we have to create a transaction
 * ourselves. Returns ENOENT if the perag struct cannot be grabbed.
 */
int
xchk_ag_init(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int			error;

	error = xchk_ag_read_headers(sc, agno, sa);
	if (error)
		return error;

	xchk_ag_btcur_init(sc, sa);
	return 0;
}
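
/*
 * Usage sketch: an AG scrubber's setup function typically pins the AG and
 * builds the cursors like this (see xchk_setup_ag_btree below), leaving
 * the cleanup to xchk_ag_free() during scrub teardown:
 *
 *	error = xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
 *	if (error)
 *		return error;
 *
 * Afterwards the cross-referencing cursors, if any, are available in
 * sc->sa (bno_cur, ino_cur, rmap_cur, and so on).
 */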

#ifdef CONFIG_XFS_RT
/*
 * For scrubbing a realtime group, grab all the in-core resources we'll need to
 * check the metadata, which means taking the ILOCK of the realtime group's
 * metadata inodes. Callers must not join these inodes to the transaction with
 * non-zero lockflags or concurrency problems will result. The @rtglock_flags
 * argument takes XFS_RTGLOCK_* flags.
 */
int
xchk_rtgroup_init(
	struct xfs_scrub	*sc,
	xfs_rgnumber_t		rgno,
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg == NULL);
	ASSERT(sr->rtlock_flags == 0);

	sr->rtg = xfs_rtgroup_get(sc->mp, rgno);
	if (!sr->rtg)
		return -ENOENT;
	return 0;
}

/* Lock all the rt group metadata inode ILOCKs and wait for intents. */
int
xchk_rtgroup_lock(
	struct xfs_scrub	*sc,
	struct xchk_rt		*sr,
	unsigned int		rtglock_flags)
{
	int			error = 0;

	ASSERT(sr->rtg != NULL);

	/*
	 * If we're /only/ locking the rtbitmap in shared mode, then we're
	 * obviously not trying to compare records in two metadata inodes.
	 * There's no need to drain intents here because the caller (most
	 * likely the rgsuper scanner) doesn't need that level of consistency.
	 */
	if (rtglock_flags == XFS_RTGLOCK_BITMAP_SHARED) {
		xfs_rtgroup_lock(sr->rtg, rtglock_flags);
		sr->rtlock_flags = rtglock_flags;
		return 0;
	}

	do {
		if (xchk_should_terminate(sc, &error))
			return error;

		xfs_rtgroup_lock(sr->rtg, rtglock_flags);

		/*
		 * If we've grabbed a non-metadata file for scrubbing, we
		 * assume that holding its ILOCK will suffice to coordinate
		 * with any rt intent chains involving this inode.
		 */
		if (sc->ip && !xfs_is_internal_inode(sc->ip))
			break;

		/*
		 * Decide if the rt group is quiet enough for all metadata to
		 * be consistent with each other. Regular file IO doesn't get
		 * to lock all the rt inodes at the same time, which means that
		 * there could be other threads in the middle of processing a
		 * chain of deferred ops.
		 *
		 * We just locked all the metadata inodes for this rt group;
		 * now take a look to see if there are any intents in progress.
		 * If there are, drop the rt group inode locks and wait for the
		 * intents to drain. Since we hold the rt group inode locks
		 * for the duration of the scrub, this is the only time we have
		 * to sample the intents counter; any threads increasing it
		 * after this point can't possibly be in the middle of a chain
		 * of rt metadata updates.
		 *
		 * Obviously, this should be slanted against scrub and in favor
		 * of runtime threads.
		 */
		if (!xfs_group_intent_busy(rtg_group(sr->rtg)))
			break;

		xfs_rtgroup_unlock(sr->rtg, rtglock_flags);

		if (!(sc->flags & XCHK_FSGATES_DRAIN))
			return -ECHRNG;
		error = xfs_group_intent_drain(rtg_group(sr->rtg));
		if (error) {
			if (error == -ERESTARTSYS)
				error = -EINTR;
			return error;
		}
	} while (1);

	sr->rtlock_flags = rtglock_flags;

	if (xfs_has_rtrmapbt(sc->mp) && (rtglock_flags & XFS_RTGLOCK_RMAP))
		sr->rmap_cur = xfs_rtrmapbt_init_cursor(sc->tp, sr->rtg);

	if (xfs_has_rtreflink(sc->mp) && (rtglock_flags & XFS_RTGLOCK_REFCOUNT))
		sr->refc_cur = xfs_rtrefcountbt_init_cursor(sc->tp, sr->rtg);

	return 0;
}

/*
 * Free all the btree cursors and other incore data relating to the realtime
 * group. This has to be done /before/ committing (or cancelling) the scrub
 * transaction.
 */
void
xchk_rtgroup_btcur_free(
	struct xchk_rt		*sr)
{
	if (sr->rmap_cur)
		xfs_btree_del_cursor(sr->rmap_cur, XFS_BTREE_ERROR);
	if (sr->refc_cur)
		xfs_btree_del_cursor(sr->refc_cur, XFS_BTREE_ERROR);

	sr->refc_cur = NULL;
	sr->rmap_cur = NULL;
}

/*
 * Unlock the realtime group. This must be done /after/ committing (or
 * cancelling) the scrub transaction.
 */
void
xchk_rtgroup_unlock(
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg != NULL);

	if (sr->rtlock_flags) {
		xfs_rtgroup_unlock(sr->rtg, sr->rtlock_flags);
		sr->rtlock_flags = 0;
	}
}

/*
 * Unlock the realtime group and release its resources. This must be done
 * /after/ committing (or cancelling) the scrub transaction.
 */
void
xchk_rtgroup_free(
	struct xfs_scrub	*sc,
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg != NULL);

	xchk_rtgroup_unlock(sr);

	xfs_rtgroup_put(sr->rtg);
	sr->rtg = NULL;
}
#endif /* CONFIG_XFS_RT */

/* Per-scrubber setup functions */

void
xchk_trans_cancel(
	struct xfs_scrub	*sc)
{
	xfs_trans_cancel(sc->tp);
	sc->tp = NULL;
}

void
xchk_trans_alloc_empty(
	struct xfs_scrub	*sc)
{
	sc->tp = xfs_trans_alloc_empty(sc->mp);
}

/*
 * Grab an empty transaction so that we can re-grab locked buffers if
 * one of our btrees turns out to be cyclic.
 *
 * If we're going to repair something, we need to ask for the largest possible
 * log reservation so that we can handle the worst case scenario for metadata
 * updates while rebuilding a metadata item. We also need to reserve as many
 * blocks in the head transaction as we think we're going to need to rebuild
 * the metadata object.
 */
int
xchk_trans_alloc(
	struct xfs_scrub	*sc,
	uint			resblks)
{
	if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
		return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
				resblks, 0, 0, &sc->tp);

	xchk_trans_alloc_empty(sc);
	return 0;
}

/* Set us up with a transaction and an empty context. */
int
xchk_setup_fs(
	struct xfs_scrub	*sc)
{
	uint			resblks;

	resblks = xrep_calc_ag_resblks(sc);
	return xchk_trans_alloc(sc, resblks);
}

/* Set us up with a transaction and an empty context to repair rt metadata. */
int
xchk_setup_rt(
	struct xfs_scrub	*sc)
{
	return xchk_trans_alloc(sc, xrep_calc_rtgroup_resblks(sc));
}

/* Set us up with AG headers and btree cursors. */
int
xchk_setup_ag_btree(
	struct xfs_scrub	*sc,
	bool			force_log)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	/*
	 * If the caller asks us to checkpoint the log, do so. This
	 * expensive operation should be performed infrequently and only
	 * as a last resort. Any caller that sets force_log should
	 * document why they need to do so.
	 */
	if (force_log) {
		error = xchk_checkpoint_log(mp);
		if (error)
			return error;
	}

	error = xchk_setup_fs(sc);
	if (error)
		return error;

	return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}

/* Push everything out of the log onto disk. */
int
xchk_checkpoint_log(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	xfs_ail_push_all_sync(mp->m_ail);
	return 0;
}

/* Verify that an inode is allocated ondisk, then return its cached inode. */
int
xchk_iget(
	struct xfs_scrub	*sc,
	xfs_ino_t		inum,
	struct xfs_inode	**ipp)
{
	ASSERT(sc->tp != NULL);

	return xfs_iget(sc->mp, sc->tp, inum, XCHK_IGET_FLAGS, 0, ipp);
}

/*
 * Try to grab an inode in a manner that avoids races with physical inode
 * allocation. If we can't, return the locked AGI buffer so that the caller
 * can single-step the loading process to see where things went wrong.
 * Callers must have a valid scrub transaction.
 *
 * If the iget succeeds, return 0, a NULL AGI, and the inode.
 *
 * If the iget fails, return the error, the locked AGI, and a NULL inode. This
 * can include -EINVAL and -ENOENT for invalid inode numbers or inodes that are
 * no longer allocated; or any other corruption or runtime error.
 *
 * If the AGI read fails, return the error, a NULL AGI, and NULL inode.
 *
 * If a fatal signal is pending, return -EINTR, a NULL AGI, and a NULL inode.
 */
int
xchk_iget_agi(
	struct xfs_scrub	*sc,
	xfs_ino_t		inum,
	struct xfs_buf		**agi_bpp,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_trans	*tp = sc->tp;
	struct xfs_perag	*pag;
	int			error;

	ASSERT(sc->tp != NULL);

again:
	*agi_bpp = NULL;
	*ipp = NULL;
	error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	/*
	 * Attach the AGI buffer to the scrub transaction to avoid deadlocks
	 * in the iget cache miss path.
	 */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	error = xfs_ialloc_read_agi(pag, tp, 0, agi_bpp);
	xfs_perag_put(pag);
	if (error)
		return error;

	error = xfs_iget(mp, tp, inum, XFS_IGET_NORETRY | XCHK_IGET_FLAGS, 0,
			ipp);
	if (error == -EAGAIN) {
		/*
		 * The inode may be in core but temporarily unavailable and may
		 * require the AGI buffer before it can be returned. Drop the
		 * AGI buffer and retry the lookup.
		 *
		 * Incore lookup will fail with EAGAIN on a cache hit if the
		 * inode is queued to the inactivation list. The inactivation
		 * worker may remove the inode from the unlinked list and hence
		 * needs the AGI.
		 *
		 * Hence xchk_iget_agi() needs to drop the AGI lock on EAGAIN
		 * to allow inodegc to make progress and move the inode to
		 * IRECLAIMABLE state where xfs_iget will be able to return it
		 * again if it can lock the inode.
		 */
		xfs_trans_brelse(tp, *agi_bpp);
		delay(1);
		goto again;
	}
	if (error)
		return error;

	/* We got the inode, so we can release the AGI. */
	ASSERT(*ipp != NULL);
	xfs_trans_brelse(tp, *agi_bpp);
	*agi_bpp = NULL;
	return 0;
}

#ifdef CONFIG_XFS_QUOTA
/*
 * Try to attach dquots to this inode if we think we might want to repair it.
 * Callers must not hold any ILOCKs. If the dquots are broken and cannot be
 * attached, a quotacheck will be scheduled.
 */
int
xchk_ino_dqattach(
	struct xfs_scrub	*sc)
{
	ASSERT(sc->tp != NULL);
	ASSERT(sc->ip != NULL);

	if (!xchk_could_repair(sc))
		return 0;

	return xrep_ino_dqattach(sc);
}
#endif

/* Install an inode that we opened by handle for scrubbing. */
int
xchk_install_handle_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
		xchk_irele(sc, ip);
		return -ENOENT;
	}

	sc->ip = ip;
	return 0;
}

/*
 * Install an already-referenced inode for scrubbing. Get our own reference to
 * the inode to make disposal simpler. The inode must not be in I_FREEING or
 * I_WILL_FREE state!
 */
int
xchk_install_live_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (!igrab(VFS_I(ip))) {
		xchk_ino_set_corrupt(sc, ip->i_ino);
		return -EFSCORRUPTED;
	}

	sc->ip = ip;
	return 0;
}

/*
 * In preparation to scrub metadata structures that hang off of an inode,
 * grab either the inode referenced in the scrub control structure or the
 * inode passed in. If the inumber does not reference an allocated inode
 * record, the function returns ENOENT to end the scrub early. The inode
 * is not locked.
 */
int
xchk_iget_for_scrubbing(
	struct xfs_scrub	*sc)
{
	struct xfs_imap		imap;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag;
	struct xfs_buf		*agi_bp;
	struct xfs_inode	*ip_in = XFS_I(file_inode(sc->file));
	struct xfs_inode	*ip = NULL;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, sc->sm->sm_ino);
	int			error;

	ASSERT(sc->tp == NULL);

	/* We want to scan the inode we already had opened. */
	if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino)
		return xchk_install_live_inode(sc, ip_in);

	/*
	 * On pre-metadir filesystems, reject internal metadata files. For
	 * metadir filesystems, limited scrubbing of any file in the metadata
	 * directory tree by handle is allowed, because that is the only way to
	 * validate the lack of parent pointers in the sb-root metadata inodes.
	 */
	if (!xfs_has_metadir(mp) && xfs_is_sb_inum(mp, sc->sm->sm_ino))
		return -ENOENT;
	/* Reject obviously bad inode numbers. */
	if (!xfs_verify_ino(sc->mp, sc->sm->sm_ino))
		return -ENOENT;

	/* Try a safe untrusted iget. */
	error = xchk_iget_safe(sc, sc->sm->sm_ino, &ip);
	if (!error)
		return xchk_install_handle_inode(sc, ip);
	if (error == -ENOENT)
		return error;
	if (error != -EINVAL)
		goto out_error;

	/*
	 * EINVAL with IGET_UNTRUSTED probably means one of several things:
	 * userspace gave us an inode number that doesn't correspond to fs
	 * space; the inode btree lacks a record for this inode; or there is a
	 * record, and it says this inode is free.
	 *
	 * We want to look up this inode in the inobt to distinguish two
	 * scenarios: (1) the inobt says the inode is free, in which case
	 * there's nothing to do; and (2) the inobt says the inode is
	 * allocated, but loading it failed due to corruption.
	 *
	 * Allocate a transaction and grab the AGI to prevent inobt activity
	 * in this AG. Retry the iget in case someone allocated a new inode
	 * after the first iget failed.
	 */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out_error;

	error = xchk_iget_agi(sc, sc->sm->sm_ino, &agi_bp, &ip);
	if (error == 0) {
		/* Actually got the inode, so install it. */
		xchk_trans_cancel(sc);
		return xchk_install_handle_inode(sc, ip);
	}
	if (error == -ENOENT)
		goto out_gone;
	if (error != -EINVAL)
		goto out_cancel;

	/* Ensure that we have protected against inode allocation/freeing. */
	if (agi_bp == NULL) {
		ASSERT(agi_bp != NULL);
		error = -ECANCELED;
		goto out_cancel;
	}

	/*
	 * Untrusted iget failed a second time. Let's try an inobt lookup.
	 * If the inobt says that the inode can neither exist inside the
	 * filesystem nor be allocated, return ENOENT to signal that the check
	 * can be skipped.
	 *
	 * If the lookup returns corruption, we'll mark this inode corrupt and
	 * exit to userspace. There's little chance of fixing anything until
	 * the inobt is straightened out, but there's nothing we can do here.
	 *
	 * If the lookup encounters any other error, exit to userspace.
	 *
	 * If the lookup succeeds, something else must be very wrong in the fs
	 * such that setting up the incore inode failed in some strange way.
	 * Treat those as corruptions.
	 */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sc->sm->sm_ino));
	if (!pag) {
		error = -EFSCORRUPTED;
		goto out_cancel;
	}

	error = xfs_imap(pag, sc->tp, sc->sm->sm_ino, &imap,
			XFS_IGET_UNTRUSTED);
	xfs_perag_put(pag);
	if (error == -EINVAL || error == -ENOENT)
		goto out_gone;
	if (!error)
		error = -EFSCORRUPTED;

out_cancel:
	xchk_trans_cancel(sc);
out_error:
	trace_xchk_op_error(sc, agno, XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
			error, __return_address);
	return error;
out_gone:
	/* The file is gone, so there's nothing to check. */
	xchk_trans_cancel(sc);
	return -ENOENT;
}

/* Release an inode, possibly dropping it in the process. */
void
xchk_irele(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (sc->tp) {
		/*
		 * If we are in a transaction, we /cannot/ drop the inode
		 * ourselves, because the VFS will trigger writeback, which
		 * can require a transaction. Clear DONTCACHE to force the
		 * inode to the LRU, where someone else can take care of
		 * dropping it.
		 *
		 * Note that when we grabbed our reference to the inode, it
		 * could have had an active ref and DONTCACHE set if a sysadmin
		 * is trying to coerce a change in file access mode. icache
		 * hits do not clear DONTCACHE, so we must do it here.
		 */
		spin_lock(&VFS_I(ip)->i_lock);
		VFS_I(ip)->i_state &= ~I_DONTCACHE;
		spin_unlock(&VFS_I(ip)->i_lock);
	}

	xfs_irele(ip);
}

/*
 * Set us up to scrub metadata mapped by a file's fork. Callers must not use
 * this to operate on user-accessible regular file data because the MMAPLOCK is
 * not taken.
 */
int
xchk_setup_inode_contents(
	struct xfs_scrub	*sc,
	unsigned int		resblks)
{
	int			error;

	error = xchk_iget_for_scrubbing(sc);
	if (error)
		return error;

	error = xrep_tempfile_adjust_directory_tree(sc);
	if (error)
		return error;

	/* Lock the inode so the VFS cannot touch this file. */
	xchk_ilock(sc, XFS_IOLOCK_EXCL);

	error = xchk_trans_alloc(sc, resblks);
	if (error)
		goto out;

	error = xchk_ino_dqattach(sc);
	if (error)
		goto out;

	xchk_ilock(sc, XFS_ILOCK_EXCL);
out:
	/* scrub teardown will unlock and release the inode for us */
	return error;
}

void
xchk_ilock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	xfs_ilock(sc->ip, ilock_flags);
	sc->ilock_flags |= ilock_flags;
}

bool
xchk_ilock_nowait(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	if (xfs_ilock_nowait(sc->ip, ilock_flags)) {
		sc->ilock_flags |= ilock_flags;
		return true;
	}

	return false;
}

void
xchk_iunlock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	sc->ilock_flags &= ~ilock_flags;
	xfs_iunlock(sc->ip, ilock_flags);
}

/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check.
 */
bool
xchk_should_check_xref(
	struct xfs_scrub	*sc,
	int			*error,
	struct xfs_btree_cur	**curpp)
{
	/* No point in xref if we already know we're corrupt. */
	if (xchk_skip_xref(sc->sm))
		return false;

	if (*error == 0)
		return true;

	if (curpp) {
		/* If we've already given up on xref, just bail out. */
		if (!*curpp)
			return false;

		/* xref error, delete cursor and bail out. */
		xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
		*curpp = NULL;
	}

	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
	trace_xchk_xref_error(sc, *error, __return_address);

	/*
	 * Errors encountered during cross-referencing with another
	 * data structure should not cause this scrubber to abort.
	 */
	*error = 0;
	return false;
}

/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xchk_buffer_recheck(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	if (bp->b_ops == NULL) {
		xchk_block_set_corrupt(sc, bp);
		return;
	}
	if (bp->b_ops->verify_struct == NULL) {
		xchk_set_incomplete(sc);
		return;
	}
	fa = bp->b_ops->verify_struct(bp);
	if (!fa)
		return;
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), fa);
}

static inline int
xchk_metadata_inode_subtype(
	struct xfs_scrub	*sc,
	unsigned int		scrub_type)
{
	struct xfs_scrub_subord	*sub;
	int			error;

	sub = xchk_scrub_create_subord(sc, scrub_type);
	error = sub->sc.ops->scrub(&sub->sc);
	xchk_scrub_free_subord(sub);
	return error;
}

/*
 * Scrub the attr/data forks of a metadata inode. The metadata inode must be
 * pointed to by sc->ip and the ILOCK must be held.
 */
int
xchk_metadata_inode_forks(
	struct xfs_scrub	*sc)
{
	bool			shared;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/* Check the inode record. */
	error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_INODE);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Metadata inodes don't live on the rt device. */
	if (sc->ip->i_diflags & XFS_DIFLAG_REALTIME) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* They should never participate in reflink. */
	if (xfs_is_reflink_inode(sc->ip)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* Invoke the data fork scrubber. */
	error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_BMBTD);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Look for incorrect shared blocks. */
	if (xfs_has_reflink(sc->mp)) {
		error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
				&shared);
		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
				&error))
			return error;
		if (shared)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
	}

	/*
	 * Metadata files can only have extended attributes on metadir
	 * filesystems, either for parent pointers or for actual xattr data.
	 */
	if (xfs_inode_hasattr(sc->ip)) {
		if (!xfs_has_metadir(sc->mp)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			return 0;
		}

		error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_BMBTA);
		if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			return error;
	}

	return 0;
}

/*
 * Enable filesystem hooks (i.e. runtime code patching) before starting a scrub
 * operation. Callers must not hold any locks that intersect with the CPU
 * hotplug lock (e.g. writeback locks) because code patching must halt the CPUs
 * to change kernel code.
 */
void
xchk_fsgates_enable(
	struct xfs_scrub	*sc,
	unsigned int		scrub_fsgates)
{
	ASSERT(!(scrub_fsgates & ~XCHK_FSGATES_ALL));
	ASSERT(!(sc->flags & scrub_fsgates));

	trace_xchk_fsgates_enable(sc, scrub_fsgates);

	if (scrub_fsgates & XCHK_FSGATES_DRAIN)
		xfs_defer_drain_wait_enable();

	if (scrub_fsgates & XCHK_FSGATES_QUOTA)
		xfs_dqtrx_hook_enable();

	if (scrub_fsgates & XCHK_FSGATES_DIRENTS)
		xfs_dir_hook_enable();

	if (scrub_fsgates & XCHK_FSGATES_RMAP)
		xfs_rmap_hook_enable();

	sc->flags |= scrub_fsgates;
}
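
/*
 * For example (sketch only; which gates a scrubber needs depends on the
 * scrub type), a setup function that knows it will need the intent drain
 * could enable that gate before taking any locks:
 *
 *	xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
 */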

/*
 * Decide if this is a cached inode that's also allocated. The caller must
 * hold a reference to an AG and the AGI buffer lock to prevent inodes from
 * being allocated or freed.
 *
 * Look up an inode by number in the given file system. If the inode number
 * is invalid, return -EINVAL. If the inode is not in cache, return -ENODATA.
 * If the inode is being reclaimed, return -ENODATA because we know the inode
 * cache cannot be updating the ondisk metadata.
 *
 * Otherwise, the incore inode is the one we want, and it is either live,
 * somewhere in the inactivation machinery, or reclaimable. The inode is
 * allocated if i_mode is nonzero. In all three cases, the cached inode will
 * be more up to date than the ondisk inode buffer, so we must use the incore
 * i_mode.
 */
int
xchk_inode_is_allocated(
	struct xfs_scrub	*sc,
	xfs_agino_t		agino,
	bool			*inuse)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag = sc->sa.pag;
	xfs_ino_t		ino;
	struct xfs_inode	*ip;
	int			error;

	/* caller must hold perag reference */
	if (pag == NULL) {
		ASSERT(pag != NULL);
		return -EINVAL;
	}

	/* caller must have AGI buffer */
	if (sc->sa.agi_bp == NULL) {
		ASSERT(sc->sa.agi_bp != NULL);
		return -EINVAL;
	}

	/* reject inode numbers outside existing AGs */
	ino = xfs_agino_to_ino(pag, agino);
	if (!xfs_verify_ino(mp, ino))
		return -EINVAL;

	error = -ENODATA;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
	if (!ip) {
		/* cache miss */
		goto out_rcu;
	}

	/*
	 * If the inode number doesn't match, the incore inode got reused
	 * during an RCU grace period and the radix tree hasn't been updated.
	 * This isn't the inode we want.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	trace_xchk_inode_is_allocated(ip);

	/*
	 * We have an incore inode that matches the inode we want, and the
	 * caller holds the perag structure and the AGI buffer. Let's check
	 * our assumptions below:
	 */

#ifdef DEBUG
	/*
	 * (1) If the incore inode is live (i.e. referenced from the dcache),
	 * it will not be INEW, nor will it be in the inactivation or reclaim
	 * machinery. The ondisk inode had better be allocated. This is the
	 * most trivial case.
	 */
	if (!(ip->i_flags & (XFS_NEED_INACTIVE | XFS_INEW | XFS_IRECLAIMABLE |
			     XFS_INACTIVATING))) {
		/* live inode */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}

	/*
	 * If the incore inode is INEW, there are several possibilities:
	 *
	 * (2) For a file that is being created, note that we allocate the
	 * ondisk inode before allocating, initializing, and adding the incore
	 * inode to the radix tree.
	 *
	 * (3) If the incore inode is being recycled, the inode has to be
	 * allocated because we don't allow freed inodes to be recycled.
	 * Recycling doesn't touch i_mode.
	 */
	if (ip->i_flags & XFS_INEW) {
		/* created on disk already or recycling */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}

	/*
	 * (4) If the inode is queued for inactivation (NEED_INACTIVE) but
	 * inactivation has not started (!INACTIVATING), it is still allocated.
	 */
	if ((ip->i_flags & XFS_NEED_INACTIVE) &&
	    !(ip->i_flags & XFS_INACTIVATING)) {
		/* definitely before difree */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}
#endif

	/*
	 * If the incore inode is undergoing inactivation (INACTIVATING), there
	 * are two possibilities:
	 *
	 * (5) It is before the point where it would get freed ondisk, in which
	 * case i_mode is still nonzero.
	 *
	 * (6) It has already been freed, in which case i_mode is zero.
	 *
	 * We don't take the ILOCK here, but difree and dialloc update the AGI,
	 * and we've taken the AGI buffer lock, which prevents that from
	 * happening.
	 */

	/*
	 * (7) Inodes undergoing inactivation (INACTIVATING) or queued for
	 * reclaim (IRECLAIMABLE) could be allocated or free. i_mode still
	 * reflects the ondisk state.
	 */

	/*
	 * (8) If the inode is in IFLUSHING, it's safe to query i_mode because
	 * the flush code uses i_mode to format the ondisk inode.
	 */

	/*
	 * (9) If the inode is in IRECLAIM and was reachable via the radix
	 * tree, it still has the same i_mode as it did before it entered
	 * reclaim. The inode object is still alive because we hold the RCU
	 * read lock.
	 */

	*inuse = VFS_I(ip)->i_mode != 0;
	error = 0;

out_skip:
	spin_unlock(&ip->i_flags_lock);
out_rcu:
	rcu_read_unlock();
	return error;
}

/* Is this inode a root directory for either tree? */
bool
xchk_inode_is_dirtree_root(const struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	return ip == mp->m_rootip ||
	       (xfs_has_metadir(mp) && ip == mp->m_metadirip);
}

/* Does the superblock point down to this inode? */
bool
xchk_inode_is_sb_rooted(const struct xfs_inode *ip)
{
	return xchk_inode_is_dirtree_root(ip) ||
	       xfs_is_sb_inum(ip->i_mount, ip->i_ino);
}

/* What is the root directory inumber for this inode? */
xfs_ino_t
xchk_inode_rootdir_inum(const struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (xfs_is_metadir_inode(ip))
		return mp->m_metadirip->i_ino;
	return mp->m_rootip->i_ino;
}

static int
xchk_meta_btree_count_blocks(
	struct xfs_scrub	*sc,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_btree_cur	*cur;
	int			error;

	if (!sc->sr.rtg) {
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	switch (sc->ip->i_metatype) {
	case XFS_METAFILE_RTRMAP:
		cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg);
		break;
	case XFS_METAFILE_RTREFCOUNT:
		cur = xfs_rtrefcountbt_init_cursor(sc->tp, sc->sr.rtg);
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	error = xfs_btree_count_blocks(cur, count);
	xfs_btree_del_cursor(cur, error);
	if (!error) {
		*nextents = 0;
		(*count)--;	/* don't count the btree iroot */
	}
	return error;
}

/* Count the blocks used by a file, even if it's a metadata inode. */
int
xchk_inode_count_blocks(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(sc->ip, whichfork);

	if (!ifp) {
		*nextents = 0;
		*count = 0;
		return 0;
	}

	if (ifp->if_format == XFS_DINODE_FMT_META_BTREE) {
		ASSERT(whichfork == XFS_DATA_FORK);
		return xchk_meta_btree_count_blocks(sc, nextents, count);
	}

	return xfs_bmap_count_blocks(sc->tp, sc->ip, whichfork, nextents,
			count);
}