1 /* 2 * Copyright (C) 2017 Oracle. All Rights Reserved. 3 * 4 * Author: Darrick J. Wong <darrick.wong@oracle.com> 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 2 9 * of the License, or (at your option) any later version. 10 * 11 * This program is distributed in the hope that it would be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. 19 */ 20 #include "xfs.h" 21 #include "xfs_fs.h" 22 #include "xfs_shared.h" 23 #include "xfs_format.h" 24 #include "xfs_trans_resv.h" 25 #include "xfs_mount.h" 26 #include "xfs_defer.h" 27 #include "xfs_btree.h" 28 #include "xfs_bit.h" 29 #include "xfs_log_format.h" 30 #include "xfs_trans.h" 31 #include "xfs_sb.h" 32 #include "xfs_inode.h" 33 #include "xfs_icache.h" 34 #include "xfs_itable.h" 35 #include "xfs_alloc.h" 36 #include "xfs_alloc_btree.h" 37 #include "xfs_bmap.h" 38 #include "xfs_bmap_btree.h" 39 #include "xfs_ialloc.h" 40 #include "xfs_ialloc_btree.h" 41 #include "xfs_refcount.h" 42 #include "xfs_refcount_btree.h" 43 #include "xfs_rmap.h" 44 #include "xfs_rmap_btree.h" 45 #include "xfs_log.h" 46 #include "xfs_trans_priv.h" 47 #include "scrub/xfs_scrub.h" 48 #include "scrub/scrub.h" 49 #include "scrub/common.h" 50 #include "scrub/trace.h" 51 #include "scrub/btree.h" 52 53 /* Common code for the metadata scrubbers. */ 54 55 /* 56 * Handling operational errors. 57 * 58 * The *_process_error() family of functions are used to process error return 59 * codes from functions called as part of a scrub operation. 
 *
 * If there's no error, we return true to tell the caller that it's ok
 * to move on to the next check in its list.
 *
 * For non-verifier errors (e.g. ENOMEM) we return false to tell the
 * caller that something bad happened, and we preserve *error so that
 * the caller can return the *error up the stack to userspace.
 *
 * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
 * OFLAG_CORRUPT in sm_flags and the *error is cleared. In other words,
 * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
 * not via return codes. We return false to tell the caller that
 * something bad happened. Since the error has been cleared, the caller
 * will (presumably) return that zero and scrubbing will move on to
 * whatever's next.
 *
 * ftrace can be used to record the precise metadata location and the
 * approximate code location of the failed operation.
 */

/*
 * Check for operational errors.
 *
 * Returns true only if *error == 0; otherwise records the failure in
 * sm_flags (for verifier errors, via @errflag) and/or the tracepoints
 * and returns false.  @ret_ip is the caller's return address, passed
 * through to the tracepoint so ftrace can locate the failed call site.
 */
static bool
__xfs_scrub_process_error(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	xfs_agblock_t			bno,
	int				*error,
	__u32				errflag,
	void				*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort; *error is cleared. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		/* fall through */
	default:
		trace_xfs_scrub_op_error(sc, agno, bno, *error,
				ret_ip);
		break;
	}
	return false;
}

/* Process an operational error; verifier errors set OFLAG_CORRUPT. */
bool
xfs_scrub_process_error(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	xfs_agblock_t			bno,
	int				*error)
{
	return __xfs_scrub_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

/*
 * Process an operational error encountered during a cross-reference
 * check; verifier errors set OFLAG_XFAIL instead of OFLAG_CORRUPT.
 */
bool
xfs_scrub_xref_process_error(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	xfs_agblock_t			bno,
	int				*error)
{
	return __xfs_scrub_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/*
 * Check for operational errors for a file offset.
 * Same contract as __xfs_scrub_process_error, but the tracepoint is
 * keyed by (fork, file offset) instead of (agno, agbno).
 */
static bool
__xfs_scrub_fblock_process_error(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset,
	int				*error,
	__u32				errflag,
	void				*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort; *error is cleared. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		/* fall through */
	default:
		trace_xfs_scrub_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		break;
	}
	return false;
}

/* Process a file-offset operational error; sets OFLAG_CORRUPT. */
bool
xfs_scrub_fblock_process_error(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset,
	int				*error)
{
	return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

/* Process a file-offset cross-reference error; sets OFLAG_XFAIL. */
bool
xfs_scrub_fblock_xref_process_error(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset,
	int				*error)
{
	return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/*
 * Handling scrub corruption/optimization/warning checks.
 *
 * The *_set_{corrupt,preen,warning}() family of functions are used to
 * record the presence of metadata that is incorrect (corrupt), could be
 * optimized somehow (preen), or should be flagged for administrative
 * review but is not incorrect (warn).
 *
 * ftrace can be used to record the precise metadata location and
 * approximate code location of the failed check.
 */

/* Record a block which could be optimized. */
void
xfs_scrub_block_set_preen(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xfs_scrub_block_preen(sc, bp->b_bn, __return_address);
}

/*
 * Record an inode which could be optimized.  The trace data will
 * include the block given by bp if bp is given; otherwise it will use
 * the block location of the inode record itself.
 */
void
xfs_scrub_ino_set_preen(
	struct xfs_scrub_context	*sc,
	xfs_ino_t			ino,
	struct xfs_buf			*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xfs_scrub_ino_preen(sc, ino, bp ? bp->b_bn : 0,
			__return_address);
}

/* Record a corrupt block. */
void
xfs_scrub_block_set_corrupt(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
}

/* Record a corruption while cross-referencing. */
void
xfs_scrub_block_xref_set_corrupt(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
}

/*
 * Record a corrupt inode.  The trace data will include the block given
 * by bp if bp is given; otherwise it will use the block location of the
 * inode record itself.
 */
void
xfs_scrub_ino_set_corrupt(
	struct xfs_scrub_context	*sc,
	xfs_ino_t			ino,
	struct xfs_buf			*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xfs_scrub_ino_error(sc, ino, bp ? bp->b_bn : 0, __return_address);
}

/* Record a corruption while cross-referencing with an inode. */
void
xfs_scrub_ino_xref_set_corrupt(
	struct xfs_scrub_context	*sc,
	xfs_ino_t			ino,
	struct xfs_buf			*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xfs_scrub_ino_error(sc, ino, bp ? bp->b_bn : 0, __return_address);
}

/* Record corruption in a block indexed by a file fork. */
void
xfs_scrub_fblock_set_corrupt(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
}

/* Record a corruption while cross-referencing a fork block.
 */
void
xfs_scrub_fblock_xref_set_corrupt(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
}

/*
 * Warn about inodes that need administrative review but are not
 * incorrect.
 */
void
xfs_scrub_ino_set_warning(
	struct xfs_scrub_context	*sc,
	xfs_ino_t			ino,
	struct xfs_buf			*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xfs_scrub_ino_warning(sc, ino, bp ? bp->b_bn : 0,
			__return_address);
}

/* Warn about a block indexed by a file fork that needs review. */
void
xfs_scrub_fblock_set_warning(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xfs_scrub_fblock_warning(sc, whichfork, offset, __return_address);
}

/* Signal an incomplete scrub. */
void
xfs_scrub_set_incomplete(
	struct xfs_scrub_context	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
	trace_xfs_scrub_incomplete(sc, __return_address);
}

/*
 * rmap scrubbing -- compute the number of blocks with a given owner,
 * at least according to the reverse mapping data.
 */

struct xfs_scrub_rmap_ownedby_info {
	struct xfs_owner_info	*oinfo;	/* owner we're looking for */
	xfs_filblks_t		*blocks;	/* accumulated block count */
};

/*
 * Per-record callback for xfs_rmap_query_all: accumulate the block
 * count of every rmap record whose owner matches @sroi->oinfo.  For
 * inode owners the data/attr fork flag must also match; non-inode
 * owners have no fork distinction.  Always returns 0 (keep iterating).
 */
STATIC int
xfs_scrub_count_rmap_ownedby_irec(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xfs_scrub_rmap_ownedby_info	*sroi = priv;
	bool				irec_attr;
	bool				oinfo_attr;

	irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
	oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;

	if (rec->rm_owner != sroi->oinfo->oi_owner)
		return 0;

	if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
		(*sroi->blocks) += rec->rm_blockcount;

	return 0;
}

/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an rmapbt cursor.  *blocks is reset to zero
 * before the walk begins.
 */
int
xfs_scrub_count_rmap_ownedby_ag(
	struct xfs_scrub_context	*sc,
	struct xfs_btree_cur		*cur,
	struct xfs_owner_info		*oinfo,
	xfs_filblks_t			*blocks)
{
	struct xfs_scrub_rmap_ownedby_info	sroi;

	sroi.oinfo = oinfo;
	*blocks = 0;
	sroi.blocks = blocks;

	return xfs_rmap_query_all(cur, xfs_scrub_count_rmap_ownedby_irec,
			&sroi);
}

/*
 * AG scrubbing
 *
 * These helpers facilitate locking an allocation group's header
 * buffers, setting up cursors for all btrees that are present, and
 * cleaning everything up once we're through.
 */

/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
	struct xfs_scrub_context	*sc,
	unsigned int			type)
{
	/* Return all AG header read failures when scanning btrees. */
	if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
		return true;
	/*
	 * If we're scanning a given type of AG header, we only want to
	 * see read failures from that specific header.  We'd like the
	 * other headers to cross-check them, but this isn't required.
	 */
	if (sc->sm->sm_type == type)
		return true;
	return false;
}

/*
 * Grab all the headers for an AG.
 *
 * The headers should be released by xfs_scrub_ag_free, but as a fail
 * safe we attach all the buffers we grab to the scrub transaction so
 * they'll all be freed when we cancel it.
 *
 * Read failures for headers other than the one being scrubbed are
 * deliberately ignored (see want_ag_read_header_failure); the
 * corresponding buffer pointer is simply left unset in that case.
 */
int
xfs_scrub_ag_read_headers(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	struct xfs_buf			**agi,
	struct xfs_buf			**agf,
	struct xfs_buf			**agfl)
{
	struct xfs_mount		*mp = sc->mp;
	int				error;

	error = xfs_ialloc_read_agi(mp, sc->tp, agno, agi);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		goto out;

	error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, agf);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		goto out;

	error = xfs_alloc_read_agfl(mp, sc->tp, agno, agfl);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
		goto out;
	error = 0;
out:
	return error;
}

/*
 * Release all the AG btree cursors.  Cursors are torn down in the
 * reverse of the order xfs_scrub_ag_btcur_init creates them; NULL
 * cursors (never initialized, or already freed) are skipped, so this
 * is safe to call on a partially initialized xfs_scrub_ag.
 */
void
xfs_scrub_ag_btcur_free(
	struct xfs_scrub_ag		*sa)
{
	if (sa->refc_cur)
		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
	if (sa->rmap_cur)
		xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
	if (sa->fino_cur)
		xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
	if (sa->ino_cur)
		xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
	if (sa->cnt_cur)
		xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
	if (sa->bno_cur)
		xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

	sa->refc_cur = NULL;
	sa->rmap_cur = NULL;
	sa->fino_cur = NULL;
	sa->ino_cur = NULL;
	sa->bno_cur = NULL;
	sa->cnt_cur = NULL;
}

/* Initialize all the btree cursors for an AG.
*/ 470 int 471 xfs_scrub_ag_btcur_init( 472 struct xfs_scrub_context *sc, 473 struct xfs_scrub_ag *sa) 474 { 475 struct xfs_mount *mp = sc->mp; 476 xfs_agnumber_t agno = sa->agno; 477 478 if (sa->agf_bp) { 479 /* Set up a bnobt cursor for cross-referencing. */ 480 sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp, 481 agno, XFS_BTNUM_BNO); 482 if (!sa->bno_cur) 483 goto err; 484 485 /* Set up a cntbt cursor for cross-referencing. */ 486 sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp, 487 agno, XFS_BTNUM_CNT); 488 if (!sa->cnt_cur) 489 goto err; 490 } 491 492 /* Set up a inobt cursor for cross-referencing. */ 493 if (sa->agi_bp) { 494 sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp, 495 agno, XFS_BTNUM_INO); 496 if (!sa->ino_cur) 497 goto err; 498 } 499 500 /* Set up a finobt cursor for cross-referencing. */ 501 if (sa->agi_bp && xfs_sb_version_hasfinobt(&mp->m_sb)) { 502 sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp, 503 agno, XFS_BTNUM_FINO); 504 if (!sa->fino_cur) 505 goto err; 506 } 507 508 /* Set up a rmapbt cursor for cross-referencing. */ 509 if (sa->agf_bp && xfs_sb_version_hasrmapbt(&mp->m_sb)) { 510 sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp, 511 agno); 512 if (!sa->rmap_cur) 513 goto err; 514 } 515 516 /* Set up a refcountbt cursor for cross-referencing. */ 517 if (sa->agf_bp && xfs_sb_version_hasreflink(&mp->m_sb)) { 518 sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp, 519 sa->agf_bp, agno, NULL); 520 if (!sa->refc_cur) 521 goto err; 522 } 523 524 return 0; 525 err: 526 return -ENOMEM; 527 } 528 529 /* Release the AG header context and btree cursors. 
 */
void
xfs_scrub_ag_free(
	struct xfs_scrub_context	*sc,
	struct xfs_scrub_ag		*sa)
{
	xfs_scrub_ag_btcur_free(sa);
	if (sa->agfl_bp) {
		xfs_trans_brelse(sc->tp, sa->agfl_bp);
		sa->agfl_bp = NULL;
	}
	if (sa->agf_bp) {
		xfs_trans_brelse(sc->tp, sa->agf_bp);
		sa->agf_bp = NULL;
	}
	if (sa->agi_bp) {
		xfs_trans_brelse(sc->tp, sa->agi_bp);
		sa->agi_bp = NULL;
	}
	sa->agno = NULLAGNUMBER;
}

/*
 * For scrub, grab the AGI and the AGF headers, in that order.  Locking
 * order requires us to get the AGI before the AGF.  We use the
 * transaction to avoid deadlocking on crosslinked metadata buffers;
 * either the caller passes one in (bmap scrub) or we have to create a
 * transaction ourselves.
 */
int
xfs_scrub_ag_init(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	struct xfs_scrub_ag		*sa)
{
	int				error;

	sa->agno = agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sa->agi_bp,
			&sa->agf_bp, &sa->agfl_bp);
	if (error)
		return error;

	return xfs_scrub_ag_btcur_init(sc, sa);
}

/* Per-scrubber setup functions */

/* Set us up with a transaction and an empty context. */
int
xfs_scrub_setup_fs(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip)
{
	return xfs_scrub_trans_alloc(sc->sm, sc->mp, &sc->tp);
}

/* Set us up with AG headers and btree cursors. */
int
xfs_scrub_setup_ag_btree(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip,
	bool				force_log)
{
	struct xfs_mount		*mp = sc->mp;
	int				error;

	/*
	 * If the caller asks us to checkpoint the log, do so.  This
	 * expensive operation should be performed infrequently and only
	 * as a last resort.  Any caller that sets force_log should
	 * document why they need to do so.
	 */
	if (force_log) {
		error = xfs_scrub_checkpoint_log(mp);
		if (error)
			return error;
	}

	error = xfs_scrub_setup_fs(sc, ip);
	if (error)
		return error;

	return xfs_scrub_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}

/* Push everything out of the log onto disk, then drain the AIL. */
int
xfs_scrub_checkpoint_log(
	struct xfs_mount	*mp)
{
	int			error;

	error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
	if (error)
		return error;
	xfs_ail_push_all_sync(mp->m_ail);
	return 0;
}

/*
 * Given an inode and the scrub control structure, grab either the
 * inode referenced in the control structure or the inode passed in.
 * The inode is not locked.
 *
 * On success, sc->ip points at the inode to scrub.  Returns -ENOENT if
 * the requested inode doesn't exist or its generation doesn't match,
 * or a negative errno for other failures.
 */
int
xfs_scrub_get_inode(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip_in)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_inode		*ip = NULL;
	int				error;

	/* We want to scan the inode we already had opened. */
	if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
		sc->ip = ip_in;
		return 0;
	}

	/* Look up the inode, see if the generation number matches. */
	if (xfs_internal_inum(mp, sc->sm->sm_ino))
		return -ENOENT;
	error = xfs_iget(mp, NULL, sc->sm->sm_ino,
			XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, &ip);
	switch (error) {
	case -ENOENT:
		/* Inode doesn't exist, just bail out. */
		return error;
	case 0:
		/* Got an inode, continue. */
		break;
	case -EINVAL:
		/*
		 * -EINVAL with IGET_UNTRUSTED could mean one of several
		 * things: userspace gave us an inode number that doesn't
		 * correspond to fs space, or doesn't have an inobt entry;
		 * or it could simply mean that the inode buffer failed the
		 * read verifiers.
		 *
		 * Try just the inode mapping lookup -- if it succeeds, then
		 * the inode buffer verifier failed and something needs fixing.
		 * Otherwise, we really couldn't find it so tell userspace
		 * that it no longer exists.
		 */
		error = xfs_imap(sc->mp, sc->tp, sc->sm->sm_ino, &imap,
				XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE);
		if (error)
			return -ENOENT;
		error = -EFSCORRUPTED;
		/* fall through */
	default:
		trace_xfs_scrub_op_error(sc,
				XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
				XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
				error, __return_address);
		return error;
	}
	if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
		/* Stale inode number; drop our reference and bail out. */
		iput(VFS_I(ip));
		return -ENOENT;
	}

	sc->ip = ip;
	return 0;
}

/* Set us up to scrub a file's contents. */
int
xfs_scrub_setup_inode_contents(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip,
	unsigned int			resblks)
{
	struct xfs_mount		*mp = sc->mp;
	int				error;

	error = xfs_scrub_get_inode(sc, ip);
	if (error)
		return error;

	/* Got the inode, lock it and we're ready to go. */
	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);
	error = xfs_scrub_trans_alloc(sc->sm, mp, &sc->tp);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode for us */
	return error;
}

/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check.
 *
 * Cross-referencing errors set OFLAG_XFAIL and clear *error so that
 * they never abort the scrubber that encountered them.
 */
bool
xfs_scrub_should_check_xref(
	struct xfs_scrub_context	*sc,
	int				*error,
	struct xfs_btree_cur		**curpp)
{
	if (*error == 0)
		return true;

	if (curpp) {
		/* If we've already given up on xref, just bail out. */
		if (!*curpp)
			return false;

		/* xref error, delete cursor and bail out. */
		xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
		*curpp = NULL;
	}

	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
	trace_xfs_scrub_xref_error(sc, *error, __return_address);

	/*
	 * Errors encountered during cross-referencing with another
	 * data structure should not cause this scrubber to abort.
	 */
	*error = 0;
	return false;
}

/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xfs_scrub_buffer_recheck(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	xfs_failaddr_t			fa;

	/* No ops at all means we can't verify anything; call it corrupt. */
	if (bp->b_ops == NULL) {
		xfs_scrub_block_set_corrupt(sc, bp);
		return;
	}
	/* Ops without a structure verifier: we can't recheck this buffer. */
	if (bp->b_ops->verify_struct == NULL) {
		xfs_scrub_set_incomplete(sc);
		return;
	}
	fa = bp->b_ops->verify_struct(bp);
	if (!fa)
		return;
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xfs_scrub_block_error(sc, bp->b_bn, fa);
}