// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * Health tracking for XFS metadata: record which metadata structures have
 * been checked and/or found sick, and report that state via tracepoints,
 * the fserror infrastructure, and the healthmon interface.
 */
#include "xfs_platform.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trace.h"
#include "xfs_health.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_quota_defs.h"
#include "xfs_rtgroup.h"
#include "xfs_healthmon.h"

#include <linux/fserror.h>

/*
 * If any metadata in this group (AG or rtgroup) was found sick and has not
 * yet been fixed, emit a tracepoint and tell the caller to warn at unmount.
 */
static void
xfs_health_unmount_group(
	struct xfs_group	*xg,
	bool			*warn)
{
	unsigned int		sick = 0;
	unsigned int		checked = 0;

	xfs_group_measure_sickness(xg, &sick, &checked);
	if (sick) {
		trace_xfs_group_unfixed_corruption(xg, sick);
		*warn = true;
	}
}

/*
 * Warn about metadata corruption that we detected but haven't fixed, and
 * make sure we're not sitting on anything that would get in the way of
 * recovery.
 */
void
xfs_health_unmount(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag = NULL;
	struct xfs_rtgroup	*rtg = NULL;
	unsigned int		sick = 0;
	unsigned int		checked = 0;
	bool			warn = false;

	/* Nothing useful to report once the fs has shut down. */
	if (xfs_is_shutdown(mp))
		return;

	/* Measure AG corruption levels. */
	while ((pag = xfs_perag_next(mp, pag)))
		xfs_health_unmount_group(pag_group(pag), &warn);

	/* Measure realtime group corruption levels. */
	while ((rtg = xfs_rtgroup_next(mp, rtg)))
		xfs_health_unmount_group(rtg_group(rtg), &warn);

	/*
	 * Measure fs corruption and keep the sample around for the warning.
	 * See the note below for why we exempt FS_COUNTERS.
	 */
	xfs_fs_measure_sickness(mp, &sick, &checked);
	if (sick & ~XFS_SICK_FS_COUNTERS) {
		trace_xfs_fs_unfixed_corruption(mp, sick);
		warn = true;
	}

	if (warn) {
		xfs_warn(mp,
"Uncorrected metadata errors detected; please run xfs_repair.");

		/*
		 * We discovered uncorrected metadata problems at some point
		 * during this filesystem mount and have advised the
		 * administrator to run repair once the unmount completes.
		 *
		 * However, we must be careful -- when FSCOUNTERS are flagged
		 * unhealthy, the unmount procedure omits writing the clean
		 * unmount record to the log so that the next mount will run
		 * recovery and recompute the summary counters.  In other
		 * words, we leave a dirty log to get the counters fixed.
		 *
		 * Unfortunately, xfs_repair cannot recover dirty logs, so if
		 * there were filesystem problems, FSCOUNTERS was flagged, and
		 * the administrator takes our advice to run xfs_repair,
		 * they'll have to zap the log before repairing structures.
		 * We don't really want to encourage this, so we mark the
		 * FSCOUNTERS healthy so that a subsequent repair run won't see
		 * a dirty log.
		 */
		if (sick & XFS_SICK_FS_COUNTERS)
			xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
	}
}

/* Mark unhealthy per-fs metadata. */
void
xfs_fs_mark_sick(
	struct xfs_mount	*mp,
	unsigned int		mask)
{
	unsigned int		old_mask;

	ASSERT(!(mask & ~XFS_SICK_FS_ALL));
	trace_xfs_fs_mark_sick(mp, mask);

	spin_lock(&mp->m_sb_lock);
	old_mask = mp->m_fs_sick;	/* pre-update state for healthmon */
	mp->m_fs_sick |= mask;
	spin_unlock(&mp->m_sb_lock);

	/* Tell the VFS error infrastructure and the health monitor. */
	fserror_report_metadata(mp->m_super, -EFSCORRUPTED, GFP_NOFS);
	if (mask)
		xfs_healthmon_report_fs(mp, XFS_HEALTHMON_SICK, old_mask, mask);
}

/* Mark per-fs metadata as having been checked and found unhealthy by fsck.
 */
void
xfs_fs_mark_corrupt(
	struct xfs_mount	*mp,
	unsigned int		mask)
{
	unsigned int		old_mask;

	ASSERT(!(mask & ~XFS_SICK_FS_ALL));
	trace_xfs_fs_mark_corrupt(mp, mask);

	spin_lock(&mp->m_sb_lock);
	old_mask = mp->m_fs_sick;
	mp->m_fs_sick |= mask;
	mp->m_fs_checked |= mask;	/* "corrupt" also implies "checked" */
	spin_unlock(&mp->m_sb_lock);

	fserror_report_metadata(mp->m_super, -EFSCORRUPTED, GFP_NOFS);
	if (mask)
		xfs_healthmon_report_fs(mp, XFS_HEALTHMON_CORRUPT, old_mask,
				mask);
}

/* Mark a per-fs metadata healed. */
void
xfs_fs_mark_healthy(
	struct xfs_mount	*mp,
	unsigned int		mask)
{
	unsigned int		old_mask;

	ASSERT(!(mask & ~XFS_SICK_FS_ALL));
	trace_xfs_fs_mark_healthy(mp, mask);

	spin_lock(&mp->m_sb_lock);
	old_mask = mp->m_fs_sick;
	mp->m_fs_sick &= ~mask;
	/*
	 * Once no primary metadata is sick, clear all of the secondary
	 * sick state as well.
	 */
	if (!(mp->m_fs_sick & XFS_SICK_FS_PRIMARY))
		mp->m_fs_sick &= ~XFS_SICK_FS_SECONDARY;
	mp->m_fs_checked |= mask;
	spin_unlock(&mp->m_sb_lock);

	if (mask)
		xfs_healthmon_report_fs(mp, XFS_HEALTHMON_HEALTHY, old_mask,
				mask);
}

/* Sample which per-fs metadata are unhealthy. */
void
xfs_fs_measure_sickness(
	struct xfs_mount	*mp,
	unsigned int		*sick,
	unsigned int		*checked)
{
	spin_lock(&mp->m_sb_lock);
	*sick = mp->m_fs_sick;
	*checked = mp->m_fs_checked;
	spin_unlock(&mp->m_sb_lock);
}

/* Mark unhealthy per-ag metadata given a raw AG number. */
void
xfs_agno_mark_sick(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	unsigned int		mask)
{
	struct xfs_perag	*pag = xfs_perag_get(mp, agno);

	/* per-ag structure not set up yet? */
	if (!pag)
		return;

	xfs_ag_mark_sick(pag, mask);
	xfs_perag_put(pag);
}

/* Assert that the mask is appropriate for this group's type (AG vs rtgroup). */
static inline void
xfs_group_check_mask(
	struct xfs_group	*xg,
	unsigned int		mask)
{
	if (xg->xg_type == XG_TYPE_AG)
		ASSERT(!(mask & ~XFS_SICK_AG_ALL));
	else
		ASSERT(!(mask & ~XFS_SICK_RG_ALL));
}

/* Mark unhealthy per-group metadata. */
void
xfs_group_mark_sick(
	struct xfs_group	*xg,
	unsigned int		mask)
{
	unsigned int		old_mask;

	xfs_group_check_mask(xg, mask);
	trace_xfs_group_mark_sick(xg, mask);

	spin_lock(&xg->xg_state_lock);
	old_mask = xg->xg_sick;
	xg->xg_sick |= mask;
	spin_unlock(&xg->xg_state_lock);

	fserror_report_metadata(xg->xg_mount->m_super, -EFSCORRUPTED, GFP_NOFS);
	if (mask)
		xfs_healthmon_report_group(xg, XFS_HEALTHMON_SICK, old_mask,
				mask);
}

/*
 * Mark per-group metadata as having been checked and found unhealthy by fsck.
 */
void
xfs_group_mark_corrupt(
	struct xfs_group	*xg,
	unsigned int		mask)
{
	unsigned int		old_mask;

	xfs_group_check_mask(xg, mask);
	trace_xfs_group_mark_corrupt(xg, mask);

	spin_lock(&xg->xg_state_lock);
	old_mask = xg->xg_sick;
	xg->xg_sick |= mask;
	xg->xg_checked |= mask;
	spin_unlock(&xg->xg_state_lock);

	fserror_report_metadata(xg->xg_mount->m_super, -EFSCORRUPTED, GFP_NOFS);
	if (mask)
		xfs_healthmon_report_group(xg, XFS_HEALTHMON_CORRUPT, old_mask,
				mask);
}

/*
 * Mark per-group metadata ok.
263 */ 264 void 265 xfs_group_mark_healthy( 266 struct xfs_group *xg, 267 unsigned int mask) 268 { 269 unsigned int old_mask; 270 271 xfs_group_check_mask(xg, mask); 272 trace_xfs_group_mark_healthy(xg, mask); 273 274 spin_lock(&xg->xg_state_lock); 275 old_mask = xg->xg_sick; 276 xg->xg_sick &= ~mask; 277 if (!(xg->xg_sick & XFS_SICK_AG_PRIMARY)) 278 xg->xg_sick &= ~XFS_SICK_AG_SECONDARY; 279 xg->xg_checked |= mask; 280 spin_unlock(&xg->xg_state_lock); 281 282 if (mask) 283 xfs_healthmon_report_group(xg, XFS_HEALTHMON_HEALTHY, old_mask, 284 mask); 285 } 286 287 /* Sample which per-ag metadata are unhealthy. */ 288 void 289 xfs_group_measure_sickness( 290 struct xfs_group *xg, 291 unsigned int *sick, 292 unsigned int *checked) 293 { 294 spin_lock(&xg->xg_state_lock); 295 *sick = xg->xg_sick; 296 *checked = xg->xg_checked; 297 spin_unlock(&xg->xg_state_lock); 298 } 299 300 /* Mark unhealthy per-rtgroup metadata given a raw rt group number. */ 301 void 302 xfs_rgno_mark_sick( 303 struct xfs_mount *mp, 304 xfs_rgnumber_t rgno, 305 unsigned int mask) 306 { 307 struct xfs_rtgroup *rtg = xfs_rtgroup_get(mp, rgno); 308 309 /* per-rtgroup structure not set up yet? */ 310 if (!rtg) 311 return; 312 313 xfs_group_mark_sick(rtg_group(rtg), mask); 314 xfs_rtgroup_put(rtg); 315 } 316 317 /* Mark the unhealthy parts of an inode. */ 318 void 319 xfs_inode_mark_sick( 320 struct xfs_inode *ip, 321 unsigned int mask) 322 { 323 unsigned int old_mask; 324 325 ASSERT(!(mask & ~XFS_SICK_INO_ALL)); 326 trace_xfs_inode_mark_sick(ip, mask); 327 328 spin_lock(&ip->i_flags_lock); 329 old_mask = ip->i_sick; 330 ip->i_sick |= mask; 331 spin_unlock(&ip->i_flags_lock); 332 333 /* 334 * Keep this inode around so we don't lose the sickness report. Scrub 335 * grabs inodes with DONTCACHE assuming that most inode are ok, which 336 * is not the case here. 
337 */ 338 spin_lock(&VFS_I(ip)->i_lock); 339 inode_state_clear(VFS_I(ip), I_DONTCACHE); 340 spin_unlock(&VFS_I(ip)->i_lock); 341 342 fserror_report_file_metadata(VFS_I(ip), -EFSCORRUPTED, GFP_NOFS); 343 if (mask) 344 xfs_healthmon_report_inode(ip, XFS_HEALTHMON_SICK, old_mask, 345 mask); 346 } 347 348 /* Mark inode metadata as having been checked and found unhealthy by fsck. */ 349 void 350 xfs_inode_mark_corrupt( 351 struct xfs_inode *ip, 352 unsigned int mask) 353 { 354 unsigned int old_mask; 355 356 ASSERT(!(mask & ~XFS_SICK_INO_ALL)); 357 trace_xfs_inode_mark_corrupt(ip, mask); 358 359 spin_lock(&ip->i_flags_lock); 360 old_mask = ip->i_sick; 361 ip->i_sick |= mask; 362 ip->i_checked |= mask; 363 spin_unlock(&ip->i_flags_lock); 364 365 /* 366 * Keep this inode around so we don't lose the sickness report. Scrub 367 * grabs inodes with DONTCACHE assuming that most inode are ok, which 368 * is not the case here. 369 */ 370 spin_lock(&VFS_I(ip)->i_lock); 371 inode_state_clear(VFS_I(ip), I_DONTCACHE); 372 spin_unlock(&VFS_I(ip)->i_lock); 373 374 fserror_report_file_metadata(VFS_I(ip), -EFSCORRUPTED, GFP_NOFS); 375 if (mask) 376 xfs_healthmon_report_inode(ip, XFS_HEALTHMON_CORRUPT, old_mask, 377 mask); 378 } 379 380 /* Mark parts of an inode healed. */ 381 void 382 xfs_inode_mark_healthy( 383 struct xfs_inode *ip, 384 unsigned int mask) 385 { 386 unsigned int old_mask; 387 388 ASSERT(!(mask & ~XFS_SICK_INO_ALL)); 389 trace_xfs_inode_mark_healthy(ip, mask); 390 391 spin_lock(&ip->i_flags_lock); 392 old_mask = ip->i_sick; 393 ip->i_sick &= ~mask; 394 if (!(ip->i_sick & XFS_SICK_INO_PRIMARY)) 395 ip->i_sick &= ~XFS_SICK_INO_SECONDARY; 396 ip->i_checked |= mask; 397 spin_unlock(&ip->i_flags_lock); 398 399 if (mask) 400 xfs_healthmon_report_inode(ip, XFS_HEALTHMON_HEALTHY, old_mask, 401 mask); 402 } 403 404 /* Sample which parts of an inode are unhealthy. 
 */
void
xfs_inode_measure_sickness(
	struct xfs_inode	*ip,
	unsigned int		*sick,
	unsigned int		*checked)
{
	spin_lock(&ip->i_flags_lock);
	*sick = ip->i_sick;
	*checked = ip->i_checked;
	spin_unlock(&ip->i_flags_lock);
}

/* Mappings between internal sick masks and ioctl sick masks. */

struct ioctl_sick_map {
	unsigned int		sick_mask;	/* internal XFS_SICK_* bit */
	unsigned int		ioctl_mask;	/* corresponding ioctl bit */
};

/* Walk m over every entry of a sick map table. */
#define for_each_sick_map(map, m) \
	for ((m) = (map); (m) < (map) + ARRAY_SIZE(map); (m)++)

static const struct ioctl_sick_map fs_map[] = {
	{ XFS_SICK_FS_COUNTERS,	XFS_FSOP_GEOM_SICK_COUNTERS},
	{ XFS_SICK_FS_UQUOTA,	XFS_FSOP_GEOM_SICK_UQUOTA },
	{ XFS_SICK_FS_GQUOTA,	XFS_FSOP_GEOM_SICK_GQUOTA },
	{ XFS_SICK_FS_PQUOTA,	XFS_FSOP_GEOM_SICK_PQUOTA },
	{ XFS_SICK_FS_QUOTACHECK, XFS_FSOP_GEOM_SICK_QUOTACHECK },
	{ XFS_SICK_FS_NLINKS,	XFS_FSOP_GEOM_SICK_NLINKS },
	{ XFS_SICK_FS_METADIR,	XFS_FSOP_GEOM_SICK_METADIR },
	{ XFS_SICK_FS_METAPATH, XFS_FSOP_GEOM_SICK_METAPATH },
};

static const struct ioctl_sick_map rt_map[] = {
	{ XFS_SICK_RG_BITMAP,	XFS_FSOP_GEOM_SICK_RT_BITMAP },
	{ XFS_SICK_RG_SUMMARY,	XFS_FSOP_GEOM_SICK_RT_SUMMARY },
};

/* Fold one map entry's sick/checked state into the fs geometry report. */
static inline void
xfgeo_health_tick(
	struct xfs_fsop_geom	*geo,
	unsigned int		sick,
	unsigned int		checked,
	const struct ioctl_sick_map *m)
{
	if (checked & m->sick_mask)
		geo->checked |= m->ioctl_mask;
	if (sick & m->sick_mask)
		geo->sick |= m->ioctl_mask;
}

/* Fill out fs geometry health info.
 */
void
xfs_fsop_geom_health(
	struct xfs_mount	*mp,
	struct xfs_fsop_geom	*geo)
{
	struct xfs_rtgroup	*rtg = NULL;
	const struct ioctl_sick_map *m;
	unsigned int		sick;
	unsigned int		checked;

	geo->sick = 0;
	geo->checked = 0;

	xfs_fs_measure_sickness(mp, &sick, &checked);
	for_each_sick_map(fs_map, m)
		xfgeo_health_tick(geo, sick, checked, m);

	/* rt bitmap/summary health comes from the per-rtgroup state. */
	while ((rtg = xfs_rtgroup_next(mp, rtg))) {
		xfs_group_measure_sickness(rtg_group(rtg), &sick, &checked);
		for_each_sick_map(rt_map, m)
			xfgeo_health_tick(geo, sick, checked, m);
	}
}

/*
 * Translate XFS_SICK_FS_* into XFS_FSOP_GEOM_SICK_* except for the rt free
 * space codes, which are sent via the rtgroup events.
 */
unsigned int
xfs_healthmon_fs_mask(
	unsigned int		sick_mask)
{
	const struct ioctl_sick_map *m;
	unsigned int		ioctl_mask = 0;

	for_each_sick_map(fs_map, m) {
		if (sick_mask & m->sick_mask)
			ioctl_mask |= m->ioctl_mask;
	}

	return ioctl_mask;
}

static const struct ioctl_sick_map ag_map[] = {
	{ XFS_SICK_AG_SB,	XFS_AG_GEOM_SICK_SB },
	{ XFS_SICK_AG_AGF,	XFS_AG_GEOM_SICK_AGF },
	{ XFS_SICK_AG_AGFL,	XFS_AG_GEOM_SICK_AGFL },
	{ XFS_SICK_AG_AGI,	XFS_AG_GEOM_SICK_AGI },
	{ XFS_SICK_AG_BNOBT,	XFS_AG_GEOM_SICK_BNOBT },
	{ XFS_SICK_AG_CNTBT,	XFS_AG_GEOM_SICK_CNTBT },
	{ XFS_SICK_AG_INOBT,	XFS_AG_GEOM_SICK_INOBT },
	{ XFS_SICK_AG_FINOBT,	XFS_AG_GEOM_SICK_FINOBT },
	{ XFS_SICK_AG_RMAPBT,	XFS_AG_GEOM_SICK_RMAPBT },
	{ XFS_SICK_AG_REFCNTBT,	XFS_AG_GEOM_SICK_REFCNTBT },
	{ XFS_SICK_AG_INODES,	XFS_AG_GEOM_SICK_INODES },
};

/* Fill out ag geometry health info.
*/ 515 void 516 xfs_ag_geom_health( 517 struct xfs_perag *pag, 518 struct xfs_ag_geometry *ageo) 519 { 520 const struct ioctl_sick_map *m; 521 unsigned int sick; 522 unsigned int checked; 523 524 ageo->ag_sick = 0; 525 ageo->ag_checked = 0; 526 527 xfs_group_measure_sickness(pag_group(pag), &sick, &checked); 528 for_each_sick_map(ag_map, m) { 529 if (checked & m->sick_mask) 530 ageo->ag_checked |= m->ioctl_mask; 531 if (sick & m->sick_mask) 532 ageo->ag_sick |= m->ioctl_mask; 533 } 534 } 535 536 /* Translate XFS_SICK_AG_* into XFS_AG_GEOM_SICK_*. */ 537 unsigned int 538 xfs_healthmon_perag_mask( 539 unsigned int sick_mask) 540 { 541 const struct ioctl_sick_map *m; 542 unsigned int ioctl_mask = 0; 543 544 for_each_sick_map(ag_map, m) { 545 if (sick_mask & m->sick_mask) 546 ioctl_mask |= m->ioctl_mask; 547 } 548 549 return ioctl_mask; 550 } 551 552 static const struct ioctl_sick_map rtgroup_map[] = { 553 { XFS_SICK_RG_SUPER, XFS_RTGROUP_GEOM_SICK_SUPER }, 554 { XFS_SICK_RG_BITMAP, XFS_RTGROUP_GEOM_SICK_BITMAP }, 555 { XFS_SICK_RG_SUMMARY, XFS_RTGROUP_GEOM_SICK_SUMMARY }, 556 { XFS_SICK_RG_RMAPBT, XFS_RTGROUP_GEOM_SICK_RMAPBT }, 557 { XFS_SICK_RG_REFCNTBT, XFS_RTGROUP_GEOM_SICK_REFCNTBT }, 558 }; 559 560 /* Fill out rtgroup geometry health info. */ 561 void 562 xfs_rtgroup_geom_health( 563 struct xfs_rtgroup *rtg, 564 struct xfs_rtgroup_geometry *rgeo) 565 { 566 const struct ioctl_sick_map *m; 567 unsigned int sick; 568 unsigned int checked; 569 570 rgeo->rg_sick = 0; 571 rgeo->rg_checked = 0; 572 573 xfs_group_measure_sickness(rtg_group(rtg), &sick, &checked); 574 for_each_sick_map(rtgroup_map, m) { 575 if (checked & m->sick_mask) 576 rgeo->rg_checked |= m->ioctl_mask; 577 if (sick & m->sick_mask) 578 rgeo->rg_sick |= m->ioctl_mask; 579 } 580 } 581 582 /* Translate XFS_SICK_RG_* into XFS_RTGROUP_GEOM_SICK_*. 
*/ 583 unsigned int 584 xfs_healthmon_rtgroup_mask( 585 unsigned int sick_mask) 586 { 587 const struct ioctl_sick_map *m; 588 unsigned int ioctl_mask = 0; 589 590 for_each_sick_map(rtgroup_map, m) { 591 if (sick_mask & m->sick_mask) 592 ioctl_mask |= m->ioctl_mask; 593 } 594 595 return ioctl_mask; 596 } 597 598 static const struct ioctl_sick_map ino_map[] = { 599 { XFS_SICK_INO_CORE, XFS_BS_SICK_INODE }, 600 { XFS_SICK_INO_BMBTD, XFS_BS_SICK_BMBTD }, 601 { XFS_SICK_INO_BMBTA, XFS_BS_SICK_BMBTA }, 602 { XFS_SICK_INO_BMBTC, XFS_BS_SICK_BMBTC }, 603 { XFS_SICK_INO_DIR, XFS_BS_SICK_DIR }, 604 { XFS_SICK_INO_XATTR, XFS_BS_SICK_XATTR }, 605 { XFS_SICK_INO_SYMLINK, XFS_BS_SICK_SYMLINK }, 606 { XFS_SICK_INO_PARENT, XFS_BS_SICK_PARENT }, 607 { XFS_SICK_INO_BMBTD_ZAPPED, XFS_BS_SICK_BMBTD }, 608 { XFS_SICK_INO_BMBTA_ZAPPED, XFS_BS_SICK_BMBTA }, 609 { XFS_SICK_INO_DIR_ZAPPED, XFS_BS_SICK_DIR }, 610 { XFS_SICK_INO_SYMLINK_ZAPPED, XFS_BS_SICK_SYMLINK }, 611 { XFS_SICK_INO_DIRTREE, XFS_BS_SICK_DIRTREE }, 612 }; 613 614 /* Fill out bulkstat health info. */ 615 void 616 xfs_bulkstat_health( 617 struct xfs_inode *ip, 618 struct xfs_bulkstat *bs) 619 { 620 const struct ioctl_sick_map *m; 621 unsigned int sick; 622 unsigned int checked; 623 624 bs->bs_sick = 0; 625 bs->bs_checked = 0; 626 627 xfs_inode_measure_sickness(ip, &sick, &checked); 628 for_each_sick_map(ino_map, m) { 629 if (checked & m->sick_mask) 630 bs->bs_checked |= m->ioctl_mask; 631 if (sick & m->sick_mask) 632 bs->bs_sick |= m->ioctl_mask; 633 } 634 } 635 636 /* Translate XFS_SICK_INO_* into XFS_BS_SICK_*. */ 637 unsigned int 638 xfs_healthmon_inode_mask( 639 unsigned int sick_mask) 640 { 641 const struct ioctl_sick_map *m; 642 unsigned int ioctl_mask = 0; 643 644 for_each_sick_map(ino_map, m) { 645 if (sick_mask & m->sick_mask) 646 ioctl_mask |= m->ioctl_mask; 647 } 648 649 return ioctl_mask; 650 } 651 652 /* Mark a block mapping sick. 
*/ 653 void 654 xfs_bmap_mark_sick( 655 struct xfs_inode *ip, 656 int whichfork) 657 { 658 unsigned int mask; 659 660 switch (whichfork) { 661 case XFS_DATA_FORK: 662 mask = XFS_SICK_INO_BMBTD; 663 break; 664 case XFS_ATTR_FORK: 665 mask = XFS_SICK_INO_BMBTA; 666 break; 667 case XFS_COW_FORK: 668 mask = XFS_SICK_INO_BMBTC; 669 break; 670 default: 671 ASSERT(0); 672 return; 673 } 674 675 xfs_inode_mark_sick(ip, mask); 676 } 677 678 /* Record observations of btree corruption with the health tracking system. */ 679 void 680 xfs_btree_mark_sick( 681 struct xfs_btree_cur *cur) 682 { 683 if (xfs_btree_is_bmap(cur->bc_ops)) { 684 xfs_bmap_mark_sick(cur->bc_ino.ip, cur->bc_ino.whichfork); 685 /* no health state tracking for ephemeral btrees */ 686 } else if (cur->bc_ops->type != XFS_BTREE_TYPE_MEM) { 687 ASSERT(cur->bc_group); 688 ASSERT(cur->bc_ops->sick_mask); 689 xfs_group_mark_sick(cur->bc_group, cur->bc_ops->sick_mask); 690 } 691 } 692 693 /* 694 * Record observations of dir/attr btree corruption with the health tracking 695 * system. 696 */ 697 void 698 xfs_dirattr_mark_sick( 699 struct xfs_inode *ip, 700 int whichfork) 701 { 702 unsigned int mask; 703 704 switch (whichfork) { 705 case XFS_DATA_FORK: 706 mask = XFS_SICK_INO_DIR; 707 break; 708 case XFS_ATTR_FORK: 709 mask = XFS_SICK_INO_XATTR; 710 break; 711 default: 712 ASSERT(0); 713 return; 714 } 715 716 xfs_inode_mark_sick(ip, mask); 717 } 718 719 /* 720 * Record observations of dir/attr btree corruption with the health tracking 721 * system. 722 */ 723 void 724 xfs_da_mark_sick( 725 struct xfs_da_args *args) 726 { 727 xfs_dirattr_mark_sick(args->dp, args->whichfork); 728 } 729