xref: /linux/fs/xfs/xfs_health.c (revision 8457669db968c98edb781892d73fa559e1efcbd4)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2019 Oracle.  All Rights Reserved.
4  * Author: Darrick J. Wong <darrick.wong@oracle.com>
5  */
6 #include "xfs_platform.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_trace.h"
15 #include "xfs_health.h"
16 #include "xfs_ag.h"
17 #include "xfs_btree.h"
18 #include "xfs_da_format.h"
19 #include "xfs_da_btree.h"
20 #include "xfs_quota_defs.h"
21 #include "xfs_rtgroup.h"
22 #include "xfs_healthmon.h"
23 
24 #include <linux/fserror.h>
25 
26 static void
xfs_health_unmount_group(struct xfs_group * xg,bool * warn)27 xfs_health_unmount_group(
28 	struct xfs_group	*xg,
29 	bool			*warn)
30 {
31 	unsigned int		sick = 0;
32 	unsigned int		checked = 0;
33 
34 	xfs_group_measure_sickness(xg, &sick, &checked);
35 	if (sick) {
36 		trace_xfs_group_unfixed_corruption(xg, sick);
37 		*warn = true;
38 	}
39 }
40 
/*
 * Warn about metadata corruption that we detected but haven't fixed, and
 * make sure we're not sitting on anything that would get in the way of
 * recovery.
 */
void
xfs_health_unmount(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag = NULL;
	struct xfs_rtgroup	*rtg = NULL;
	unsigned int		sick = 0;
	unsigned int		checked = 0;
	bool			warn = false;

	/* An already shut-down fs needs repair anyway; skip the survey. */
	if (xfs_is_shutdown(mp))
		return;

	/* Measure AG corruption levels. */
	while ((pag = xfs_perag_next(mp, pag)))
		xfs_health_unmount_group(pag_group(pag), &warn);

	/* Measure realtime group corruption levels. */
	while ((rtg = xfs_rtgroup_next(mp, rtg)))
		xfs_health_unmount_group(rtg_group(rtg), &warn);

	/*
	 * Measure fs corruption and keep the sample around for the warning.
	 * See the note below for why we exempt FS_COUNTERS.
	 */
	xfs_fs_measure_sickness(mp, &sick, &checked);
	if (sick & ~XFS_SICK_FS_COUNTERS) {
		trace_xfs_fs_unfixed_corruption(mp, sick);
		warn = true;
	}

	if (warn) {
		xfs_warn(mp,
"Uncorrected metadata errors detected; please run xfs_repair.");

		/*
		 * We discovered uncorrected metadata problems at some point
		 * during this filesystem mount and have advised the
		 * administrator to run repair once the unmount completes.
		 *
		 * However, we must be careful -- when FSCOUNTERS are flagged
		 * unhealthy, the unmount procedure omits writing the clean
		 * unmount record to the log so that the next mount will run
		 * recovery and recompute the summary counters.  In other
		 * words, we leave a dirty log to get the counters fixed.
		 *
		 * Unfortunately, xfs_repair cannot recover dirty logs, so if
		 * there were filesystem problems, FSCOUNTERS was flagged, and
		 * the administrator takes our advice to run xfs_repair,
		 * they'll have to zap the log before repairing structures.
		 * We don't really want to encourage this, so we mark the
		 * FSCOUNTERS healthy so that a subsequent repair run won't see
		 * a dirty log.
		 */
		if (sick & XFS_SICK_FS_COUNTERS)
			xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
	}
}
104 
/* Mark unhealthy per-fs metadata. */
void
xfs_fs_mark_sick(
	struct xfs_mount	*mp,
	unsigned int		mask)
{
	unsigned int		old_mask;	/* state before this update */

	ASSERT(!(mask & ~XFS_SICK_FS_ALL));
	trace_xfs_fs_mark_sick(mp, mask);

	/* m_sb_lock serializes all updates to the fs-wide health masks. */
	spin_lock(&mp->m_sb_lock);
	old_mask = mp->m_fs_sick;
	mp->m_fs_sick |= mask;
	spin_unlock(&mp->m_sb_lock);

	/* Report the corruption through the generic fs error facility. */
	fserror_report_metadata(mp->m_super, -EFSCORRUPTED, GFP_NOFS);
	if (mask)
		xfs_healthmon_report_fs(mp, XFS_HEALTHMON_SICK, old_mask, mask);
}
125 
/* Mark per-fs metadata as having been checked and found unhealthy by fsck. */
void
xfs_fs_mark_corrupt(
	struct xfs_mount	*mp,
	unsigned int		mask)
{
	unsigned int		old_mask;	/* state before this update */

	ASSERT(!(mask & ~XFS_SICK_FS_ALL));
	trace_xfs_fs_mark_corrupt(mp, mask);

	/* Unlike mark_sick, this also records that fsck examined the area. */
	spin_lock(&mp->m_sb_lock);
	old_mask = mp->m_fs_sick;
	mp->m_fs_sick |= mask;
	mp->m_fs_checked |= mask;
	spin_unlock(&mp->m_sb_lock);

	/* Report the corruption through the generic fs error facility. */
	fserror_report_metadata(mp->m_super, -EFSCORRUPTED, GFP_NOFS);
	if (mask)
		xfs_healthmon_report_fs(mp, XFS_HEALTHMON_CORRUPT, old_mask,
				mask);
}
148 
/* Mark a per-fs metadata healed. */
void
xfs_fs_mark_healthy(
	struct xfs_mount	*mp,
	unsigned int		mask)
{
	unsigned int		old_mask;	/* state before this update */

	ASSERT(!(mask & ~XFS_SICK_FS_ALL));
	trace_xfs_fs_mark_healthy(mp, mask);

	spin_lock(&mp->m_sb_lock);
	old_mask = mp->m_fs_sick;
	mp->m_fs_sick &= ~mask;
	/*
	 * Secondary sickness bits only make sense while some primary
	 * metadata is still sick; clear them once all primaries are healthy.
	 */
	if (!(mp->m_fs_sick & XFS_SICK_FS_PRIMARY))
		mp->m_fs_sick &= ~XFS_SICK_FS_SECONDARY;
	mp->m_fs_checked |= mask;
	spin_unlock(&mp->m_sb_lock);

	if (mask)
		xfs_healthmon_report_fs(mp, XFS_HEALTHMON_HEALTHY, old_mask,
				mask);
}
172 
173 /* Sample which per-fs metadata are unhealthy. */
174 void
xfs_fs_measure_sickness(struct xfs_mount * mp,unsigned int * sick,unsigned int * checked)175 xfs_fs_measure_sickness(
176 	struct xfs_mount	*mp,
177 	unsigned int		*sick,
178 	unsigned int		*checked)
179 {
180 	spin_lock(&mp->m_sb_lock);
181 	*sick = mp->m_fs_sick;
182 	*checked = mp->m_fs_checked;
183 	spin_unlock(&mp->m_sb_lock);
184 }
185 
186 /* Mark unhealthy per-ag metadata given a raw AG number. */
187 void
xfs_agno_mark_sick(struct xfs_mount * mp,xfs_agnumber_t agno,unsigned int mask)188 xfs_agno_mark_sick(
189 	struct xfs_mount	*mp,
190 	xfs_agnumber_t		agno,
191 	unsigned int		mask)
192 {
193 	struct xfs_perag	*pag = xfs_perag_get(mp, agno);
194 
195 	/* per-ag structure not set up yet? */
196 	if (!pag)
197 		return;
198 
199 	xfs_ag_mark_sick(pag, mask);
200 	xfs_perag_put(pag);
201 }
202 
203 static inline void
xfs_group_check_mask(struct xfs_group * xg,unsigned int mask)204 xfs_group_check_mask(
205 	struct xfs_group	*xg,
206 	unsigned int		mask)
207 {
208 	if (xg->xg_type == XG_TYPE_AG)
209 		ASSERT(!(mask & ~XFS_SICK_AG_ALL));
210 	else
211 		ASSERT(!(mask & ~XFS_SICK_RG_ALL));
212 }
213 
/* Mark unhealthy per-ag metadata. */
void
xfs_group_mark_sick(
	struct xfs_group	*xg,
	unsigned int		mask)
{
	unsigned int		old_mask;	/* state before this update */

	xfs_group_check_mask(xg, mask);
	trace_xfs_group_mark_sick(xg, mask);

	/* xg_state_lock serializes updates to this group's health masks. */
	spin_lock(&xg->xg_state_lock);
	old_mask = xg->xg_sick;
	xg->xg_sick |= mask;
	spin_unlock(&xg->xg_state_lock);

	/* Report the corruption through the generic fs error facility. */
	fserror_report_metadata(xg->xg_mount->m_super, -EFSCORRUPTED, GFP_NOFS);
	if (mask)
		xfs_healthmon_report_group(xg, XFS_HEALTHMON_SICK, old_mask,
				mask);
}
235 
/*
 * Mark per-group metadata as having been checked and found unhealthy by fsck.
 */
void
xfs_group_mark_corrupt(
	struct xfs_group	*xg,
	unsigned int		mask)
{
	unsigned int		old_mask;	/* state before this update */

	xfs_group_check_mask(xg, mask);
	trace_xfs_group_mark_corrupt(xg, mask);

	/* Unlike mark_sick, this also records that fsck examined the area. */
	spin_lock(&xg->xg_state_lock);
	old_mask = xg->xg_sick;
	xg->xg_sick |= mask;
	xg->xg_checked |= mask;
	spin_unlock(&xg->xg_state_lock);

	/* Report the corruption through the generic fs error facility. */
	fserror_report_metadata(xg->xg_mount->m_super, -EFSCORRUPTED, GFP_NOFS);
	if (mask)
		xfs_healthmon_report_group(xg, XFS_HEALTHMON_CORRUPT, old_mask,
				mask);
}
260 
/*
 * Mark per-group metadata ok.
 */
void
xfs_group_mark_healthy(
	struct xfs_group	*xg,
	unsigned int		mask)
{
	unsigned int		old_mask;	/* state before this update */

	xfs_group_check_mask(xg, mask);
	trace_xfs_group_mark_healthy(xg, mask);

	spin_lock(&xg->xg_state_lock);
	old_mask = xg->xg_sick;
	xg->xg_sick &= ~mask;
	/*
	 * Drop secondary sickness once no primary metadata remains sick.
	 * NOTE(review): the AG primary/secondary masks are applied here for
	 * both AG and RG group types; presumably the RG bits occupy the same
	 * positions -- confirm against the mask definitions in xfs_health.h.
	 */
	if (!(xg->xg_sick & XFS_SICK_AG_PRIMARY))
		xg->xg_sick &= ~XFS_SICK_AG_SECONDARY;
	xg->xg_checked |= mask;
	spin_unlock(&xg->xg_state_lock);

	if (mask)
		xfs_healthmon_report_group(xg, XFS_HEALTHMON_HEALTHY, old_mask,
				mask);
}
286 
287 /* Sample which per-ag metadata are unhealthy. */
288 void
xfs_group_measure_sickness(struct xfs_group * xg,unsigned int * sick,unsigned int * checked)289 xfs_group_measure_sickness(
290 	struct xfs_group	*xg,
291 	unsigned int		*sick,
292 	unsigned int		*checked)
293 {
294 	spin_lock(&xg->xg_state_lock);
295 	*sick = xg->xg_sick;
296 	*checked = xg->xg_checked;
297 	spin_unlock(&xg->xg_state_lock);
298 }
299 
300 /* Mark unhealthy per-rtgroup metadata given a raw rt group number. */
301 void
xfs_rgno_mark_sick(struct xfs_mount * mp,xfs_rgnumber_t rgno,unsigned int mask)302 xfs_rgno_mark_sick(
303 	struct xfs_mount	*mp,
304 	xfs_rgnumber_t		rgno,
305 	unsigned int		mask)
306 {
307 	struct xfs_rtgroup	*rtg = xfs_rtgroup_get(mp, rgno);
308 
309 	/* per-rtgroup structure not set up yet? */
310 	if (!rtg)
311 		return;
312 
313 	xfs_group_mark_sick(rtg_group(rtg), mask);
314 	xfs_rtgroup_put(rtg);
315 }
316 
/* Route an inode corruption report to the appropriate error facility. */
static inline void xfs_inode_report_fserror(struct xfs_inode *ip)
{
	/*
	 * Do not report inodes being constructed or freed, or metadata inodes,
	 * to fsnotify.
	 */
	if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIM) ||
	    xfs_is_internal_inode(ip)) {
		/* Fall back to a superblock-level report instead. */
		fserror_report_metadata(ip->i_mount->m_super, -EFSCORRUPTED,
				GFP_NOFS);
		return;
	}

	fserror_report_file_metadata(VFS_I(ip), -EFSCORRUPTED, GFP_NOFS);
}
332 
/* Mark the unhealthy parts of an inode. */
void
xfs_inode_mark_sick(
	struct xfs_inode	*ip,
	unsigned int		mask)
{
	unsigned int		old_mask;	/* state before this update */

	ASSERT(!(mask & ~XFS_SICK_INO_ALL));
	trace_xfs_inode_mark_sick(ip, mask);

	/* i_flags_lock serializes updates to the inode health masks. */
	spin_lock(&ip->i_flags_lock);
	old_mask = ip->i_sick;
	ip->i_sick |= mask;
	spin_unlock(&ip->i_flags_lock);

	/*
	 * Keep this inode around so we don't lose the sickness report.  Scrub
	 * grabs inodes with DONTCACHE assuming that most inode are ok, which
	 * is not the case here.
	 */
	spin_lock(&VFS_I(ip)->i_lock);
	inode_state_clear(VFS_I(ip), I_DONTCACHE);
	spin_unlock(&VFS_I(ip)->i_lock);

	xfs_inode_report_fserror(ip);
	if (mask)
		xfs_healthmon_report_inode(ip, XFS_HEALTHMON_SICK, old_mask,
				mask);
}
363 
/* Mark inode metadata as having been checked and found unhealthy by fsck. */
void
xfs_inode_mark_corrupt(
	struct xfs_inode	*ip,
	unsigned int		mask)
{
	unsigned int		old_mask;	/* state before this update */

	ASSERT(!(mask & ~XFS_SICK_INO_ALL));
	trace_xfs_inode_mark_corrupt(ip, mask);

	/* Unlike mark_sick, this also records that fsck examined the area. */
	spin_lock(&ip->i_flags_lock);
	old_mask = ip->i_sick;
	ip->i_sick |= mask;
	ip->i_checked |= mask;
	spin_unlock(&ip->i_flags_lock);

	/*
	 * Keep this inode around so we don't lose the sickness report.  Scrub
	 * grabs inodes with DONTCACHE assuming that most inode are ok, which
	 * is not the case here.
	 */
	spin_lock(&VFS_I(ip)->i_lock);
	inode_state_clear(VFS_I(ip), I_DONTCACHE);
	spin_unlock(&VFS_I(ip)->i_lock);

	xfs_inode_report_fserror(ip);
	if (mask)
		xfs_healthmon_report_inode(ip, XFS_HEALTHMON_CORRUPT, old_mask,
				mask);
}
395 
/* Mark parts of an inode healed. */
void
xfs_inode_mark_healthy(
	struct xfs_inode	*ip,
	unsigned int		mask)
{
	unsigned int		old_mask;	/* state before this update */

	ASSERT(!(mask & ~XFS_SICK_INO_ALL));
	trace_xfs_inode_mark_healthy(ip, mask);

	spin_lock(&ip->i_flags_lock);
	old_mask = ip->i_sick;
	ip->i_sick &= ~mask;
	/*
	 * Secondary sickness bits only make sense while some primary
	 * metadata is still sick; clear them once all primaries are healthy.
	 */
	if (!(ip->i_sick & XFS_SICK_INO_PRIMARY))
		ip->i_sick &= ~XFS_SICK_INO_SECONDARY;
	ip->i_checked |= mask;
	spin_unlock(&ip->i_flags_lock);

	if (mask)
		xfs_healthmon_report_inode(ip, XFS_HEALTHMON_HEALTHY, old_mask,
				mask);
}
419 
420 /* Sample which parts of an inode are unhealthy. */
421 void
xfs_inode_measure_sickness(struct xfs_inode * ip,unsigned int * sick,unsigned int * checked)422 xfs_inode_measure_sickness(
423 	struct xfs_inode	*ip,
424 	unsigned int		*sick,
425 	unsigned int		*checked)
426 {
427 	spin_lock(&ip->i_flags_lock);
428 	*sick = ip->i_sick;
429 	*checked = ip->i_checked;
430 	spin_unlock(&ip->i_flags_lock);
431 }
432 
/* Mappings between internal sick masks and ioctl sick masks. */

/* One translation entry: internal XFS_SICK_* bit -> userspace ioctl bit. */
struct ioctl_sick_map {
	unsigned int		sick_mask;
	unsigned int		ioctl_mask;
};

/* Iterate @m over every entry of translation table @map. */
#define for_each_sick_map(map, m) \
	for ((m) = (map); (m) < (map) + ARRAY_SIZE(map); (m)++)

/* Fs-wide health state, as reported via the fs geometry ioctl. */
static const struct ioctl_sick_map fs_map[] = {
	{ XFS_SICK_FS_COUNTERS,	XFS_FSOP_GEOM_SICK_COUNTERS},
	{ XFS_SICK_FS_UQUOTA,	XFS_FSOP_GEOM_SICK_UQUOTA },
	{ XFS_SICK_FS_GQUOTA,	XFS_FSOP_GEOM_SICK_GQUOTA },
	{ XFS_SICK_FS_PQUOTA,	XFS_FSOP_GEOM_SICK_PQUOTA },
	{ XFS_SICK_FS_QUOTACHECK, XFS_FSOP_GEOM_SICK_QUOTACHECK },
	{ XFS_SICK_FS_NLINKS,	XFS_FSOP_GEOM_SICK_NLINKS },
	{ XFS_SICK_FS_METADIR,	XFS_FSOP_GEOM_SICK_METADIR },
	{ XFS_SICK_FS_METAPATH,	XFS_FSOP_GEOM_SICK_METAPATH },
};

/* Realtime metadata health bits, also reported via the fs geometry ioctl. */
static const struct ioctl_sick_map rt_map[] = {
	{ XFS_SICK_RG_BITMAP,	XFS_FSOP_GEOM_SICK_RT_BITMAP },
	{ XFS_SICK_RG_SUMMARY,	XFS_FSOP_GEOM_SICK_RT_SUMMARY },
};
458 
459 static inline void
xfgeo_health_tick(struct xfs_fsop_geom * geo,unsigned int sick,unsigned int checked,const struct ioctl_sick_map * m)460 xfgeo_health_tick(
461 	struct xfs_fsop_geom		*geo,
462 	unsigned int			sick,
463 	unsigned int			checked,
464 	const struct ioctl_sick_map	*m)
465 {
466 	if (checked & m->sick_mask)
467 		geo->checked |= m->ioctl_mask;
468 	if (sick & m->sick_mask)
469 		geo->sick |= m->ioctl_mask;
470 }
471 
472 /* Fill out fs geometry health info. */
473 void
xfs_fsop_geom_health(struct xfs_mount * mp,struct xfs_fsop_geom * geo)474 xfs_fsop_geom_health(
475 	struct xfs_mount		*mp,
476 	struct xfs_fsop_geom		*geo)
477 {
478 	struct xfs_rtgroup		*rtg = NULL;
479 	const struct ioctl_sick_map	*m;
480 	unsigned int			sick;
481 	unsigned int			checked;
482 
483 	geo->sick = 0;
484 	geo->checked = 0;
485 
486 	xfs_fs_measure_sickness(mp, &sick, &checked);
487 	for_each_sick_map(fs_map, m)
488 		xfgeo_health_tick(geo, sick, checked, m);
489 
490 	while ((rtg = xfs_rtgroup_next(mp, rtg))) {
491 		xfs_group_measure_sickness(rtg_group(rtg), &sick, &checked);
492 		for_each_sick_map(rt_map, m)
493 			xfgeo_health_tick(geo, sick, checked, m);
494 	}
495 }
496 
497 /*
498  * Translate XFS_SICK_FS_* into XFS_FSOP_GEOM_SICK_* except for the rt free
499  * space codes, which are sent via the rtgroup events.
500  */
501 unsigned int
xfs_healthmon_fs_mask(unsigned int sick_mask)502 xfs_healthmon_fs_mask(
503 	unsigned int			sick_mask)
504 {
505 	const struct ioctl_sick_map	*m;
506 	unsigned int			ioctl_mask = 0;
507 
508 	for_each_sick_map(fs_map, m) {
509 		if (sick_mask & m->sick_mask)
510 			ioctl_mask |= m->ioctl_mask;
511 	}
512 
513 	return ioctl_mask;
514 }
515 
/* Per-AG health state, as reported via the AG geometry ioctl. */
static const struct ioctl_sick_map ag_map[] = {
	{ XFS_SICK_AG_SB,	XFS_AG_GEOM_SICK_SB },
	{ XFS_SICK_AG_AGF,	XFS_AG_GEOM_SICK_AGF },
	{ XFS_SICK_AG_AGFL,	XFS_AG_GEOM_SICK_AGFL },
	{ XFS_SICK_AG_AGI,	XFS_AG_GEOM_SICK_AGI },
	{ XFS_SICK_AG_BNOBT,	XFS_AG_GEOM_SICK_BNOBT },
	{ XFS_SICK_AG_CNTBT,	XFS_AG_GEOM_SICK_CNTBT },
	{ XFS_SICK_AG_INOBT,	XFS_AG_GEOM_SICK_INOBT },
	{ XFS_SICK_AG_FINOBT,	XFS_AG_GEOM_SICK_FINOBT },
	{ XFS_SICK_AG_RMAPBT,	XFS_AG_GEOM_SICK_RMAPBT },
	{ XFS_SICK_AG_REFCNTBT,	XFS_AG_GEOM_SICK_REFCNTBT },
	{ XFS_SICK_AG_INODES,	XFS_AG_GEOM_SICK_INODES },
};
529 
530 /* Fill out ag geometry health info. */
531 void
xfs_ag_geom_health(struct xfs_perag * pag,struct xfs_ag_geometry * ageo)532 xfs_ag_geom_health(
533 	struct xfs_perag		*pag,
534 	struct xfs_ag_geometry		*ageo)
535 {
536 	const struct ioctl_sick_map	*m;
537 	unsigned int			sick;
538 	unsigned int			checked;
539 
540 	ageo->ag_sick = 0;
541 	ageo->ag_checked = 0;
542 
543 	xfs_group_measure_sickness(pag_group(pag), &sick, &checked);
544 	for_each_sick_map(ag_map, m) {
545 		if (checked & m->sick_mask)
546 			ageo->ag_checked |= m->ioctl_mask;
547 		if (sick & m->sick_mask)
548 			ageo->ag_sick |= m->ioctl_mask;
549 	}
550 }
551 
552 /* Translate XFS_SICK_AG_* into XFS_AG_GEOM_SICK_*. */
553 unsigned int
xfs_healthmon_perag_mask(unsigned int sick_mask)554 xfs_healthmon_perag_mask(
555 	unsigned int			sick_mask)
556 {
557 	const struct ioctl_sick_map	*m;
558 	unsigned int			ioctl_mask = 0;
559 
560 	for_each_sick_map(ag_map, m) {
561 		if (sick_mask & m->sick_mask)
562 			ioctl_mask |= m->ioctl_mask;
563 	}
564 
565 	return ioctl_mask;
566 }
567 
/* Per-rtgroup health state, as reported via the rtgroup geometry ioctl. */
static const struct ioctl_sick_map rtgroup_map[] = {
	{ XFS_SICK_RG_SUPER,	XFS_RTGROUP_GEOM_SICK_SUPER },
	{ XFS_SICK_RG_BITMAP,	XFS_RTGROUP_GEOM_SICK_BITMAP },
	{ XFS_SICK_RG_SUMMARY,	XFS_RTGROUP_GEOM_SICK_SUMMARY },
	{ XFS_SICK_RG_RMAPBT,	XFS_RTGROUP_GEOM_SICK_RMAPBT },
	{ XFS_SICK_RG_REFCNTBT,	XFS_RTGROUP_GEOM_SICK_REFCNTBT },
};
575 
576 /* Fill out rtgroup geometry health info. */
577 void
xfs_rtgroup_geom_health(struct xfs_rtgroup * rtg,struct xfs_rtgroup_geometry * rgeo)578 xfs_rtgroup_geom_health(
579 	struct xfs_rtgroup	*rtg,
580 	struct xfs_rtgroup_geometry *rgeo)
581 {
582 	const struct ioctl_sick_map	*m;
583 	unsigned int			sick;
584 	unsigned int			checked;
585 
586 	rgeo->rg_sick = 0;
587 	rgeo->rg_checked = 0;
588 
589 	xfs_group_measure_sickness(rtg_group(rtg), &sick, &checked);
590 	for_each_sick_map(rtgroup_map, m) {
591 		if (checked & m->sick_mask)
592 			rgeo->rg_checked |= m->ioctl_mask;
593 		if (sick & m->sick_mask)
594 			rgeo->rg_sick |= m->ioctl_mask;
595 	}
596 }
597 
598 /* Translate XFS_SICK_RG_* into XFS_RTGROUP_GEOM_SICK_*. */
599 unsigned int
xfs_healthmon_rtgroup_mask(unsigned int sick_mask)600 xfs_healthmon_rtgroup_mask(
601 	unsigned int			sick_mask)
602 {
603 	const struct ioctl_sick_map	*m;
604 	unsigned int			ioctl_mask = 0;
605 
606 	for_each_sick_map(rtgroup_map, m) {
607 		if (sick_mask & m->sick_mask)
608 			ioctl_mask |= m->ioctl_mask;
609 	}
610 
611 	return ioctl_mask;
612 }
613 
/*
 * Per-inode health state, as reported via bulkstat.  Note that the ZAPPED
 * states map to the same userspace bit as their non-zapped counterparts.
 */
static const struct ioctl_sick_map ino_map[] = {
	{ XFS_SICK_INO_CORE,	XFS_BS_SICK_INODE },
	{ XFS_SICK_INO_BMBTD,	XFS_BS_SICK_BMBTD },
	{ XFS_SICK_INO_BMBTA,	XFS_BS_SICK_BMBTA },
	{ XFS_SICK_INO_BMBTC,	XFS_BS_SICK_BMBTC },
	{ XFS_SICK_INO_DIR,	XFS_BS_SICK_DIR },
	{ XFS_SICK_INO_XATTR,	XFS_BS_SICK_XATTR },
	{ XFS_SICK_INO_SYMLINK,	XFS_BS_SICK_SYMLINK },
	{ XFS_SICK_INO_PARENT,	XFS_BS_SICK_PARENT },
	{ XFS_SICK_INO_BMBTD_ZAPPED,	XFS_BS_SICK_BMBTD },
	{ XFS_SICK_INO_BMBTA_ZAPPED,	XFS_BS_SICK_BMBTA },
	{ XFS_SICK_INO_DIR_ZAPPED,	XFS_BS_SICK_DIR },
	{ XFS_SICK_INO_SYMLINK_ZAPPED,	XFS_BS_SICK_SYMLINK },
	{ XFS_SICK_INO_DIRTREE,	XFS_BS_SICK_DIRTREE },
};
629 
630 /* Fill out bulkstat health info. */
631 void
xfs_bulkstat_health(struct xfs_inode * ip,struct xfs_bulkstat * bs)632 xfs_bulkstat_health(
633 	struct xfs_inode		*ip,
634 	struct xfs_bulkstat		*bs)
635 {
636 	const struct ioctl_sick_map	*m;
637 	unsigned int			sick;
638 	unsigned int			checked;
639 
640 	bs->bs_sick = 0;
641 	bs->bs_checked = 0;
642 
643 	xfs_inode_measure_sickness(ip, &sick, &checked);
644 	for_each_sick_map(ino_map, m) {
645 		if (checked & m->sick_mask)
646 			bs->bs_checked |= m->ioctl_mask;
647 		if (sick & m->sick_mask)
648 			bs->bs_sick |= m->ioctl_mask;
649 	}
650 }
651 
652 /* Translate XFS_SICK_INO_* into XFS_BS_SICK_*. */
653 unsigned int
xfs_healthmon_inode_mask(unsigned int sick_mask)654 xfs_healthmon_inode_mask(
655 	unsigned int			sick_mask)
656 {
657 	const struct ioctl_sick_map	*m;
658 	unsigned int			ioctl_mask = 0;
659 
660 	for_each_sick_map(ino_map, m) {
661 		if (sick_mask & m->sick_mask)
662 			ioctl_mask |= m->ioctl_mask;
663 	}
664 
665 	return ioctl_mask;
666 }
667 
668 /* Mark a block mapping sick. */
669 void
xfs_bmap_mark_sick(struct xfs_inode * ip,int whichfork)670 xfs_bmap_mark_sick(
671 	struct xfs_inode	*ip,
672 	int			whichfork)
673 {
674 	unsigned int		mask;
675 
676 	switch (whichfork) {
677 	case XFS_DATA_FORK:
678 		mask = XFS_SICK_INO_BMBTD;
679 		break;
680 	case XFS_ATTR_FORK:
681 		mask = XFS_SICK_INO_BMBTA;
682 		break;
683 	case XFS_COW_FORK:
684 		mask = XFS_SICK_INO_BMBTC;
685 		break;
686 	default:
687 		ASSERT(0);
688 		return;
689 	}
690 
691 	xfs_inode_mark_sick(ip, mask);
692 }
693 
694 /* Record observations of btree corruption with the health tracking system. */
695 void
xfs_btree_mark_sick(struct xfs_btree_cur * cur)696 xfs_btree_mark_sick(
697 	struct xfs_btree_cur		*cur)
698 {
699 	if (xfs_btree_is_bmap(cur->bc_ops)) {
700 		xfs_bmap_mark_sick(cur->bc_ino.ip, cur->bc_ino.whichfork);
701 	/* no health state tracking for ephemeral btrees */
702 	} else if (cur->bc_ops->type != XFS_BTREE_TYPE_MEM) {
703 		ASSERT(cur->bc_group);
704 		ASSERT(cur->bc_ops->sick_mask);
705 		xfs_group_mark_sick(cur->bc_group, cur->bc_ops->sick_mask);
706 	}
707 }
708 
709 /*
710  * Record observations of dir/attr btree corruption with the health tracking
711  * system.
712  */
713 void
xfs_dirattr_mark_sick(struct xfs_inode * ip,int whichfork)714 xfs_dirattr_mark_sick(
715 	struct xfs_inode	*ip,
716 	int			whichfork)
717 {
718 	unsigned int		mask;
719 
720 	switch (whichfork) {
721 	case XFS_DATA_FORK:
722 		mask = XFS_SICK_INO_DIR;
723 		break;
724 	case XFS_ATTR_FORK:
725 		mask = XFS_SICK_INO_XATTR;
726 		break;
727 	default:
728 		ASSERT(0);
729 		return;
730 	}
731 
732 	xfs_inode_mark_sick(ip, mask);
733 }
734 
/*
 * Record observations of dir/attr btree corruption with the health tracking
 * system.
 */
void
xfs_da_mark_sick(
	struct xfs_da_args	*args)
{
	/* The fork recorded in the da_args selects dir vs. xattr state. */
	xfs_dirattr_mark_sick(args->dp, args->whichfork);
}
745