xref: /linux/fs/xfs/xfs_health.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trace.h"
#include "xfs_health.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_quota_defs.h"
#include "xfs_rtgroup.h"

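/*
 * Report unfixed corruption in one allocation/realtime group at unmount time:
 * note it in the trace buffer and set @warn so the caller warns the admin.
 */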
static void
xfs_health_unmount_group(
	struct xfs_group	*xg,
	bool			*warn)
{
	unsigned int		sick = 0;
	unsigned int		checked = 0;

	xfs_group_measure_sickness(xg, &sick, &checked);
	if (sick) {
		trace_xfs_group_unfixed_corruption(xg, sick);
		*warn = true;
	}
}

/*
 * Warn about metadata corruption that we detected but haven't fixed, and
 * make sure we're not sitting on anything that would get in the way of
 * recovery.
 */
void
xfs_health_unmount(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag = NULL;
	struct xfs_rtgroup	*rtg = NULL;
	unsigned int		sick = 0;
	unsigned int		checked = 0;
	bool			warn = false;

	if (xfs_is_shutdown(mp))
		return;

	/* Measure AG corruption levels. */
	while ((pag = xfs_perag_next(mp, pag)))
		xfs_health_unmount_group(pag_group(pag), &warn);

	/* Measure realtime group corruption levels. */
	while ((rtg = xfs_rtgroup_next(mp, rtg)))
		xfs_health_unmount_group(rtg_group(rtg), &warn);

	/*
	 * Measure fs corruption and keep the sample around for the warning.
	 * See the note below for why we exempt FS_COUNTERS.
	 */
	xfs_fs_measure_sickness(mp, &sick, &checked);
	if (sick & ~XFS_SICK_FS_COUNTERS) {
		trace_xfs_fs_unfixed_corruption(mp, sick);
		warn = true;
	}

	if (warn) {
		xfs_warn(mp,
"Uncorrected metadata errors detected; please run xfs_repair.");

		/*
		 * We discovered uncorrected metadata problems at some point
		 * during this filesystem mount and have advised the
		 * administrator to run repair once the unmount completes.
		 *
		 * However, we must be careful -- when FS_COUNTERS is flagged
		 * unhealthy, the unmount procedure omits writing the clean
		 * unmount record to the log so that the next mount will run
		 * recovery and recompute the summary counters.  In other
		 * words, we leave a dirty log to get the counters fixed.
		 *
		 * Unfortunately, xfs_repair cannot recover dirty logs, so if
		 * there were filesystem problems, FS_COUNTERS was flagged,
		 * and the administrator takes our advice to run xfs_repair,
		 * they'll have to zap the log before repairing structures.
		 * We don't really want to encourage this, so we mark
		 * FS_COUNTERS healthy so that a subsequent repair run won't
		 * see a dirty log.
		 */
		if (sick & XFS_SICK_FS_COUNTERS)
			xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
	}
}

/* Mark unhealthy per-fs metadata. */
void
xfs_fs_mark_sick(
	struct xfs_mount	*mp,
	unsigned int		mask)
{
	ASSERT(!(mask & ~XFS_SICK_FS_ALL));
	trace_xfs_fs_mark_sick(mp, mask);

	spin_lock(&mp->m_sb_lock);
	mp->m_fs_sick |= mask;
	spin_unlock(&mp->m_sb_lock);
}

/* Mark per-fs metadata as having been checked and found unhealthy by fsck. */
void
xfs_fs_mark_corrupt(
	struct xfs_mount	*mp,
	unsigned int		mask)
{
	ASSERT(!(mask & ~XFS_SICK_FS_ALL));
	trace_xfs_fs_mark_corrupt(mp, mask);

	spin_lock(&mp->m_sb_lock);
	mp->m_fs_sick |= mask;
	mp->m_fs_checked |= mask;
	spin_unlock(&mp->m_sb_lock);
}

/* Mark per-fs metadata as healed. */
void
xfs_fs_mark_healthy(
	struct xfs_mount	*mp,
	unsigned int		mask)
{
	ASSERT(!(mask & ~XFS_SICK_FS_ALL));
	trace_xfs_fs_mark_healthy(mp, mask);

	spin_lock(&mp->m_sb_lock);
	mp->m_fs_sick &= ~mask;
	if (!(mp->m_fs_sick & XFS_SICK_FS_PRIMARY))
		mp->m_fs_sick &= ~XFS_SICK_FS_SECONDARY;
	mp->m_fs_checked |= mask;
	spin_unlock(&mp->m_sb_lock);
}

/* Sample which per-fs metadata are unhealthy. */
void
xfs_fs_measure_sickness(
	struct xfs_mount	*mp,
	unsigned int		*sick,
	unsigned int		*checked)
{
	spin_lock(&mp->m_sb_lock);
	*sick = mp->m_fs_sick;
	*checked = mp->m_fs_checked;
	spin_unlock(&mp->m_sb_lock);
}

/* Mark unhealthy per-ag metadata given a raw AG number. */
void
xfs_agno_mark_sick(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	unsigned int		mask)
{
	struct xfs_perag	*pag = xfs_perag_get(mp, agno);

	/* per-ag structure not set up yet? */
	if (!pag)
		return;

	xfs_ag_mark_sick(pag, mask);
	xfs_perag_put(pag);
}

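/* Assert that a sick mask only has bits valid for this type of group. */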
static inline void
xfs_group_check_mask(
	struct xfs_group	*xg,
	unsigned int		mask)
{
	if (xg->xg_type == XG_TYPE_AG)
		ASSERT(!(mask & ~XFS_SICK_AG_ALL));
	else
		ASSERT(!(mask & ~XFS_SICK_RG_ALL));
}

/* Mark unhealthy per-group metadata. */
void
xfs_group_mark_sick(
	struct xfs_group	*xg,
	unsigned int		mask)
{
	xfs_group_check_mask(xg, mask);
	trace_xfs_group_mark_sick(xg, mask);

	spin_lock(&xg->xg_state_lock);
	xg->xg_sick |= mask;
	spin_unlock(&xg->xg_state_lock);
}

/*
 * Mark per-group metadata as having been checked and found unhealthy by fsck.
 */
void
xfs_group_mark_corrupt(
	struct xfs_group	*xg,
	unsigned int		mask)
{
	xfs_group_check_mask(xg, mask);
	trace_xfs_group_mark_corrupt(xg, mask);

	spin_lock(&xg->xg_state_lock);
	xg->xg_sick |= mask;
	xg->xg_checked |= mask;
	spin_unlock(&xg->xg_state_lock);
}

/*
 * Mark per-group metadata ok.
 */
void
xfs_group_mark_healthy(
	struct xfs_group	*xg,
	unsigned int		mask)
{
	xfs_group_check_mask(xg, mask);
	trace_xfs_group_mark_healthy(xg, mask);

	spin_lock(&xg->xg_state_lock);
	xg->xg_sick &= ~mask;
	if (!(xg->xg_sick & XFS_SICK_AG_PRIMARY))
		xg->xg_sick &= ~XFS_SICK_AG_SECONDARY;
	xg->xg_checked |= mask;
	spin_unlock(&xg->xg_state_lock);
}

/* Sample which per-group metadata are unhealthy. */
void
xfs_group_measure_sickness(
	struct xfs_group	*xg,
	unsigned int		*sick,
	unsigned int		*checked)
{
	spin_lock(&xg->xg_state_lock);
	*sick = xg->xg_sick;
	*checked = xg->xg_checked;
	spin_unlock(&xg->xg_state_lock);
}

/* Mark unhealthy per-rtgroup metadata given a raw rt group number. */
void
xfs_rgno_mark_sick(
	struct xfs_mount	*mp,
	xfs_rgnumber_t		rgno,
	unsigned int		mask)
{
	struct xfs_rtgroup	*rtg = xfs_rtgroup_get(mp, rgno);

	/* per-rtgroup structure not set up yet? */
	if (!rtg)
		return;

	xfs_group_mark_sick(rtg_group(rtg), mask);
	xfs_rtgroup_put(rtg);
}

/* Mark the unhealthy parts of an inode. */
void
xfs_inode_mark_sick(
	struct xfs_inode	*ip,
	unsigned int		mask)
{
	ASSERT(!(mask & ~XFS_SICK_INO_ALL));
	trace_xfs_inode_mark_sick(ip, mask);

	spin_lock(&ip->i_flags_lock);
	ip->i_sick |= mask;
	spin_unlock(&ip->i_flags_lock);

	/*
	 * Keep this inode around so we don't lose the sickness report.  Scrub
	 * grabs inodes with DONTCACHE assuming that most inodes are ok, which
	 * is not the case here.
	 */
	spin_lock(&VFS_I(ip)->i_lock);
	VFS_I(ip)->i_state &= ~I_DONTCACHE;
	spin_unlock(&VFS_I(ip)->i_lock);
}

/* Mark inode metadata as having been checked and found unhealthy by fsck. */
void
xfs_inode_mark_corrupt(
	struct xfs_inode	*ip,
	unsigned int		mask)
{
	ASSERT(!(mask & ~XFS_SICK_INO_ALL));
	trace_xfs_inode_mark_corrupt(ip, mask);

	spin_lock(&ip->i_flags_lock);
	ip->i_sick |= mask;
	ip->i_checked |= mask;
	spin_unlock(&ip->i_flags_lock);

	/*
	 * Keep this inode around so we don't lose the sickness report.  Scrub
	 * grabs inodes with DONTCACHE assuming that most inodes are ok, which
	 * is not the case here.
	 */
	spin_lock(&VFS_I(ip)->i_lock);
	VFS_I(ip)->i_state &= ~I_DONTCACHE;
	spin_unlock(&VFS_I(ip)->i_lock);
}

/* Mark parts of an inode healed. */
void
xfs_inode_mark_healthy(
	struct xfs_inode	*ip,
	unsigned int		mask)
{
	ASSERT(!(mask & ~XFS_SICK_INO_ALL));
	trace_xfs_inode_mark_healthy(ip, mask);

	spin_lock(&ip->i_flags_lock);
	ip->i_sick &= ~mask;
	if (!(ip->i_sick & XFS_SICK_INO_PRIMARY))
		ip->i_sick &= ~XFS_SICK_INO_SECONDARY;
	ip->i_checked |= mask;
	spin_unlock(&ip->i_flags_lock);
}

/* Sample which parts of an inode are unhealthy. */
void
xfs_inode_measure_sickness(
	struct xfs_inode	*ip,
	unsigned int		*sick,
	unsigned int		*checked)
{
	spin_lock(&ip->i_flags_lock);
	*sick = ip->i_sick;
	*checked = ip->i_checked;
	spin_unlock(&ip->i_flags_lock);
}

/* Mappings between internal sick masks and ioctl sick masks. */

struct ioctl_sick_map {
	unsigned int		sick_mask;
	unsigned int		ioctl_mask;
};

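/* Walk each entry of a sick-map table. */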
#define for_each_sick_map(map, m) \
	for ((m) = (map); (m) < (map) + ARRAY_SIZE(map); (m)++)

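/* Translate per-fs health flags into struct xfs_fsop_geom flags. */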
static const struct ioctl_sick_map fs_map[] = {
	{ XFS_SICK_FS_COUNTERS,	XFS_FSOP_GEOM_SICK_COUNTERS },
	{ XFS_SICK_FS_UQUOTA,	XFS_FSOP_GEOM_SICK_UQUOTA },
	{ XFS_SICK_FS_GQUOTA,	XFS_FSOP_GEOM_SICK_GQUOTA },
	{ XFS_SICK_FS_PQUOTA,	XFS_FSOP_GEOM_SICK_PQUOTA },
	{ XFS_SICK_FS_QUOTACHECK, XFS_FSOP_GEOM_SICK_QUOTACHECK },
	{ XFS_SICK_FS_NLINKS,	XFS_FSOP_GEOM_SICK_NLINKS },
	{ XFS_SICK_FS_METADIR,	XFS_FSOP_GEOM_SICK_METADIR },
	{ XFS_SICK_FS_METAPATH,	XFS_FSOP_GEOM_SICK_METAPATH },
};

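/* Translate realtime group health flags into struct xfs_fsop_geom flags. */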
static const struct ioctl_sick_map rt_map[] = {
	{ XFS_SICK_RG_BITMAP,	XFS_FSOP_GEOM_SICK_RT_BITMAP },
	{ XFS_SICK_RG_SUMMARY,	XFS_FSOP_GEOM_SICK_RT_SUMMARY },
};

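/* Fold one sick-map entry into the fs geometry health flags. */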
static inline void
xfgeo_health_tick(
	struct xfs_fsop_geom		*geo,
	unsigned int			sick,
	unsigned int			checked,
	const struct ioctl_sick_map	*m)
{
	if (checked & m->sick_mask)
		geo->checked |= m->ioctl_mask;
	if (sick & m->sick_mask)
		geo->sick |= m->ioctl_mask;
}

/* Fill out fs geometry health info. */
void
xfs_fsop_geom_health(
	struct xfs_mount		*mp,
	struct xfs_fsop_geom		*geo)
{
	struct xfs_rtgroup		*rtg = NULL;
	const struct ioctl_sick_map	*m;
	unsigned int			sick;
	unsigned int			checked;

	geo->sick = 0;
	geo->checked = 0;

	xfs_fs_measure_sickness(mp, &sick, &checked);
	for_each_sick_map(fs_map, m)
		xfgeo_health_tick(geo, sick, checked, m);

	while ((rtg = xfs_rtgroup_next(mp, rtg))) {
		xfs_group_measure_sickness(rtg_group(rtg), &sick, &checked);
		for_each_sick_map(rt_map, m)
			xfgeo_health_tick(geo, sick, checked, m);
	}
}

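/* Translate per-AG health flags into struct xfs_ag_geometry flags. */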
static const struct ioctl_sick_map ag_map[] = {
	{ XFS_SICK_AG_SB,	XFS_AG_GEOM_SICK_SB },
	{ XFS_SICK_AG_AGF,	XFS_AG_GEOM_SICK_AGF },
	{ XFS_SICK_AG_AGFL,	XFS_AG_GEOM_SICK_AGFL },
	{ XFS_SICK_AG_AGI,	XFS_AG_GEOM_SICK_AGI },
	{ XFS_SICK_AG_BNOBT,	XFS_AG_GEOM_SICK_BNOBT },
	{ XFS_SICK_AG_CNTBT,	XFS_AG_GEOM_SICK_CNTBT },
	{ XFS_SICK_AG_INOBT,	XFS_AG_GEOM_SICK_INOBT },
	{ XFS_SICK_AG_FINOBT,	XFS_AG_GEOM_SICK_FINOBT },
	{ XFS_SICK_AG_RMAPBT,	XFS_AG_GEOM_SICK_RMAPBT },
	{ XFS_SICK_AG_REFCNTBT,	XFS_AG_GEOM_SICK_REFCNTBT },
	{ XFS_SICK_AG_INODES,	XFS_AG_GEOM_SICK_INODES },
};

/* Fill out ag geometry health info. */
void
xfs_ag_geom_health(
	struct xfs_perag		*pag,
	struct xfs_ag_geometry		*ageo)
{
	const struct ioctl_sick_map	*m;
	unsigned int			sick;
	unsigned int			checked;

	ageo->ag_sick = 0;
	ageo->ag_checked = 0;

	xfs_group_measure_sickness(pag_group(pag), &sick, &checked);
	for_each_sick_map(ag_map, m) {
		if (checked & m->sick_mask)
			ageo->ag_checked |= m->ioctl_mask;
		if (sick & m->sick_mask)
			ageo->ag_sick |= m->ioctl_mask;
	}
}

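/* Translate rtgroup health flags into struct xfs_rtgroup_geometry flags. */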
static const struct ioctl_sick_map rtgroup_map[] = {
	{ XFS_SICK_RG_SUPER,	XFS_RTGROUP_GEOM_SICK_SUPER },
	{ XFS_SICK_RG_BITMAP,	XFS_RTGROUP_GEOM_SICK_BITMAP },
	{ XFS_SICK_RG_SUMMARY,	XFS_RTGROUP_GEOM_SICK_SUMMARY },
};

/* Fill out rtgroup geometry health info. */
void
xfs_rtgroup_geom_health(
	struct xfs_rtgroup	*rtg,
	struct xfs_rtgroup_geometry *rgeo)
{
	const struct ioctl_sick_map	*m;
	unsigned int			sick;
	unsigned int			checked;

	rgeo->rg_sick = 0;
	rgeo->rg_checked = 0;

	xfs_group_measure_sickness(rtg_group(rtg), &sick, &checked);
	for_each_sick_map(rtgroup_map, m) {
		if (checked & m->sick_mask)
			rgeo->rg_checked |= m->ioctl_mask;
		if (sick & m->sick_mask)
			rgeo->rg_sick |= m->ioctl_mask;
	}
}

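/* Translate per-inode health flags into struct xfs_bulkstat flags. */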
static const struct ioctl_sick_map ino_map[] = {
	{ XFS_SICK_INO_CORE,	XFS_BS_SICK_INODE },
	{ XFS_SICK_INO_BMBTD,	XFS_BS_SICK_BMBTD },
	{ XFS_SICK_INO_BMBTA,	XFS_BS_SICK_BMBTA },
	{ XFS_SICK_INO_BMBTC,	XFS_BS_SICK_BMBTC },
	{ XFS_SICK_INO_DIR,	XFS_BS_SICK_DIR },
	{ XFS_SICK_INO_XATTR,	XFS_BS_SICK_XATTR },
	{ XFS_SICK_INO_SYMLINK,	XFS_BS_SICK_SYMLINK },
	{ XFS_SICK_INO_PARENT,	XFS_BS_SICK_PARENT },
	{ XFS_SICK_INO_BMBTD_ZAPPED,	XFS_BS_SICK_BMBTD },
	{ XFS_SICK_INO_BMBTA_ZAPPED,	XFS_BS_SICK_BMBTA },
	{ XFS_SICK_INO_DIR_ZAPPED,	XFS_BS_SICK_DIR },
	{ XFS_SICK_INO_SYMLINK_ZAPPED,	XFS_BS_SICK_SYMLINK },
	{ XFS_SICK_INO_DIRTREE,	XFS_BS_SICK_DIRTREE },
};

/* Fill out bulkstat health info. */
void
xfs_bulkstat_health(
	struct xfs_inode		*ip,
	struct xfs_bulkstat		*bs)
{
	const struct ioctl_sick_map	*m;
	unsigned int			sick;
	unsigned int			checked;

	bs->bs_sick = 0;
	bs->bs_checked = 0;

	xfs_inode_measure_sickness(ip, &sick, &checked);
	for_each_sick_map(ino_map, m) {
		if (checked & m->sick_mask)
			bs->bs_checked |= m->ioctl_mask;
		if (sick & m->sick_mask)
			bs->bs_sick |= m->ioctl_mask;
	}
}

/* Mark a block mapping sick. */
void
xfs_bmap_mark_sick(
	struct xfs_inode	*ip,
	int			whichfork)
{
	unsigned int		mask;

	switch (whichfork) {
	case XFS_DATA_FORK:
		mask = XFS_SICK_INO_BMBTD;
		break;
	case XFS_ATTR_FORK:
		mask = XFS_SICK_INO_BMBTA;
		break;
	case XFS_COW_FORK:
		mask = XFS_SICK_INO_BMBTC;
		break;
	default:
		ASSERT(0);
		return;
	}

	xfs_inode_mark_sick(ip, mask);
}

/* Record observations of btree corruption with the health tracking system. */
void
xfs_btree_mark_sick(
	struct xfs_btree_cur		*cur)
{
	if (xfs_btree_is_bmap(cur->bc_ops)) {
		xfs_bmap_mark_sick(cur->bc_ino.ip, cur->bc_ino.whichfork);
	/* no health state tracking for ephemeral btrees */
	} else if (cur->bc_ops->type != XFS_BTREE_TYPE_MEM) {
		ASSERT(cur->bc_group);
		ASSERT(cur->bc_ops->sick_mask);
		xfs_group_mark_sick(cur->bc_group, cur->bc_ops->sick_mask);
	}
}

/*
 * Record observations of dir/attr btree corruption with the health tracking
 * system.
 */
void
xfs_dirattr_mark_sick(
	struct xfs_inode	*ip,
	int			whichfork)
{
	unsigned int		mask;

	switch (whichfork) {
	case XFS_DATA_FORK:
		mask = XFS_SICK_INO_DIR;
		break;
	case XFS_ATTR_FORK:
		mask = XFS_SICK_INO_XATTR;
		break;
	default:
		ASSERT(0);
		return;
	}

	xfs_inode_mark_sick(ip, mask);
}

/*
 * Record observations of dir/attr btree corruption with the health tracking
 * system.
 */
void
xfs_da_mark_sick(
	struct xfs_da_args	*args)
{
	xfs_dirattr_mark_sick(args->dp, args->whichfork);
}
589