// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef __XFS_SCRUB_SCRUB_H__
#define __XFS_SCRUB_SCRUB_H__

struct xfs_scrub;

struct xchk_relax {
	unsigned long	next_resched;
	unsigned int	resched_nr;
	bool		interruptible;
};

/* Yield to the scheduler at most 10x per second. */
#define XCHK_RELAX_NEXT		(jiffies + (HZ / 10))

#define INIT_XCHK_RELAX	\
	(struct xchk_relax){ \
		.next_resched	= XCHK_RELAX_NEXT, \
		.resched_nr	= 0, \
		.interruptible	= true, \
	}

/*
 * Relax during a scrub operation and exit if there's a fatal signal pending.
 *
 * If preemption is disabled, we need to yield to the scheduler every now and
 * then so that we don't run afoul of the soft lockup watchdog or RCU stall
 * detector.  cond_resched calls are somewhat expensive (~5ns) so we want to
 * ratelimit this to 10x per second.  Amortize the cost of the other checks by
 * only doing it once every 100 calls.
 */
static inline int xchk_maybe_relax(struct xchk_relax *widget)
{
	/* Amortize the cost of scheduling and checking signals. */
	if (likely(++widget->resched_nr < 100))
		return 0;
	widget->resched_nr = 0;

	if (unlikely(widget->next_resched <= jiffies)) {
		cond_resched();
		widget->next_resched = XCHK_RELAX_NEXT;
	}

	if (widget->interruptible && fatal_signal_pending(current))
		return -EINTR;

	return 0;
}
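
/*
 * Example usage (an illustrative sketch, not code from this file): a
 * long-running scan can embed its own relax widget and poll it from the
 * loop body.  The loop and the helper named below are hypothetical.
 *
 *	struct xchk_relax	relax = INIT_XCHK_RELAX;
 *	int			error;
 *
 *	while (have_more_records()) {
 *		error = xchk_maybe_relax(&relax);
 *		if (error)
 *			return error;
 *		...examine one record...
 *	}
 *
 * A nonzero return here means a fatal signal is pending and the scan
 * should bail out.
 */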

/*
 * Standard flags for allocating memory within scrub.  NOFS context is
 * configured by the process allocation scope.  Scrub and repair must be able
 * to back out gracefully if there isn't enough memory.  Force-cast to avoid
 * complaints from static checkers.
 */
#define XCHK_GFP_FLAGS	((__force gfp_t)(GFP_KERNEL | __GFP_NOWARN | \
					 __GFP_RETRY_MAYFAIL))
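
/*
 * Illustrative allocation pattern (a sketch; the size variable is
 * hypothetical): because XCHK_GFP_FLAGS includes __GFP_RETRY_MAYFAIL,
 * callers must check for failure and back out instead of assuming the
 * allocator will retry forever.
 *
 *	sc->buf = kvzalloc(bufsize, XCHK_GFP_FLAGS);
 *	if (!sc->buf)
 *		return -ENOMEM;
 */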

/*
 * For opening files by handle for fsck operations, we don't trust the inumber
 * or the allocation state; therefore, perform an untrusted lookup.  We don't
 * want these inodes to pollute the cache, so mark them for immediate removal.
 */
#define XCHK_IGET_FLAGS	(XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE)
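
/*
 * Sketch of an untrusted handle lookup using these flags (error handling
 * abbreviated; the surrounding variables are hypothetical):
 *
 *	error = xfs_iget(mp, tp, sc->sm->sm_ino, XCHK_IGET_FLAGS, 0, &ip);
 *	if (error)
 *		return error;
 *
 * A failure here usually means the inumber was garbage or the inode is
 * not in a usable state.
 */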

/* Type info and names for the scrub types. */
enum xchk_type {
	ST_NONE = 1,	/* disabled */
	ST_PERAG,	/* per-AG metadata */
	ST_FS,		/* per-FS metadata */
	ST_INODE,	/* per-inode metadata */
	ST_GENERIC,	/* determined by the scrubber */
	ST_RTGROUP,	/* rtgroup metadata */
};

struct xchk_meta_ops {
	/* Acquire whatever resources are needed for the operation. */
	int		(*setup)(struct xfs_scrub *sc);

	/* Examine metadata for errors. */
	int		(*scrub)(struct xfs_scrub *);

	/* Repair or optimize the metadata. */
	int		(*repair)(struct xfs_scrub *);

	/*
	 * Re-scrub the metadata we repaired, in case there's extra work that
	 * we need to do to check our repair work.  If this is NULL, we'll use
	 * the ->scrub function pointer, assuming that the regular scrub is
	 * sufficient.
	 */
	int		(*repair_eval)(struct xfs_scrub *sc);

	/* Decide if we even have this piece of metadata. */
	bool		(*has)(struct xfs_mount *);

	/* Type describing required/allowed inputs. */
	enum xchk_type	type;
};
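
/*
 * A scrubber is wired up through one of these tables; a minimal sketch
 * (the entry below is illustrative; see meta_scrub_ops[] in scrub.c for
 * the authoritative table):
 *
 *	static const struct xchk_meta_ops example_ops = {
 *		.type	= ST_PERAG,
 *		.setup	= xchk_setup_agheader,
 *		.scrub	= xchk_superblock,
 *	};
 */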

/* Buffer pointers and btree cursors for an entire AG. */
struct xchk_ag {
	struct xfs_perag	*pag;

	/* AG btree roots */
	struct xfs_buf		*agf_bp;
	struct xfs_buf		*agi_bp;

	/* AG btrees */
	struct xfs_btree_cur	*bno_cur;
	struct xfs_btree_cur	*cnt_cur;
	struct xfs_btree_cur	*ino_cur;
	struct xfs_btree_cur	*fino_cur;
	struct xfs_btree_cur	*rmap_cur;
	struct xfs_btree_cur	*refc_cur;
};

/* Inode lock state for the RT volume. */
struct xchk_rt {
	/* incore rtgroup, if applicable */
	struct xfs_rtgroup	*rtg;

	/* XFS_RTGLOCK_* lock state if locked */
	unsigned int		rtlock_flags;
};

struct xfs_scrub {
	/* General scrub state. */
	struct xfs_mount		*mp;
	struct xfs_scrub_metadata	*sm;
	const struct xchk_meta_ops	*ops;
	struct xfs_trans		*tp;

	/* File that scrub was called with. */
	struct file			*file;

	/*
	 * File that is undergoing the scrub operation.  This can differ from
	 * the file that scrub was called with if we're checking file-based fs
	 * metadata (e.g. rt bitmaps) or if we're doing a scrub-by-handle for
	 * something that can't be opened directly (e.g. symlinks).
	 */
	struct xfs_inode		*ip;

	/* Kernel memory buffer used by scrubbers; freed at teardown. */
	void				*buf;

	/*
	 * Clean up resources owned by whatever is in the buffer.  Cleanup can
	 * be deferred with this hook as a means for scrub functions to pass
	 * data to repair functions.  This function must not free the buffer
	 * itself.
	 */
	void				(*buf_cleanup)(void *buf);

	/* xfile used by the scrubbers; freed at teardown. */
	struct xfile			*xfile;

	/* Buffer target for in-memory btrees; also freed at teardown. */
	struct xfs_buftarg		*xmbtp;

	/* Lock flags for @ip. */
	uint				ilock_flags;

	/* The orphanage, for stashing files that have lost their parent. */
	uint				orphanage_ilock_flags;
	struct xfs_inode		*orphanage;

	/* A temporary file on this filesystem, for staging new metadata. */
	struct xfs_inode		*tempip;
	uint				temp_ilock_flags;

	/* See the XCHK/XREP state flags below. */
	unsigned int			flags;

	/*
	 * The XFS_SICK_* flags that correspond to the metadata being scrubbed
	 * or repaired.  We will use this mask to update the in-core fs health
	 * status with whatever we find.
	 */
	unsigned int			sick_mask;

	/* Next time we want to cond_resched(). */
	struct xchk_relax		relax;

	/* State tracking for single-AG operations. */
	struct xchk_ag			sa;

	/* State tracking for realtime operations. */
	struct xchk_rt			sr;
};
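
/*
 * Sketch of the ->buf/->buf_cleanup handoff (the names below are
 * hypothetical): a scrub function can stash findings in sc->buf for a
 * later repair, and teardown will call sc->buf_cleanup on whatever is
 * still attached before freeing the buffer itself.
 *
 *	sc->buf = kvzalloc(sizeof(struct example_findings), XCHK_GFP_FLAGS);
 *	if (!sc->buf)
 *		return -ENOMEM;
 *	sc->buf_cleanup = example_findings_cleanup;
 */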

/* XCHK state flags grow up from zero, XREP state flags grow down from 2^31 */
#define XCHK_TRY_HARDER		(1U << 0)  /* can't get resources, try again */
#define XCHK_HAVE_FREEZE_PROT	(1U << 1)  /* do we have freeze protection? */
#define XCHK_FSGATES_DRAIN	(1U << 2)  /* defer ops draining enabled */
#define XCHK_NEED_DRAIN		(1U << 3)  /* scrub needs to drain defer ops */
#define XCHK_FSGATES_QUOTA	(1U << 4)  /* quota live update enabled */
#define XCHK_FSGATES_DIRENTS	(1U << 5)  /* directory live update enabled */
#define XCHK_FSGATES_RMAP	(1U << 6)  /* rmapbt live update enabled */
#define XREP_RESET_PERAG_RESV	(1U << 30) /* must reset AG space reservation */
#define XREP_ALREADY_FIXED	(1U << 31) /* checking our repair work */

/*
 * The XCHK_FSGATES* flags reflect features in the main filesystem that are
 * only enabled for this particular online fsck.  When not in use, the
 * features are gated off via dynamic code patching, which is why the state
 * must be enabled during scrub setup and can only be torn down afterwards.
 */
#define XCHK_FSGATES_ALL	(XCHK_FSGATES_DRAIN | \
				 XCHK_FSGATES_QUOTA | \
				 XCHK_FSGATES_DIRENTS | \
				 XCHK_FSGATES_RMAP)
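
/*
 * Hedged usage sketch: a setup function that needs one of these live hooks
 * is expected to enable it before scrub begins, e.g. via
 * xchk_fsgates_enable() from common.h (assuming that helper):
 *
 *	xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
 *
 * Teardown then turns off whatever XCHK_FSGATES_ALL bits were set.
 */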

struct xfs_scrub_subord {
	struct xfs_scrub	sc;
	struct xfs_scrub	*parent_sc;
	unsigned int		old_smtype;
	unsigned int		old_smflags;
};

struct xfs_scrub_subord *xchk_scrub_create_subord(struct xfs_scrub *sc,
		unsigned int subtype);
void xchk_scrub_free_subord(struct xfs_scrub_subord *sub);
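
/*
 * Sketch of running a subordinate scrub (illustrative; assumes the create
 * function returns an ERR_PTR on failure, and the subtype is whatever the
 * caller wants checked):
 *
 *	sub = xchk_scrub_create_subord(sc, XFS_SCRUB_TYPE_RTBITMAP);
 *	if (IS_ERR(sub))
 *		return PTR_ERR(sub);
 *	error = sub->sc.ops->scrub(&sub->sc);
 *	xchk_scrub_free_subord(sub);
 */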

/*
 * We /could/ terminate a scrub/repair operation early.  If we're not
 * in a good place to continue (fatal signal, etc.) then bail out.
 * Note that we're careful not to make any judgements about *error.
 */
static inline bool
xchk_should_terminate(
	struct xfs_scrub	*sc,
	int			*error)
{
	if (xchk_maybe_relax(&sc->relax)) {
		if (*error == 0)
			*error = -EINTR;
		return true;
	}
	return false;
}
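
/*
 * Typical call pattern in a record-walking scrubber (the loop construct
 * below is a hypothetical sketch):
 *
 *	int error = 0;
 *
 *	for_each_record(sc, rec) {
 *		if (xchk_should_terminate(sc, &error))
 *			break;
 *		...check one record, possibly setting error...
 *	}
 *	return error;
 */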

static inline int xchk_nothing(struct xfs_scrub *sc)
{
	return -ENOENT;
}

/* Metadata scrubbers */
int xchk_tester(struct xfs_scrub *sc);
int xchk_superblock(struct xfs_scrub *sc);
int xchk_agf(struct xfs_scrub *sc);
int xchk_agfl(struct xfs_scrub *sc);
int xchk_agi(struct xfs_scrub *sc);
int xchk_allocbt(struct xfs_scrub *sc);
int xchk_iallocbt(struct xfs_scrub *sc);
int xchk_rmapbt(struct xfs_scrub *sc);
int xchk_refcountbt(struct xfs_scrub *sc);
int xchk_inode(struct xfs_scrub *sc);
int xchk_bmap_data(struct xfs_scrub *sc);
int xchk_bmap_attr(struct xfs_scrub *sc);
int xchk_bmap_cow(struct xfs_scrub *sc);
int xchk_directory(struct xfs_scrub *sc);
int xchk_xattr(struct xfs_scrub *sc);
int xchk_symlink(struct xfs_scrub *sc);
int xchk_parent(struct xfs_scrub *sc);
int xchk_dirtree(struct xfs_scrub *sc);
int xchk_metapath(struct xfs_scrub *sc);
#ifdef CONFIG_XFS_RT
int xchk_rtbitmap(struct xfs_scrub *sc);
int xchk_rtsummary(struct xfs_scrub *sc);
int xchk_rgsuperblock(struct xfs_scrub *sc);
#else
# define xchk_rtbitmap		xchk_nothing
# define xchk_rtsummary		xchk_nothing
# define xchk_rgsuperblock	xchk_nothing
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_quota(struct xfs_scrub *sc);
int xchk_quotacheck(struct xfs_scrub *sc);
#else
# define xchk_quota		xchk_nothing
# define xchk_quotacheck	xchk_nothing
#endif
int xchk_fscounters(struct xfs_scrub *sc);
int xchk_nlinks(struct xfs_scrub *sc);

/* cross-referencing helpers */
void xchk_xref_is_used_space(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len);
void xchk_xref_is_not_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len);
void xchk_xref_is_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len);
void xchk_xref_is_only_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len, const struct xfs_owner_info *oinfo);
void xchk_xref_is_not_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len, const struct xfs_owner_info *oinfo);
void xchk_xref_has_no_owner(struct xfs_scrub *sc, xfs_agblock_t agbno,
		xfs_extlen_t len);
void xchk_xref_is_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno,
		xfs_extlen_t len);
void xchk_xref_is_not_shared(struct xfs_scrub *sc, xfs_agblock_t bno,
		xfs_extlen_t len);
void xchk_xref_is_not_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno,
		xfs_extlen_t len);
#ifdef CONFIG_XFS_RT
void xchk_xref_is_used_rt_space(struct xfs_scrub *sc, xfs_rtblock_t rtbno,
		xfs_extlen_t len);
#else
# define xchk_xref_is_used_rt_space(sc, rtbno, len) do { } while (0)
#endif

#endif	/* __XFS_SCRUB_SCRUB_H__ */