xref: /linux/fs/xfs/scrub/common.h (revision 663ea69540c8d7ba332c9a3129d7f3cf5de50d9b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
4  * Author: Darrick J. Wong <djwong@kernel.org>
5  */
6 #ifndef __XFS_SCRUB_COMMON_H__
7 #define __XFS_SCRUB_COMMON_H__
8 
9 int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
10 void xchk_trans_alloc_empty(struct xfs_scrub *sc);
11 void xchk_trans_cancel(struct xfs_scrub *sc);
12 
13 bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
14 		xfs_agblock_t bno, int *error);
15 bool xchk_process_rt_error(struct xfs_scrub *sc, xfs_rgnumber_t rgno,
16 		xfs_rgblock_t rgbno, int *error);
17 bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
18 		xfs_fileoff_t offset, int *error);
19 
20 bool xchk_xref_process_error(struct xfs_scrub *sc,
21 		xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
22 bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
23 		int whichfork, xfs_fileoff_t offset, int *error);
24 
25 void xchk_block_set_preen(struct xfs_scrub *sc,
26 		struct xfs_buf *bp);
27 void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);
28 void xchk_fblock_set_preen(struct xfs_scrub *sc,
29 		int whichfork, xfs_fileoff_t offset);
30 
31 void xchk_set_corrupt(struct xfs_scrub *sc);
32 void xchk_block_set_corrupt(struct xfs_scrub *sc,
33 		struct xfs_buf *bp);
34 void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino);
35 void xchk_fblock_set_corrupt(struct xfs_scrub *sc, int whichfork,
36 		xfs_fileoff_t offset);
37 #ifdef CONFIG_XFS_QUOTA
38 void xchk_qcheck_set_corrupt(struct xfs_scrub *sc, unsigned int dqtype,
39 		xfs_dqid_t id);
40 #endif
41 
42 void xchk_block_xref_set_corrupt(struct xfs_scrub *sc,
43 		struct xfs_buf *bp);
44 void xchk_ino_xref_set_corrupt(struct xfs_scrub *sc,
45 		xfs_ino_t ino);
46 void xchk_fblock_xref_set_corrupt(struct xfs_scrub *sc,
47 		int whichfork, xfs_fileoff_t offset);
48 
49 void xchk_ino_set_warning(struct xfs_scrub *sc, xfs_ino_t ino);
50 void xchk_fblock_set_warning(struct xfs_scrub *sc, int whichfork,
51 		xfs_fileoff_t offset);
52 
53 void xchk_set_incomplete(struct xfs_scrub *sc);
54 int xchk_checkpoint_log(struct xfs_mount *mp);
55 
56 /* Are we set up for a cross-referencing check? */
57 bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
58 			   struct xfs_btree_cur **curpp);
59 
/* Stub setup function for scrubbers compiled out of this kernel. */
static inline int xchk_setup_nothing(struct xfs_scrub *sc)
{
	/* -ENOENT tells the dispatcher this scrub type is unavailable. */
	return -ENOENT;
}
64 
65 /* Setup functions */
66 int xchk_setup_agheader(struct xfs_scrub *sc);
67 int xchk_setup_fs(struct xfs_scrub *sc);
68 int xchk_setup_rt(struct xfs_scrub *sc);
69 int xchk_setup_ag_allocbt(struct xfs_scrub *sc);
70 int xchk_setup_ag_iallocbt(struct xfs_scrub *sc);
71 int xchk_setup_ag_rmapbt(struct xfs_scrub *sc);
72 int xchk_setup_ag_refcountbt(struct xfs_scrub *sc);
73 int xchk_setup_inode(struct xfs_scrub *sc);
74 int xchk_setup_inode_bmap(struct xfs_scrub *sc);
75 int xchk_setup_inode_bmap_data(struct xfs_scrub *sc);
76 int xchk_setup_directory(struct xfs_scrub *sc);
77 int xchk_setup_xattr(struct xfs_scrub *sc);
78 int xchk_setup_symlink(struct xfs_scrub *sc);
79 int xchk_setup_parent(struct xfs_scrub *sc);
80 int xchk_setup_dirtree(struct xfs_scrub *sc);
81 int xchk_setup_metapath(struct xfs_scrub *sc);
82 #ifdef CONFIG_XFS_RT
83 int xchk_setup_rtbitmap(struct xfs_scrub *sc);
84 int xchk_setup_rtsummary(struct xfs_scrub *sc);
85 int xchk_setup_rgsuperblock(struct xfs_scrub *sc);
86 int xchk_setup_rtrmapbt(struct xfs_scrub *sc);
87 int xchk_setup_rtrefcountbt(struct xfs_scrub *sc);
88 #else
89 # define xchk_setup_rtbitmap		xchk_setup_nothing
90 # define xchk_setup_rtsummary		xchk_setup_nothing
91 # define xchk_setup_rgsuperblock	xchk_setup_nothing
92 # define xchk_setup_rtrmapbt		xchk_setup_nothing
93 # define xchk_setup_rtrefcountbt	xchk_setup_nothing
94 #endif
95 #ifdef CONFIG_XFS_QUOTA
96 int xchk_ino_dqattach(struct xfs_scrub *sc);
97 int xchk_setup_quota(struct xfs_scrub *sc);
98 int xchk_setup_quotacheck(struct xfs_scrub *sc);
99 #else
/*
 * With quota support compiled out there are no dquots to attach to the
 * inode, so report success and let the caller proceed.
 */
static inline int
xchk_ino_dqattach(struct xfs_scrub *sc)
{
	return 0;
}
105 # define xchk_setup_quota		xchk_setup_nothing
106 # define xchk_setup_quotacheck		xchk_setup_nothing
107 #endif
108 int xchk_setup_fscounters(struct xfs_scrub *sc);
109 int xchk_setup_nlinks(struct xfs_scrub *sc);
110 
111 void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
112 int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
113 		struct xchk_ag *sa);
114 int xchk_perag_drain_and_lock(struct xfs_scrub *sc);
115 
116 /*
117  * Grab all AG resources, treating the inability to grab the perag structure as
118  * a fs corruption.  This is intended for callers checking an ondisk reference
119  * to a given AG, which means that the AG must still exist.
120  */
121 static inline int
xchk_ag_init_existing(struct xfs_scrub * sc,xfs_agnumber_t agno,struct xchk_ag * sa)122 xchk_ag_init_existing(
123 	struct xfs_scrub	*sc,
124 	xfs_agnumber_t		agno,
125 	struct xchk_ag		*sa)
126 {
127 	int			error = xchk_ag_init(sc, agno, sa);
128 
129 	return error == -ENOENT ? -EFSCORRUPTED : error;
130 }
131 
132 #ifdef CONFIG_XFS_RT
133 
134 /* All the locks we need to check an rtgroup. */
135 #define XCHK_RTGLOCK_ALL	(XFS_RTGLOCK_BITMAP | \
136 				 XFS_RTGLOCK_RMAP | \
137 				 XFS_RTGLOCK_REFCOUNT)
138 
139 int xchk_rtgroup_init(struct xfs_scrub *sc, xfs_rgnumber_t rgno,
140 		struct xchk_rt *sr);
141 
142 static inline int
xchk_rtgroup_init_existing(struct xfs_scrub * sc,xfs_rgnumber_t rgno,struct xchk_rt * sr)143 xchk_rtgroup_init_existing(
144 	struct xfs_scrub	*sc,
145 	xfs_rgnumber_t		rgno,
146 	struct xchk_rt		*sr)
147 {
148 	int			error = xchk_rtgroup_init(sc, rgno, sr);
149 
150 	return error == -ENOENT ? -EFSCORRUPTED : error;
151 }
152 
153 int xchk_rtgroup_lock(struct xfs_scrub *sc, struct xchk_rt *sr,
154 		unsigned int rtglock_flags);
155 void xchk_rtgroup_unlock(struct xchk_rt *sr);
156 void xchk_rtgroup_btcur_free(struct xchk_rt *sr);
157 void xchk_rtgroup_free(struct xfs_scrub *sc, struct xchk_rt *sr);
158 #else
159 # define xchk_rtgroup_init(sc, rgno, sr)		(-EFSCORRUPTED)
160 # define xchk_rtgroup_init_existing(sc, rgno, sr)	(-EFSCORRUPTED)
161 # define xchk_rtgroup_lock(sc, sr, lockflags)		(-EFSCORRUPTED)
162 # define xchk_rtgroup_unlock(sr)			do { } while (0)
163 # define xchk_rtgroup_btcur_free(sr)			do { } while (0)
164 # define xchk_rtgroup_free(sc, sr)			do { } while (0)
165 #endif /* CONFIG_XFS_RT */
166 
167 int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
168 		struct xchk_ag *sa);
169 void xchk_ag_btcur_free(struct xchk_ag *sa);
170 void xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
171 int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
172 		const struct xfs_owner_info *oinfo, xfs_filblks_t *blocks);
173 
174 int xchk_setup_ag_btree(struct xfs_scrub *sc, bool force_log);
175 int xchk_iget_for_scrubbing(struct xfs_scrub *sc);
176 int xchk_setup_inode_contents(struct xfs_scrub *sc, unsigned int resblks);
177 int xchk_install_live_inode(struct xfs_scrub *sc, struct xfs_inode *ip);
178 
179 void xchk_ilock(struct xfs_scrub *sc, unsigned int ilock_flags);
180 bool xchk_ilock_nowait(struct xfs_scrub *sc, unsigned int ilock_flags);
181 void xchk_iunlock(struct xfs_scrub *sc, unsigned int ilock_flags);
182 
183 void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp);
184 
185 /*
186  * Grab the inode at @inum.  The caller must have created a scrub transaction
187  * so that we can confirm the inumber by walking the inobt and not deadlock on
188  * a loop in the inobt.
189  */
190 int xchk_iget(struct xfs_scrub *sc, xfs_ino_t inum, struct xfs_inode **ipp);
191 int xchk_iget_agi(struct xfs_scrub *sc, xfs_ino_t inum,
192 		struct xfs_buf **agi_bpp, struct xfs_inode **ipp);
193 void xchk_irele(struct xfs_scrub *sc, struct xfs_inode *ip);
194 int xchk_install_handle_inode(struct xfs_scrub *sc, struct xfs_inode *ip);
195 
196 /*
197  * Safe version of (untrusted) xchk_iget that uses an empty transaction to
198  * avoid deadlocking on loops in the inobt.  This should only be used in a
199  * scrub or repair setup routine, and only prior to grabbing a transaction.
200  */
201 static inline int
xchk_iget_safe(struct xfs_scrub * sc,xfs_ino_t inum,struct xfs_inode ** ipp)202 xchk_iget_safe(struct xfs_scrub *sc, xfs_ino_t inum, struct xfs_inode **ipp)
203 {
204 	int	error;
205 
206 	ASSERT(sc->tp == NULL);
207 
208 	error = xchk_trans_alloc(sc, 0);
209 	if (error)
210 		return error;
211 	error = xchk_iget(sc, inum, ipp);
212 	xchk_trans_cancel(sc);
213 	return error;
214 }
215 
216 /*
217  * Don't bother cross-referencing if we already found corruption or cross
218  * referencing discrepancies.
219  */
xchk_skip_xref(struct xfs_scrub_metadata * sm)220 static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
221 {
222 	return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
223 			       XFS_SCRUB_OFLAG_XCORRUPT);
224 }
225 
226 bool xchk_dir_looks_zapped(struct xfs_inode *dp);
227 bool xchk_pptr_looks_zapped(struct xfs_inode *ip);
228 
229 /* Decide if a repair is required. */
xchk_needs_repair(const struct xfs_scrub_metadata * sm)230 static inline bool xchk_needs_repair(const struct xfs_scrub_metadata *sm)
231 {
232 	return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
233 			       XFS_SCRUB_OFLAG_XCORRUPT |
234 			       XFS_SCRUB_OFLAG_PREEN);
235 }
236 
237 /*
238  * "Should we prepare for a repair?"
239  *
240  * Return true if the caller permits us to repair metadata and we're not
241  * setting up for a post-repair evaluation.
242  */
xchk_could_repair(const struct xfs_scrub * sc)243 static inline bool xchk_could_repair(const struct xfs_scrub *sc)
244 {
245 	return (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) &&
246 		!(sc->flags & XREP_ALREADY_FIXED);
247 }
248 
249 int xchk_metadata_inode_forks(struct xfs_scrub *sc);
250 
251 /*
252  * Setting up a hook to wait for intents to drain is costly -- we have to take
253  * the CPU hotplug lock and force an i-cache flush on all CPUs once to set it
254  * up, and again to tear it down.  These costs add up quickly, so we only want
255  * to enable the drain waiter if the drain actually detected a conflict with
256  * running intent chains.
257  */
xchk_need_intent_drain(struct xfs_scrub * sc)258 static inline bool xchk_need_intent_drain(struct xfs_scrub *sc)
259 {
260 	return sc->flags & XCHK_NEED_DRAIN;
261 }
262 
263 void xchk_fsgates_enable(struct xfs_scrub *sc, unsigned int scrub_fshooks);
264 
265 int xchk_inode_is_allocated(struct xfs_scrub *sc, xfs_agino_t agino,
266 		bool *inuse);
267 int xchk_inode_count_blocks(struct xfs_scrub *sc, int whichfork,
268 		xfs_extnum_t *nextents, xfs_filblks_t *count);
269 
270 bool xchk_inode_is_dirtree_root(const struct xfs_inode *ip);
271 bool xchk_inode_is_sb_rooted(const struct xfs_inode *ip);
272 xfs_ino_t xchk_inode_rootdir_inum(const struct xfs_inode *ip);
273 
274 #endif	/* __XFS_SCRUB_COMMON_H__ */
275