// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef __XFS_SCRUB_COMMON_H__
#define __XFS_SCRUB_COMMON_H__

int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
int xchk_trans_alloc_empty(struct xfs_scrub *sc);
void xchk_trans_cancel(struct xfs_scrub *sc);

bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
		xfs_agblock_t bno, int *error);
bool xchk_process_rt_error(struct xfs_scrub *sc, xfs_rgnumber_t rgno,
		xfs_rgblock_t rgbno, int *error);
bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset, int *error);

bool xchk_xref_process_error(struct xfs_scrub *sc,
		xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
		int whichfork, xfs_fileoff_t offset, int *error);

void xchk_block_set_preen(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);

void xchk_set_corrupt(struct xfs_scrub *sc);
void xchk_block_set_corrupt(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino);
void xchk_fblock_set_corrupt(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset);
#ifdef CONFIG_XFS_QUOTA
void xchk_qcheck_set_corrupt(struct xfs_scrub *sc, unsigned int dqtype,
		xfs_dqid_t id);
#endif

void xchk_block_xref_set_corrupt(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_xref_set_corrupt(struct xfs_scrub *sc,
		xfs_ino_t ino);
void xchk_fblock_xref_set_corrupt(struct xfs_scrub *sc,
		int whichfork, xfs_fileoff_t offset);

void xchk_ino_set_warning(struct xfs_scrub *sc, xfs_ino_t ino);
void xchk_fblock_set_warning(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset);

void xchk_set_incomplete(struct xfs_scrub *sc);
int xchk_checkpoint_log(struct xfs_mount *mp);

/* Are we set up for a cross-referencing check? */
bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
		struct xfs_btree_cur **curpp);
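/*
 * Hedged usage sketch (not taken from this file): a cross-reference helper
 * would typically bail out quietly when the cursor was never set up or a
 * prior xref call already failed.  Assumes the bno_cur cursor kept in
 * struct xchk_ag (scrub.h):
 *
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
 *		return;
 */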

static inline int xchk_setup_nothing(struct xfs_scrub *sc)
{
	return -ENOENT;
}

/* Setup functions */
int xchk_setup_agheader(struct xfs_scrub *sc);
int xchk_setup_fs(struct xfs_scrub *sc);
int xchk_setup_rt(struct xfs_scrub *sc);
int xchk_setup_ag_allocbt(struct xfs_scrub *sc);
int xchk_setup_ag_iallocbt(struct xfs_scrub *sc);
int xchk_setup_ag_rmapbt(struct xfs_scrub *sc);
int xchk_setup_ag_refcountbt(struct xfs_scrub *sc);
int xchk_setup_inode(struct xfs_scrub *sc);
int xchk_setup_inode_bmap(struct xfs_scrub *sc);
int xchk_setup_inode_bmap_data(struct xfs_scrub *sc);
int xchk_setup_directory(struct xfs_scrub *sc);
int xchk_setup_xattr(struct xfs_scrub *sc);
int xchk_setup_symlink(struct xfs_scrub *sc);
int xchk_setup_parent(struct xfs_scrub *sc);
int xchk_setup_dirtree(struct xfs_scrub *sc);
int xchk_setup_metapath(struct xfs_scrub *sc);
#ifdef CONFIG_XFS_RT
int xchk_setup_rtbitmap(struct xfs_scrub *sc);
int xchk_setup_rtsummary(struct xfs_scrub *sc);
int xchk_setup_rgsuperblock(struct xfs_scrub *sc);
int xchk_setup_rtrmapbt(struct xfs_scrub *sc);
int xchk_setup_rtrefcountbt(struct xfs_scrub *sc);
#else
# define xchk_setup_rtbitmap		xchk_setup_nothing
# define xchk_setup_rtsummary		xchk_setup_nothing
# define xchk_setup_rgsuperblock	xchk_setup_nothing
# define xchk_setup_rtrmapbt		xchk_setup_nothing
# define xchk_setup_rtrefcountbt	xchk_setup_nothing
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_ino_dqattach(struct xfs_scrub *sc);
int xchk_setup_quota(struct xfs_scrub *sc);
int xchk_setup_quotacheck(struct xfs_scrub *sc);
#else
static inline int
xchk_ino_dqattach(struct xfs_scrub *sc)
{
	return 0;
}
# define xchk_setup_quota		xchk_setup_nothing
# define xchk_setup_quotacheck		xchk_setup_nothing
#endif
int xchk_setup_fscounters(struct xfs_scrub *sc);
int xchk_setup_nlinks(struct xfs_scrub *sc);

void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
		struct xchk_ag *sa);
int xchk_perag_drain_and_lock(struct xfs_scrub *sc);

/*
 * Grab all AG resources, treating the inability to grab the perag structure as
 * a fs corruption.  This is intended for callers checking an ondisk reference
 * to a given AG, which means that the AG must still exist.
 */
static inline int
xchk_ag_init_existing(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int	error = xchk_ag_init(sc, agno, sa);

	return error == -ENOENT ? -EFSCORRUPTED : error;
}
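/*
 * Hedged usage sketch: a checker following an ondisk reference to AG @agno
 * might pair this with the xref error helper above; agno and agbno are
 * illustrative locals:
 *
 *	error = xchk_ag_init_existing(sc, agno, &sc->sa);
 *	if (!xchk_xref_process_error(sc, agno, agbno, &error))
 *		return;
 */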

#ifdef CONFIG_XFS_RT

/* All the locks we need to check an rtgroup. */
#define XCHK_RTGLOCK_ALL	(XFS_RTGLOCK_BITMAP | \
				 XFS_RTGLOCK_RMAP | \
				 XFS_RTGLOCK_REFCOUNT)

int xchk_rtgroup_init(struct xfs_scrub *sc, xfs_rgnumber_t rgno,
		struct xchk_rt *sr);

static inline int
xchk_rtgroup_init_existing(
	struct xfs_scrub	*sc,
	xfs_rgnumber_t		rgno,
	struct xchk_rt		*sr)
{
	int	error = xchk_rtgroup_init(sc, rgno, sr);

	return error == -ENOENT ? -EFSCORRUPTED : error;
}

int xchk_rtgroup_lock(struct xfs_scrub *sc, struct xchk_rt *sr,
		unsigned int rtglock_flags);
void xchk_rtgroup_unlock(struct xchk_rt *sr);
void xchk_rtgroup_btcur_free(struct xchk_rt *sr);
void xchk_rtgroup_free(struct xfs_scrub *sc, struct xchk_rt *sr);
#else
# define xchk_rtgroup_init(sc, rgno, sr)		(-EFSCORRUPTED)
# define xchk_rtgroup_init_existing(sc, rgno, sr)	(-EFSCORRUPTED)
# define xchk_rtgroup_lock(sc, sr, lockflags)		(-EFSCORRUPTED)
# define xchk_rtgroup_unlock(sr)			do { } while (0)
# define xchk_rtgroup_btcur_free(sr)			do { } while (0)
# define xchk_rtgroup_free(sc, sr)			do { } while (0)
#endif /* CONFIG_XFS_RT */
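/*
 * Hedged usage sketch: a realtime group scrubber setup path might grab the
 * group and take every lock named in XCHK_RTGLOCK_ALL; rgno is an
 * illustrative local:
 *
 *	error = xchk_rtgroup_init_existing(sc, rgno, &sc->sr);
 *	if (error)
 *		return error;
 *	error = xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL);
 *	if (error)
 *		return error;
 */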

int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
		struct xchk_ag *sa);
void xchk_ag_btcur_free(struct xchk_ag *sa);
void xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
		const struct xfs_owner_info *oinfo, xfs_filblks_t *blocks);

int xchk_setup_ag_btree(struct xfs_scrub *sc, bool force_log);
int xchk_iget_for_scrubbing(struct xfs_scrub *sc);
int xchk_setup_inode_contents(struct xfs_scrub *sc, unsigned int resblks);
int xchk_install_live_inode(struct xfs_scrub *sc, struct xfs_inode *ip);

void xchk_ilock(struct xfs_scrub *sc, unsigned int ilock_flags);
bool xchk_ilock_nowait(struct xfs_scrub *sc, unsigned int ilock_flags);
void xchk_iunlock(struct xfs_scrub *sc, unsigned int ilock_flags);

void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp);

/*
 * Grab the inode at @inum.  The caller must have created a scrub transaction
 * so that we can confirm the inumber by walking the inobt without deadlocking
 * on a loop in the inobt.
 */
int xchk_iget(struct xfs_scrub *sc, xfs_ino_t inum, struct xfs_inode **ipp);
int xchk_iget_agi(struct xfs_scrub *sc, xfs_ino_t inum,
		struct xfs_buf **agi_bpp, struct xfs_inode **ipp);
void xchk_irele(struct xfs_scrub *sc, struct xfs_inode *ip);
int xchk_install_handle_inode(struct xfs_scrub *sc, struct xfs_inode *ip);
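/*
 * Hedged usage sketch of the ordering described above: allocate the scrub
 * transaction first, then grab and later release the inode; resblks, inum,
 * and ip are illustrative locals:
 *
 *	error = xchk_trans_alloc(sc, resblks);
 *	if (error)
 *		return error;
 *	error = xchk_iget(sc, inum, &ip);
 *	if (error)
 *		return error;
 *	...
 *	xchk_irele(sc, ip);
 */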

/*
 * Safe version of (untrusted) xchk_iget that uses an empty transaction to
 * avoid deadlocking on loops in the inobt.  This should only be used in a
 * scrub or repair setup routine, and only prior to grabbing a transaction.
 */
static inline int
xchk_iget_safe(struct xfs_scrub *sc, xfs_ino_t inum, struct xfs_inode **ipp)
{
	int	error;

	ASSERT(sc->tp == NULL);

	error = xchk_trans_alloc(sc, 0);
	if (error)
		return error;
	error = xchk_iget(sc, inum, ipp);
	xchk_trans_cancel(sc);
	return error;
}

/*
 * Don't bother cross-referencing if we already found corruption or
 * cross-referencing discrepancies.
 */
static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
{
	return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
			       XFS_SCRUB_OFLAG_XCORRUPT);
}
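/*
 * Hedged usage sketch: cross-reference helpers typically return early once
 * corruption has already been recorded:
 *
 *	if (xchk_skip_xref(sc->sm))
 *		return;
 */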

bool xchk_dir_looks_zapped(struct xfs_inode *dp);
bool xchk_pptr_looks_zapped(struct xfs_inode *ip);

/* Decide if a repair is required. */
static inline bool xchk_needs_repair(const struct xfs_scrub_metadata *sm)
{
	return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
			       XFS_SCRUB_OFLAG_XCORRUPT |
			       XFS_SCRUB_OFLAG_PREEN);
}

/*
 * "Should we prepare for a repair?"
 *
 * Return true if the caller permits us to repair metadata and we're not
 * setting up for a post-repair evaluation.
 */
static inline bool xchk_could_repair(const struct xfs_scrub *sc)
{
	return (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) &&
		!(sc->flags & XREP_ALREADY_FIXED);
}
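/*
 * Hedged usage sketch: a scrub setup function might reserve repair resources
 * only when a repair could actually happen; xrep_setup_foo() stands in for a
 * per-type repair setup hook and is purely illustrative:
 *
 *	if (xchk_could_repair(sc)) {
 *		error = xrep_setup_foo(sc);
 *		if (error)
 *			return error;
 *	}
 */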

int xchk_metadata_inode_forks(struct xfs_scrub *sc);

/*
 * Helper macros to allocate and format xfile description strings.
 * Callers must kfree the pointer returned.
 */
#define xchk_xfile_descr(sc, fmt, ...) \
	kasprintf(XCHK_GFP_FLAGS, "XFS (%s): " fmt, \
			(sc)->mp->m_super->s_id, ##__VA_ARGS__)
#define xchk_xfile_ag_descr(sc, fmt, ...) \
	kasprintf(XCHK_GFP_FLAGS, "XFS (%s): AG 0x%x " fmt, \
			(sc)->mp->m_super->s_id, \
			(sc)->sa.pag ? \
			pag_agno((sc)->sa.pag) : (sc)->sm->sm_agno, \
			##__VA_ARGS__)
#define xchk_xfile_ino_descr(sc, fmt, ...) \
	kasprintf(XCHK_GFP_FLAGS, "XFS (%s): inode 0x%llx " fmt, \
			(sc)->mp->m_super->s_id, \
			(sc)->ip ? (sc)->ip->i_ino : (sc)->sm->sm_ino, \
			##__VA_ARGS__)
#define xchk_xfile_rtgroup_descr(sc, fmt, ...) \
	kasprintf(XCHK_GFP_FLAGS, "XFS (%s): rtgroup 0x%x " fmt, \
			(sc)->mp->m_super->s_id, \
			(sc)->sr.rtg ? \
			rtg_rgno((sc)->sr.rtg) : (sc)->sm->sm_agno, \
			##__VA_ARGS__)
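/*
 * Hedged usage sketch: the string comes from kasprintf, so the caller frees
 * it once the consumer is done with it; create_staging_object() is purely
 * illustrative:
 *
 *	char *descr = xchk_xfile_ino_descr(sc, "directory entries");
 *
 *	if (!descr)
 *		return -ENOMEM;
 *	error = create_staging_object(descr, ...);
 *	kfree(descr);
 */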

/*
 * Setting up a hook to wait for intents to drain is costly -- we have to take
 * the CPU hotplug lock and force an i-cache flush on all CPUs once to set it
 * up, and again to tear it down.  These costs add up quickly, so we only want
 * to enable the drain waiter if the drain actually detected a conflict with
 * running intent chains.
 */
static inline bool xchk_need_intent_drain(struct xfs_scrub *sc)
{
	return sc->flags & XCHK_NEED_DRAIN;
}
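/*
 * Hedged usage sketch: a setup function would pay the hook cost only after a
 * previous attempt collided with a running intent chain, assuming the
 * XCHK_FSGATES_DRAIN gate defined in scrub.h:
 *
 *	if (xchk_need_intent_drain(sc))
 *		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
 */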

void xchk_fsgates_enable(struct xfs_scrub *sc, unsigned int scrub_fshooks);

int xchk_inode_is_allocated(struct xfs_scrub *sc, xfs_agino_t agino,
		bool *inuse);
int xchk_inode_count_blocks(struct xfs_scrub *sc, int whichfork,
		xfs_extnum_t *nextents, xfs_filblks_t *count);

bool xchk_inode_is_dirtree_root(const struct xfs_inode *ip);
bool xchk_inode_is_sb_rooted(const struct xfs_inode *ip);
xfs_ino_t xchk_inode_rootdir_inum(const struct xfs_inode *ip);

#endif /* __XFS_SCRUB_COMMON_H__ */