// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2018-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_mount.h"
12 #include "xfs_defer.h"
13 #include "xfs_btree.h"
14 #include "xfs_bit.h"
15 #include "xfs_log_format.h"
16 #include "xfs_trans.h"
17 #include "xfs_sb.h"
18 #include "xfs_rmap.h"
19 #include "xfs_rmap_btree.h"
20 #include "xfs_rtrmap_btree.h"
21 #include "xfs_inode.h"
22 #include "xfs_rtalloc.h"
23 #include "xfs_rtgroup.h"
24 #include "xfs_metafile.h"
25 #include "xfs_refcount.h"
26 #include "scrub/xfs_scrub.h"
27 #include "scrub/scrub.h"
28 #include "scrub/common.h"
29 #include "scrub/btree.h"
30 #include "scrub/trace.h"
31 #include "scrub/repair.h"
32
/* Set us up with the realtime metadata locked. */
int
xchk_setup_rtrmapbt(
	struct xfs_scrub	*sc)
{
	int			error;

	/* Enable the intent-drain fsgate if this scrub run requires it. */
	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	/* Give the repair code a chance to do its own setup first. */
	if (xchk_could_repair(sc)) {
		error = xrep_setup_rtrmapbt(sc);
		if (error)
			return error;
	}

	/* Attach the realtime group named in the scrub request. */
	error = xchk_rtgroup_init(sc, sc->sm->sm_agno, &sc->sr);
	if (error)
		return error;

	error = xchk_setup_rt(sc);
	if (error)
		return error;

	/* The rtgroup's rmap inode is the inode under scrub. */
	error = xchk_install_live_inode(sc, rtg_rmap(sc->sr.rtg));
	if (error)
		return error;

	/* Take every rtgroup lock before walking the btree. */
	return xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL);
}
63
/* Realtime reverse mapping. */

/* In-core state carried between records while scrubbing a rtrmap btree. */
struct xchk_rtrmap {
	/*
	 * The furthest-reaching of the rmapbt records that we've already
	 * processed.  This enables us to detect overlapping records for space
	 * allocations that cannot be shared.
	 */
	struct xfs_rmap_irec	overlap_rec;

	/*
	 * The previous rmapbt record, so that we can check for two records
	 * that could be one.
	 */
	struct xfs_rmap_irec	prev_rec;
};
80
81 static inline bool
xchk_rtrmapbt_is_shareable(struct xfs_scrub * sc,const struct xfs_rmap_irec * irec)82 xchk_rtrmapbt_is_shareable(
83 struct xfs_scrub *sc,
84 const struct xfs_rmap_irec *irec)
85 {
86 if (!xfs_has_rtreflink(sc->mp))
87 return false;
88 if (irec->rm_flags & XFS_RMAP_UNWRITTEN)
89 return false;
90 return true;
91 }
92
93 /* Flag failures for records that overlap but cannot. */
94 STATIC void
xchk_rtrmapbt_check_overlapping(struct xchk_btree * bs,struct xchk_rtrmap * cr,const struct xfs_rmap_irec * irec)95 xchk_rtrmapbt_check_overlapping(
96 struct xchk_btree *bs,
97 struct xchk_rtrmap *cr,
98 const struct xfs_rmap_irec *irec)
99 {
100 xfs_rtblock_t pnext, inext;
101
102 if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
103 return;
104
105 /* No previous record? */
106 if (cr->overlap_rec.rm_blockcount == 0)
107 goto set_prev;
108
109 /* Do overlap_rec and irec overlap? */
110 pnext = cr->overlap_rec.rm_startblock + cr->overlap_rec.rm_blockcount;
111 if (pnext <= irec->rm_startblock)
112 goto set_prev;
113
114 /* Overlap is only allowed if both records are data fork mappings. */
115 if (!xchk_rtrmapbt_is_shareable(bs->sc, &cr->overlap_rec) ||
116 !xchk_rtrmapbt_is_shareable(bs->sc, irec))
117 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
118
119 /* Save whichever rmap record extends furthest. */
120 inext = irec->rm_startblock + irec->rm_blockcount;
121 if (pnext > inext)
122 return;
123
124 set_prev:
125 memcpy(&cr->overlap_rec, irec, sizeof(struct xfs_rmap_irec));
126 }
127
128 /* Decide if two reverse-mapping records can be merged. */
129 static inline bool
xchk_rtrmap_mergeable(struct xchk_rtrmap * cr,const struct xfs_rmap_irec * r2)130 xchk_rtrmap_mergeable(
131 struct xchk_rtrmap *cr,
132 const struct xfs_rmap_irec *r2)
133 {
134 const struct xfs_rmap_irec *r1 = &cr->prev_rec;
135
136 /* Ignore if prev_rec is not yet initialized. */
137 if (cr->prev_rec.rm_blockcount == 0)
138 return false;
139
140 if (r1->rm_owner != r2->rm_owner)
141 return false;
142 if (r1->rm_startblock + r1->rm_blockcount != r2->rm_startblock)
143 return false;
144 if ((unsigned long long)r1->rm_blockcount + r2->rm_blockcount >
145 XFS_RMAP_LEN_MAX)
146 return false;
147 if (r1->rm_flags != r2->rm_flags)
148 return false;
149 return r1->rm_offset + r1->rm_blockcount == r2->rm_offset;
150 }
151
152 /* Flag failures for records that could be merged. */
153 STATIC void
xchk_rtrmapbt_check_mergeable(struct xchk_btree * bs,struct xchk_rtrmap * cr,const struct xfs_rmap_irec * irec)154 xchk_rtrmapbt_check_mergeable(
155 struct xchk_btree *bs,
156 struct xchk_rtrmap *cr,
157 const struct xfs_rmap_irec *irec)
158 {
159 if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
160 return;
161
162 if (xchk_rtrmap_mergeable(cr, irec))
163 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
164
165 memcpy(&cr->prev_rec, irec, sizeof(struct xfs_rmap_irec));
166 }
167
168 /* Cross-reference a rmap against the refcount btree. */
169 STATIC void
xchk_rtrmapbt_xref_rtrefc(struct xfs_scrub * sc,struct xfs_rmap_irec * irec)170 xchk_rtrmapbt_xref_rtrefc(
171 struct xfs_scrub *sc,
172 struct xfs_rmap_irec *irec)
173 {
174 xfs_rgblock_t fbno;
175 xfs_extlen_t flen;
176 bool is_inode;
177 bool is_bmbt;
178 bool is_attr;
179 bool is_unwritten;
180 int error;
181
182 if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
183 return;
184
185 is_inode = !XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
186 is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
187 is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
188 is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;
189
190 /* If this is shared, must be a data fork extent. */
191 error = xfs_refcount_find_shared(sc->sr.refc_cur, irec->rm_startblock,
192 irec->rm_blockcount, &fbno, &flen, false);
193 if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
194 return;
195 if (flen != 0 && (!is_inode || is_attr || is_bmbt || is_unwritten))
196 xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
197 }
198
/* Cross-reference with other metadata. */
STATIC void
xchk_rtrmapbt_xref(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	/* Skip cross-referencing once corruption has been found. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	/* The mapped space should be accounted as in use on the rt device. */
	xchk_xref_is_used_rt_space(sc,
			xfs_rgbno_to_rtb(sc->sr.rtg, irec->rm_startblock),
			irec->rm_blockcount);
	/*
	 * CoW-owned mappings are checked against the CoW staging records;
	 * everything else is checked against the rt refcount btree.
	 */
	if (irec->rm_owner == XFS_RMAP_OWN_COW)
		xchk_xref_is_cow_staging(sc, irec->rm_startblock,
				irec->rm_blockcount);
	else
		xchk_rtrmapbt_xref_rtrefc(sc, irec);
}
217
218 /* Scrub a realtime rmapbt record. */
219 STATIC int
xchk_rtrmapbt_rec(struct xchk_btree * bs,const union xfs_btree_rec * rec)220 xchk_rtrmapbt_rec(
221 struct xchk_btree *bs,
222 const union xfs_btree_rec *rec)
223 {
224 struct xchk_rtrmap *cr = bs->private;
225 struct xfs_rmap_irec irec;
226
227 if (xfs_rmap_btrec_to_irec(rec, &irec) != NULL ||
228 xfs_rtrmap_check_irec(to_rtg(bs->cur->bc_group), &irec) != NULL) {
229 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
230 return 0;
231 }
232
233 if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
234 return 0;
235
236 xchk_rtrmapbt_check_mergeable(bs, cr, &irec);
237 xchk_rtrmapbt_check_overlapping(bs, cr, &irec);
238 xchk_rtrmapbt_xref(bs->sc, &irec);
239 return 0;
240 }
241
/* Scrub the realtime rmap btree. */
int
xchk_rtrmapbt(
	struct xfs_scrub	*sc)
{
	struct xfs_inode	*ip = rtg_rmap(sc->sr.rtg);
	struct xfs_owner_info	oinfo;
	struct xchk_rtrmap	cr = { };
	int			error;

	/* First check the forks of the metadata inode rooting this btree. */
	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/*
	 * Walk every record with the rmap inode's data fork as the expected
	 * owner of the btree blocks.
	 */
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, XFS_DATA_FORK);
	return xchk_btree(sc, sc->sr.rmap_cur, xchk_rtrmapbt_rec, &oinfo, &cr);
}
259
/* xref check that the extent has no realtime reverse mapping at all */
void
xchk_xref_has_no_rt_owner(
	struct xfs_scrub	*sc,
	xfs_rgblock_t		bno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	/* Nothing to do without a cursor, or when xrefs are being skipped. */
	if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_has_records(sc->sr.rmap_cur, bno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
		return;
	/* Any rmap coverage of this extent at all is a corruption. */
	if (outcome != XBTREE_RECPACKING_EMPTY)
		xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
}
279
/* xref check that the extent is completely mapped */
void
xchk_xref_has_rt_owner(
	struct xfs_scrub	*sc,
	xfs_rgblock_t		bno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	/* Nothing to do without a cursor, or when xrefs are being skipped. */
	if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_has_records(sc->sr.rmap_cur, bno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
		return;
	/* Anything short of full rmap coverage of this extent is corruption. */
	if (outcome != XBTREE_RECPACKING_FULL)
		xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
}
299
/* xref check that the extent is only owned by a given owner */
void
xchk_xref_is_only_rt_owned_by(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	const struct xfs_owner_info *oinfo)
{
	struct xfs_rmap_matches	res;
	int			error;

	/* Nothing to do without a cursor, or when xrefs are being skipped. */
	if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_count_owners(sc->sr.rmap_cur, bno, len, oinfo, &res);
	if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
		return;
	/* Expect exactly one mapping attributed to @oinfo... */
	if (res.matches != 1)
		xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
	/* ...and none attributed to anyone else. */
	if (res.bad_non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
	if (res.non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
}
324