// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2021-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_inode.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtgroup.h"
#include "xfs_metafile.h"
#include "xfs_rtrefcount_btree.h"
#include "xfs_rtalloc.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/repair.h"

/* Set us up with the realtime refcount metadata locked. */
int
xchk_setup_rtrefcountbt(
	struct xfs_scrub	*sc)
{
	int			error;

	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	if (xchk_could_repair(sc)) {
		error = xrep_setup_rtrefcountbt(sc);
		if (error)
			return error;
	}

	error = xchk_rtgroup_init(sc, sc->sm->sm_agno, &sc->sr);
	if (error)
		return error;

	error = xchk_setup_rt(sc);
	if (error)
		return error;

	error = xchk_install_live_inode(sc, rtg_refcount(sc->sr.rtg));
	if (error)
		return error;

	return xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL);
}

/* Realtime Reference count btree scrubber. */

/*
 * Confirming Reference Counts via Reverse Mappings
 *
 * We want to count the reverse mappings overlapping a refcount record
 * (bno, len, refcount), allowing for the possibility that some of the
 * overlap may come from smaller adjoining reverse mappings, while some
 * comes from single extents which overlap the range entirely.  The
 * outer loop is as follows:
 *
 * 1. For all reverse mappings overlapping the refcount extent,
 *    a. If a given rmap completely overlaps, mark it as seen.
 *    b. Otherwise, record the fragment (in rgbno order) for later
 *       processing.
 *
 * Once we've seen all the rmaps, we know that for all blocks in the
 * refcount record we want to find $refcount owners and we've already
 * visited $seen extents that overlap all the blocks.  Therefore, we
 * need to find ($refcount - $seen) owners for every block in the
 * extent; call that quantity $target_nr.  Proceed as follows:
 *
 * 2. Pull the first $target_nr fragments from the list; all of them
 *    should start at or before the start of the extent.
 *    Call this subset of fragments the working set.
 * 3. Until there are no more unprocessed fragments,
 *    a. Find the shortest fragments in the set and remove them.
 *    b. Note the block number of the end of these fragments.
 *    c. Pull the same number of fragments from the list.  All of these
 *       fragments should start at the block number recorded in the
 *       previous step.
 *    d. Put those fragments in the set.
 * 4. Check that there are $target_nr fragments remaining in the list,
 *    and that they all end at or beyond the end of the refcount extent.
 *
 * If the refcount is correct, all the check conditions in the algorithm
 * should always hold true.  If not, the refcount is incorrect.
 */
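
/*
 * Worked example (hypothetical numbers, for illustration only): suppose
 * the rtrefcountbt holds the record (bno=100, len=4, refcount=3) and the
 * rmapbt holds these four overlapping mappings:
 *
 *	A: (startblock=98,  blockcount=8)  full overlap -> $seen = 1
 *	D: (startblock=100, blockcount=4)  full overlap -> $seen = 2
 *	B: (startblock=99,  blockcount=3)  partial, saved as a fragment
 *	C: (startblock=102, blockcount=5)  partial, saved as a fragment
 *
 * Step 2 computes $target_nr = 3 - 2 = 1 and pulls B (which starts at or
 * before bno=100) into the working set; B ends at block 102.  Step 3
 * discards B and pulls C, which starts exactly at block 102, so the
 * running tally never dips.  C ends at block 107, at or beyond the end
 * of the extent (block 104), so step 4 passes and we conclude that every
 * block in the record really does have 3 owners.
 */
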
struct xchk_rtrefcnt_frag {
	struct list_head	list;
	struct xfs_rmap_irec	rm;
};

struct xchk_rtrefcnt_check {
	struct xfs_scrub	*sc;
	struct list_head	fragments;

	/* refcount extent we're examining */
	xfs_rgblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;

	/* number of owners seen */
	xfs_nlink_t		seen;
};

/*
 * Decide if the given rmap is large enough that we can redeem it
 * towards refcount verification now, or if it's a fragment, in
 * which case we'll hang onto it in the hopes that we'll later
 * discover that we've collected exactly the correct number of
 * fragments as the rtrefcountbt says we should have.
 */
STATIC int
xchk_rtrefcountbt_rmap_check(
	struct xfs_btree_cur	*cur,
	const struct xfs_rmap_irec *rec,
	void			*priv)
{
	struct xchk_rtrefcnt_check	*refchk = priv;
	struct xchk_rtrefcnt_frag	*frag;
	xfs_rgblock_t		rm_last;
	xfs_rgblock_t		rc_last;
	int			error = 0;

	if (xchk_should_terminate(refchk->sc, &error))
		return error;

	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
	rc_last = refchk->bno + refchk->len - 1;

	/* Confirm that a single-owner refc extent is a CoW stage. */
	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
		xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
		return 0;
	}

	if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
		/*
		 * The rmap overlaps the refcount record, so we can confirm
		 * one refcount owner seen.
		 */
		refchk->seen++;
	} else {
		/*
		 * This rmap covers only part of the refcount record, so
		 * save the fragment for later processing.  If the rmapbt
		 * is healthy each rmap_irec we see will be in rgbno order
		 * so we don't need insertion sort here.
		 */
		frag = kzalloc(sizeof(struct xchk_rtrefcnt_frag),
				XCHK_GFP_FLAGS);
		if (!frag)
			return -ENOMEM;
		memcpy(&frag->rm, rec, sizeof(frag->rm));
		list_add_tail(&frag->list, &refchk->fragments);
	}

	return 0;
}
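
/*
 * Classification sketch (hypothetical numbers): for a refcount record
 * (bno=100, len=4), the inclusive end is rc_last = 103.  An rmap
 * (startblock=98, blockcount=8) has rm_last = 105 >= rc_last and
 * rm_startblock <= bno, so it counts as one whole owner and bumps
 * ->seen.  An rmap (startblock=102, blockcount=5) starts inside the
 * record, so it is queued on ->fragments for the matching pass below.
 */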

/*
 * Given a bunch of rmap fragments, iterate through them, keeping
 * a running tally of the refcount.  If this ever deviates from
 * what we expect (which is the rtrefcountbt's refcount minus the
 * number of extents that totally covered the rtrefcountbt extent),
 * we have a rtrefcountbt error.
 */
STATIC void
xchk_rtrefcountbt_process_rmap_fragments(
	struct xchk_rtrefcnt_check	*refchk)
{
	struct list_head	worklist;
	struct xchk_rtrefcnt_frag *frag;
	struct xchk_rtrefcnt_frag *n;
	xfs_rgblock_t		bno;
	xfs_rgblock_t		rbno;
	xfs_rgblock_t		next_rbno;
	xfs_nlink_t		nr;
	xfs_nlink_t		target_nr;

	target_nr = refchk->refcount - refchk->seen;
	if (target_nr == 0)
		return;
	/*
	 * There are (refchk->refcount - refchk->seen) references we
	 * haven't found yet.  Pull that many off the fragment list and
	 * figure out where the smallest rmap ends (and therefore the
	 * next rmap should start).  All the rmaps we pull off should
	 * start at or before the beginning of the refcount record's
	 * range.
	 */
	INIT_LIST_HEAD(&worklist);
	rbno = NULLRGBLOCK;

	/* Make sure the fragments actually /are/ in bno order. */
	bno = 0;
	list_for_each_entry(frag, &refchk->fragments, list) {
		if (frag->rm.rm_startblock < bno)
			goto done;
		bno = frag->rm.rm_startblock;
	}

	/*
	 * Find all the rmaps that start at or before the refc extent,
	 * and put them on the worklist.
	 */
	nr = 0;
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
			break;
		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
		if (bno < rbno)
			rbno = bno;
		list_move_tail(&frag->list, &worklist);
		nr++;
	}

	/*
	 * We should have found exactly $target_nr rmap fragments starting
	 * at or before the refcount extent.
	 */
	if (nr != target_nr)
		goto done;

	while (!list_empty(&refchk->fragments)) {
		/* Discard any fragments ending at rbno from the worklist. */
		nr = 0;
		next_rbno = NULLRGBLOCK;
		list_for_each_entry_safe(frag, n, &worklist, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (bno != rbno) {
				if (bno < next_rbno)
					next_rbno = bno;
				continue;
			}
			list_del(&frag->list);
			kfree(frag);
			nr++;
		}

		/* Try to add nr rmaps starting at rbno to the worklist. */
		list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (frag->rm.rm_startblock != rbno)
				goto done;
			list_move_tail(&frag->list, &worklist);
			if (next_rbno > bno)
				next_rbno = bno;
			nr--;
			if (nr == 0)
				break;
		}

		/*
		 * If we get here and nr > 0, this means that we added fewer
		 * items to the worklist than we discarded because the fragment
		 * list ran out of items.  Therefore, we cannot maintain the
		 * required refcount.  Something is wrong, so we're done.
		 */
		if (nr)
			goto done;

		rbno = next_rbno;
	}

	/*
	 * Make sure the last extent we processed ends at or beyond
	 * the end of the refcount extent.
	 */
	if (rbno < refchk->bno + refchk->len)
		goto done;

	/* Actually record us having seen the remaining refcount. */
	refchk->seen = refchk->refcount;
done:
	/* Delete fragments and work list. */
	list_for_each_entry_safe(frag, n, &worklist, list) {
		list_del(&frag->list);
		kfree(frag);
	}
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		list_del(&frag->list);
		kfree(frag);
	}
}
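
/*
 * Failure-mode sketch (hypothetical numbers): continuing the worked
 * example above, suppose fragment C were missing from the rmapbt.  The
 * first pass still moves B to the worklist (nr == target_nr == 1), but
 * the fragment list is then empty, so the while loop never runs and
 * rbno stays at 102, short of bno + len = 104.  We jump to done without
 * updating ->seen, and the caller flags the discrepancy as a
 * cross-referencing failure.
 */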

/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
xchk_rtrefcountbt_xref_rmap(
	struct xfs_scrub	*sc,
	const struct xfs_refcount_irec *irec)
{
	struct xchk_rtrefcnt_check	refchk = {
		.sc			= sc,
		.bno			= irec->rc_startblock,
		.len			= irec->rc_blockcount,
		.refcount		= irec->rc_refcount,
		.seen			= 0,
	};
	struct xfs_rmap_irec	low;
	struct xfs_rmap_irec	high;
	struct xchk_rtrefcnt_frag *frag;
	struct xchk_rtrefcnt_frag *n;
	int			error;

	if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Cross-reference with the rmapbt to confirm the refcount. */
	memset(&low, 0, sizeof(low));
	low.rm_startblock = irec->rc_startblock;
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = irec->rc_startblock + irec->rc_blockcount - 1;

	INIT_LIST_HEAD(&refchk.fragments);
	error = xfs_rmap_query_range(sc->sr.rmap_cur, &low, &high,
			xchk_rtrefcountbt_rmap_check, &refchk);
	if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
		goto out_free;

	xchk_rtrefcountbt_process_rmap_fragments(&refchk);
	if (irec->rc_refcount != refchk.seen)
		xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);

out_free:
	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
		list_del(&frag->list);
		kfree(frag);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_rtrefcountbt_xref(
	struct xfs_scrub	*sc,
	const struct xfs_refcount_irec *irec)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_rt_space(sc,
			xfs_rgbno_to_rtb(sc->sr.rtg, irec->rc_startblock),
			irec->rc_blockcount);
	xchk_rtrefcountbt_xref_rmap(sc, irec);
}

struct xchk_rtrefcbt_records {
	/* Previous refcount record. */
	struct xfs_refcount_irec prev_rec;

	/* The next rtgroup block where we aren't expecting shared extents. */
	xfs_rgblock_t		next_unshared_rgbno;

	/* Number of CoW blocks we expect. */
	xfs_extlen_t		cow_blocks;

	/* Was the last record a shared or CoW staging extent? */
	enum xfs_refc_domain	prev_domain;
};

static inline bool
xchk_rtrefcount_mergeable(
	struct xchk_rtrefcbt_records	*rrc,
	const struct xfs_refcount_irec	*r2)
{
	const struct xfs_refcount_irec	*r1 = &rrc->prev_rec;

	/* Ignore if prev_rec is not yet initialized. */
	if (r1->rc_blockcount == 0)
		return false;

	if (r1->rc_domain != r2->rc_domain)
		return false;
	if (r1->rc_startblock + r1->rc_blockcount != r2->rc_startblock)
		return false;
	if (r1->rc_refcount != r2->rc_refcount)
		return false;
	if ((unsigned long long)r1->rc_blockcount + r2->rc_blockcount >
			XFS_REFC_LEN_MAX)
		return false;

	return true;
}
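
/*
 * Example (hypothetical numbers): the records (startblock=100,
 * blockcount=4, refcount=2) and (startblock=104, blockcount=6,
 * refcount=2) in the same domain are adjacent, carry equal refcounts,
 * and sum to well under XFS_REFC_LEN_MAX, so a healthy btree would have
 * stored them as the single record (100, 10, 2).  Seeing both therefore
 * means the tree was not kept fully merged, and we flag it as corrupt.
 */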

/* Flag failures for records that could be merged. */
STATIC void
xchk_rtrefcountbt_check_mergeable(
	struct xchk_btree		*bs,
	struct xchk_rtrefcbt_records	*rrc,
	const struct xfs_refcount_irec	*irec)
{
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	if (xchk_rtrefcount_mergeable(rrc, irec))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	memcpy(&rrc->prev_rec, irec, sizeof(struct xfs_refcount_irec));
}

STATIC int
xchk_rtrefcountbt_rmap_check_gap(
	struct xfs_btree_cur	*cur,
	const struct xfs_rmap_irec *rec,
	void			*priv)
{
	xfs_rgblock_t		*next_bno = priv;

	if (*next_bno != NULLRGBLOCK && rec->rm_startblock < *next_bno)
		return -ECANCELED;

	*next_bno = rec->rm_startblock + rec->rm_blockcount;
	return 0;
}

/*
 * Make sure that a gap in the reference count records does not correspond to
 * overlapping records (i.e. shared extents) in the reverse mappings.
 */
static inline void
xchk_rtrefcountbt_xref_gaps(
	struct xfs_scrub	*sc,
	struct xchk_rtrefcbt_records *rrc,
	xfs_rtblock_t		bno)
{
	struct xfs_rmap_irec	low;
	struct xfs_rmap_irec	high;
	xfs_rgblock_t		next_bno = NULLRGBLOCK;
	int			error;

	if (bno <= rrc->next_unshared_rgbno || !sc->sr.rmap_cur ||
	    xchk_skip_xref(sc->sm))
		return;

	memset(&low, 0, sizeof(low));
	low.rm_startblock = rrc->next_unshared_rgbno;
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno - 1;

	error = xfs_rmap_query_range(sc->sr.rmap_cur, &low, &high,
			xchk_rtrefcountbt_rmap_check_gap, &next_bno);
	if (error == -ECANCELED)
		xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
	else
		xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur);
}
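
/*
 * Gap-check sketch (hypothetical numbers): if the previous shared record
 * ended at rgbno 200 and the next one starts at rgbno 300, then blocks
 * 200-299 must have at most one owner apiece.  The range query walks the
 * rmaps in that window in ascending rm_startblock order, so any sharing
 * shows up as a record that starts before the previous record's end;
 * xchk_rtrefcountbt_rmap_check_gap reports that overlap as -ECANCELED
 * and we mark the cross-referencing corrupt.
 */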

/* Scrub a rtrefcountbt record. */
STATIC int
xchk_rtrefcountbt_rec(
	struct xchk_btree	*bs,
	const union xfs_btree_rec *rec)
{
	struct xfs_mount	*mp = bs->cur->bc_mp;
	struct xchk_rtrefcbt_records *rrc = bs->private;
	struct xfs_refcount_irec irec;
	u32			mod;

	xfs_refcount_btrec_to_irec(rec, &irec);
	if (xfs_rtrefcount_check_irec(to_rtg(bs->cur->bc_group), &irec) !=
			NULL) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	/* We can only share full rt extents. */
	mod = xfs_rgbno_to_rtxoff(mp, irec.rc_startblock);
	if (mod)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	mod = xfs_extlen_to_rtxmod(mp, irec.rc_blockcount);
	if (mod)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (irec.rc_domain == XFS_REFC_DOMAIN_COW)
		rrc->cow_blocks += irec.rc_blockcount;

	/* Shared records always come before CoW records. */
	if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED &&
	    rrc->prev_domain == XFS_REFC_DOMAIN_COW)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	rrc->prev_domain = irec.rc_domain;

	xchk_rtrefcountbt_check_mergeable(bs, rrc, &irec);
	xchk_rtrefcountbt_xref(bs->sc, &irec);

	/*
	 * If this is a record for a shared extent, check that all blocks
	 * between the previous record and this one have at most one reverse
	 * mapping.
	 */
	if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED) {
		xchk_rtrefcountbt_xref_gaps(bs->sc, rrc, irec.rc_startblock);
		rrc->next_unshared_rgbno = irec.rc_startblock +
				irec.rc_blockcount;
	}

	return 0;
}
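
/*
 * Alignment sketch (assuming a hypothetical rt extent size of 4 fs
 * blocks): a record with rc_startblock = 102 yields
 * xfs_rgbno_to_rtxoff(mp, 102) == 2, and rc_blockcount = 7 yields
 * xfs_extlen_to_rtxmod(mp, 7) == 3.  Either nonzero remainder proves
 * the record does not cover whole rt extents, which the sharing code
 * never creates, so the record is flagged as corrupt.
 */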

/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
xchk_refcount_xref_rmap(
	struct xfs_scrub	*sc,
	const struct xfs_owner_info *btree_oinfo,
	xfs_extlen_t		cow_blocks)
{
	xfs_filblks_t		refcbt_blocks = 0;
	xfs_filblks_t		blocks;
	int			error;

	if (!sc->sr.rmap_cur || !sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many refcbt blocks as the rmap knows about. */
	error = xfs_btree_count_blocks(sc->sr.refc_cur, &refcbt_blocks);
	if (!xchk_btree_process_error(sc, sc->sr.refc_cur, 0, &error))
		return;
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, btree_oinfo,
			&blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != refcbt_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

	/* Check that we saw as many cow blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sr.rmap_cur,
			&XFS_RMAP_OINFO_COW, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
		return;
	if (blocks != cow_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
}

/* Scrub the refcount btree for some realtime group. */
int
xchk_rtrefcountbt(
	struct xfs_scrub	*sc)
{
	struct xfs_owner_info	btree_oinfo;
	struct xchk_rtrefcbt_records rrc = {
		.cow_blocks		= 0,
		.next_unshared_rgbno	= 0,
		.prev_domain		= XFS_REFC_DOMAIN_SHARED,
	};
	int			error;

	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	xfs_rmap_ino_bmbt_owner(&btree_oinfo, rtg_refcount(sc->sr.rtg)->i_ino,
			XFS_DATA_FORK);
	error = xchk_btree(sc, sc->sr.refc_cur, xchk_rtrefcountbt_rec,
			&btree_oinfo, &rrc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/*
	 * Check that all blocks between the last refcount > 1 record and the
	 * end of the rt volume have at most one reverse mapping.
	 */
	xchk_rtrefcountbt_xref_gaps(sc, &rrc, sc->mp->m_sb.sb_rblocks);

	xchk_refcount_xref_rmap(sc, &btree_oinfo, rrc.cow_blocks);

	return 0;
}

/* xref check that a cow staging extent is marked in the rtrefcountbt. */
void
xchk_xref_is_rt_cow_staging(
	struct xfs_scrub	*sc,
	xfs_rgblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_refcount_irec rc;
	int			has_refcount;
	int			error;

	if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
		return;

	/* Find the CoW staging extent. */
	error = xfs_refcount_lookup_le(sc->sr.refc_cur, XFS_REFC_DOMAIN_COW,
			bno, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
		return;
	}

	error = xfs_refcount_get_rec(sc->sr.refc_cur, &rc, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
		return;
	}

	/* CoW lookup returned a shared extent record? */
	if (rc.rc_domain != XFS_REFC_DOMAIN_COW)
		xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);

	/* Must be at least as long as what was passed in */
	if (rc.rc_blockcount < len)
		xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
}
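
/*
 * Lookup sketch (hypothetical numbers): for a staging extent reported at
 * (bno=400, len=8), xfs_refcount_lookup_le positions the cursor at the
 * last CoW-domain record starting at or before rgbno 400.  A healthy
 * rtrefcountbt stages the whole extent in one record, so the record we
 * fetch must be in the CoW domain and must run for at least len blocks;
 * a shared-domain record or an rc_blockcount < 8 means part of the
 * extent was never staged.
 */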

/*
 * xref check that the extent is not shared.  Only file data blocks
 * can have multiple owners.
 */
void
xchk_xref_is_not_rt_shared(
	struct xfs_scrub	*sc,
	xfs_rgblock_t		bno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_refcount_has_records(sc->sr.refc_cur,
			XFS_REFC_DOMAIN_SHARED, bno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
		return;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
}

/* xref check that the extent is not being used for CoW staging. */
void
xchk_xref_is_not_rt_cow_staging(
	struct xfs_scrub	*sc,
	xfs_rgblock_t		bno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_refcount_has_records(sc->sr.refc_cur, XFS_REFC_DOMAIN_COW,
			bno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
		return;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
}