// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2018-2024 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_inode.h"
#include "xfs_rtalloc.h"
#include "xfs_rtgroup.h"
#include "xfs_metafile.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/* Set us up with the realtime metadata locked. */
int
xchk_setup_rtrmapbt(
	struct xfs_scrub	*sc)
{
	int			error;

	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	error = xchk_rtgroup_init(sc, sc->sm->sm_agno, &sc->sr);
	if (error)
		return error;

	error = xchk_setup_rt(sc);
	if (error)
		return error;

	error = xchk_install_live_inode(sc, rtg_rmap(sc->sr.rtg));
	if (error)
		return error;

	return xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL);
}

/* Realtime reverse mapping. */

struct xchk_rtrmap {
	/*
	 * The furthest-reaching of the rmapbt records that we've already
	 * processed.  This enables us to detect overlapping records for space
	 * allocations that cannot be shared.
	 */
	struct xfs_rmap_irec	overlap_rec;

	/*
	 * The previous rmapbt record, so that we can check for two records
	 * that could be one.
	 */
	struct xfs_rmap_irec	prev_rec;
};

/* Flag failures for records that overlap but cannot. */
STATIC void
xchk_rtrmapbt_check_overlapping(
	struct xchk_btree		*bs,
	struct xchk_rtrmap		*cr,
	const struct xfs_rmap_irec	*irec)
{
	xfs_rtblock_t			pnext, inext;

	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	/* No previous record? */
	if (cr->overlap_rec.rm_blockcount == 0)
		goto set_prev;

	/* Do overlap_rec and irec overlap? */
	pnext = cr->overlap_rec.rm_startblock + cr->overlap_rec.rm_blockcount;
	if (pnext <= irec->rm_startblock)
		goto set_prev;

	xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	/* Save whichever rmap record extends furthest. */
	inext = irec->rm_startblock + irec->rm_blockcount;
	if (pnext > inext)
		return;

set_prev:
	memcpy(&cr->overlap_rec, irec, sizeof(struct xfs_rmap_irec));
}

/* Decide if two reverse-mapping records can be merged. */
static inline bool
xchk_rtrmap_mergeable(
	struct xchk_rtrmap		*cr,
	const struct xfs_rmap_irec	*r2)
{
	const struct xfs_rmap_irec	*r1 = &cr->prev_rec;

	/* Ignore if prev_rec is not yet initialized. */
	if (cr->prev_rec.rm_blockcount == 0)
		return false;

	if (r1->rm_owner != r2->rm_owner)
		return false;
	if (r1->rm_startblock + r1->rm_blockcount != r2->rm_startblock)
		return false;
	if ((unsigned long long)r1->rm_blockcount + r2->rm_blockcount >
	    XFS_RMAP_LEN_MAX)
		return false;
	if (r1->rm_flags != r2->rm_flags)
		return false;
	return r1->rm_offset + r1->rm_blockcount == r2->rm_offset;
}

/* Flag failures for records that could be merged. */
STATIC void
xchk_rtrmapbt_check_mergeable(
	struct xchk_btree		*bs,
	struct xchk_rtrmap		*cr,
	const struct xfs_rmap_irec	*irec)
{
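	/* Nothing to do if corruption has already been noted. */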
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	if (xchk_rtrmap_mergeable(cr, irec))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	memcpy(&cr->prev_rec, irec, sizeof(struct xfs_rmap_irec));
}

/* Scrub a realtime rmapbt record. */
STATIC int
xchk_rtrmapbt_rec(
	struct xchk_btree		*bs,
	const union xfs_btree_rec	*rec)
{
	struct xchk_rtrmap		*cr = bs->private;
	struct xfs_rmap_irec		irec;

	if (xfs_rmap_btrec_to_irec(rec, &irec) != NULL ||
	    xfs_rtrmap_check_irec(to_rtg(bs->cur->bc_group), &irec) != NULL) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	xchk_rtrmapbt_check_mergeable(bs, cr, &irec);
	xchk_rtrmapbt_check_overlapping(bs, cr, &irec);
	return 0;
}

/* Scrub the realtime rmap btree. */
int
xchk_rtrmapbt(
	struct xfs_scrub	*sc)
{
	struct xfs_inode	*ip = rtg_rmap(sc->sr.rtg);
	struct xfs_owner_info	oinfo;
	struct xchk_rtrmap	cr = { };
	int			error;

	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, XFS_DATA_FORK);
	return xchk_btree(sc, sc->sr.rmap_cur, xchk_rtrmapbt_rec, &oinfo, &cr);
}