// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2018-2024 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_inode.h"
#include "xfs_rtalloc.h"
#include "xfs_rtgroup.h"
#include "xfs_metafile.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "scrub/repair.h"

/* Set us up with the realtime metadata locked. */
int
xchk_setup_rtrmapbt(
	struct xfs_scrub	*sc)
{
	int			error;

	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	if (xchk_could_repair(sc)) {
		error = xrep_setup_rtrmapbt(sc);
		if (error)
			return error;
	}

	error = xchk_rtgroup_init(sc, sc->sm->sm_agno, &sc->sr);
	if (error)
		return error;

	error = xchk_setup_rt(sc);
	if (error)
		return error;

	error = xchk_install_live_inode(sc, rtg_rmap(sc->sr.rtg));
	if (error)
		return error;

	return xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL);
}
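
/*
 * Illustrative note (not part of the original file): userspace reaches this
 * scrubber through the XFS_IOC_SCRUB_METADATA ioctl, with the scrub type set
 * to the realtime rmap btree and sm_agno carrying the rtgroup number that
 * xchk_rtgroup_init() consumes above.  A minimal sketch, assuming the
 * XFS_SCRUB_TYPE_RTRMAPBT type code:
 *
 *	struct xfs_scrub_metadata	sm = {
 *		.sm_type	= XFS_SCRUB_TYPE_RTRMAPBT,
 *		.sm_agno	= rgno,
 *	};
 *
 *	error = ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm);
 */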

/* Realtime reverse mapping. */

struct xchk_rtrmap {
	/*
	 * The furthest-reaching of the rmapbt records that we've already
	 * processed.  This enables us to detect overlapping records for
	 * space allocations that cannot be shared.
	 */
	struct xfs_rmap_irec	overlap_rec;

	/*
	 * The previous rmapbt record, so that we can check for two records
	 * that could be one.
	 */
	struct xfs_rmap_irec	prev_rec;
};

/* Flag failures for records that overlap but cannot. */
STATIC void
xchk_rtrmapbt_check_overlapping(
	struct xchk_btree		*bs,
	struct xchk_rtrmap		*cr,
	const struct xfs_rmap_irec	*irec)
{
	xfs_rtblock_t			pnext, inext;

	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	/* No previous record? */
	if (cr->overlap_rec.rm_blockcount == 0)
		goto set_prev;

	/* Do overlap_rec and irec overlap? */
	pnext = cr->overlap_rec.rm_startblock + cr->overlap_rec.rm_blockcount;
	if (pnext <= irec->rm_startblock)
		goto set_prev;

	xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	/* Save whichever rmap record extends furthest. */
	inext = irec->rm_startblock + irec->rm_blockcount;
	if (pnext > inext)
		return;

set_prev:
	memcpy(&cr->overlap_rec, irec, sizeof(struct xfs_rmap_irec));
}

/* Decide if two reverse-mapping records can be merged. */
static inline bool
xchk_rtrmap_mergeable(
	struct xchk_rtrmap		*cr,
	const struct xfs_rmap_irec	*r2)
{
	const struct xfs_rmap_irec	*r1 = &cr->prev_rec;

	/* Ignore if prev_rec is not yet initialized. */
	if (cr->prev_rec.rm_blockcount == 0)
		return false;

	if (r1->rm_owner != r2->rm_owner)
		return false;
	if (r1->rm_startblock + r1->rm_blockcount != r2->rm_startblock)
		return false;
	if ((unsigned long long)r1->rm_blockcount + r2->rm_blockcount >
	    XFS_RMAP_LEN_MAX)
		return false;
	if (r1->rm_flags != r2->rm_flags)
		return false;
	return r1->rm_offset + r1->rm_blockcount == r2->rm_offset;
}
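
/*
 * Worked example (not from the original source): suppose prev_rec maps
 * rtblocks [100, 108) to owner 131, data fork offset 0, with no flags set.
 * A following record mapping [108, 112) to owner 131 at offset 8 with the
 * same flags passes every test above (adjacent blocks, same owner and
 * flags, contiguous file offsets, combined length under XFS_RMAP_LEN_MAX),
 * so the two records should have been written as one and the caller flags
 * the btree corrupt.
 */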

/* Flag failures for records that could be merged. */
STATIC void
xchk_rtrmapbt_check_mergeable(
	struct xchk_btree		*bs,
	struct xchk_rtrmap		*cr,
	const struct xfs_rmap_irec	*irec)
{
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	if (xchk_rtrmap_mergeable(cr, irec))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	memcpy(&cr->prev_rec, irec, sizeof(struct xfs_rmap_irec));
}

/* Cross-reference a rmap against the refcount btree. */
STATIC void
xchk_rtrmapbt_xref_rtrefc(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	xfs_rgblock_t		fbno;
	xfs_extlen_t		flen;
	bool			is_inode;
	bool			is_bmbt;
	bool			is_attr;
	bool			is_unwritten;
	int			error;

	if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
		return;

	is_inode = !XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
	is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;

	/* If this is shared, must be a data fork extent. */
	error = xfs_refcount_find_shared(sc->sr.refc_cur, irec->rm_startblock,
			irec->rm_blockcount, &fbno, &flen, false);
	if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
		return;
	if (flen != 0 && (!is_inode || is_attr || is_bmbt || is_unwritten))
		xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
}

/* Cross-reference with other metadata. */
STATIC void
xchk_rtrmapbt_xref(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_rt_space(sc,
			xfs_rgbno_to_rtb(sc->sr.rtg, irec->rm_startblock),
			irec->rm_blockcount);
	if (irec->rm_owner == XFS_RMAP_OWN_COW)
		xchk_xref_is_cow_staging(sc, irec->rm_startblock,
				irec->rm_blockcount);
	else
		xchk_rtrmapbt_xref_rtrefc(sc, irec);
}
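
/*
 * Summary of the cross-referencing above (added for clarity): every record
 * must land in space that the realtime allocator considers in use; records
 * owned by XFS_RMAP_OWN_COW must line up with a CoW staging extent in the
 * realtime refcount btree; anything else that the refcount btree reports as
 * shared must be a written data fork extent (not attr fork, not bmbt, not
 * unwritten).
 */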

/* Scrub a realtime rmapbt record. */
STATIC int
xchk_rtrmapbt_rec(
	struct xchk_btree		*bs,
	const union xfs_btree_rec	*rec)
{
	struct xchk_rtrmap		*cr = bs->private;
	struct xfs_rmap_irec		irec;

	if (xfs_rmap_btrec_to_irec(rec, &irec) != NULL ||
	    xfs_rtrmap_check_irec(to_rtg(bs->cur->bc_group), &irec) != NULL) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	xchk_rtrmapbt_check_mergeable(bs, cr, &irec);
	xchk_rtrmapbt_check_overlapping(bs, cr, &irec);
	xchk_rtrmapbt_xref(bs->sc, &irec);
	return 0;
}

/* Scrub the realtime rmap btree. */
int
xchk_rtrmapbt(
	struct xfs_scrub	*sc)
{
	struct xfs_inode	*ip = rtg_rmap(sc->sr.rtg);
	struct xfs_owner_info	oinfo;
	struct xchk_rtrmap	cr = { };
	int			error;

	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, XFS_DATA_FORK);
	return xchk_btree(sc, sc->sr.rmap_cur, xchk_rtrmapbt_rec, &oinfo, &cr);
}

/* xref check that the extent has no realtime reverse mapping at all */
void
xchk_xref_has_no_rt_owner(
	struct xfs_scrub	*sc,
	xfs_rgblock_t		bno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_has_records(sc->sr.rmap_cur, bno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
		return;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
}
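
/*
 * Note (added for clarity): xfs_rmap_has_records() classifies the queried
 * keyspace as empty, fully covered, or only partially covered by records.
 * The helper above demands XBTREE_RECPACKING_EMPTY (no rt rmap at all);
 * xchk_xref_has_rt_owner() below demands XBTREE_RECPACKING_FULL (every
 * rtblock in the range is mapped by someone).
 */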

/* xref check that the extent is completely mapped */
void
xchk_xref_has_rt_owner(
	struct xfs_scrub	*sc,
	xfs_rgblock_t		bno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_has_records(sc->sr.rmap_cur, bno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
		return;
	if (outcome != XBTREE_RECPACKING_FULL)
		xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
}

/* xref check that the extent is only owned by a given owner */
void
xchk_xref_is_only_rt_owned_by(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo)
{
	struct xfs_rmap_matches		res;
	int				error;

	if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_count_owners(sc->sr.rmap_cur, bno, len, oinfo, &res);
	if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
		return;
	if (res.matches != 1)
		xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
	if (res.bad_non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
	if (res.non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
}
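
/*
 * Example caller (illustrative only, not part of this file): a data fork
 * scrubber that wants to prove a mapped rt extent belongs solely to the
 * file being checked might do something like the sketch below, assuming it
 * already holds the rtgroup rmap cursor and has converted the mapping to
 * rtgroup block units:
 *
 *	struct xfs_owner_info	oinfo;
 *
 *	xfs_rmap_ino_owner(&oinfo, ip->i_ino, XFS_DATA_FORK,
 *			irec->br_startoff);
 *	xchk_xref_is_only_rt_owned_by(sc, rgbno, irec->br_blockcount, &oinfo);
 */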