// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2020-2024 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_buf_mem.h"
#include "xfs_btree_mem.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_quota.h"
#include "xfs_rtalloc.h"
#include "xfs_ag.h"
#include "xfs_rtgroup.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"
#include "scrub/fsb_bitmap.h"
#include "scrub/rgb_bitmap.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
#include "scrub/iscan.h"
#include "scrub/newbt.h"
#include "scrub/reap.h"

/*
 * Realtime Reverse Mapping Btree Repair
 * =====================================
 *
 * This isn't quite as difficult as repairing the rmap btree on the data
 * device, since we only store the data fork extents of realtime files on the
 * realtime device.  We still have to coordinate with live rmap updates (via
 * the rmap hooks) like we do for the data device rmap repair, but we only
 * have to scan realtime inodes.
 *
 * Collecting entries for the new realtime rmap btree is easy -- all we have
 * to do is generate rtrmap entries from the data fork mappings of all realtime
 * files in the filesystem.  We then scan the rmap btrees of the data device
 * looking for extents belonging to the old btree and note them in a bitmap.
 *
 * To rebuild the realtime rmap btree, we bulk-load the collected mappings into
 * a new btree cursor and atomically swap that into the realtime inode.  Then
 * we can free the blocks from the old btree.
 *
 * We use the 'xrep_rtrmap' prefix for all the rmap functions.
 */

/* Context for collecting rmaps */
struct xrep_rtrmap {
	/* new rtrmapbt information */
	struct xrep_newbt	new_btree;

	/* lock for the xfbtree and xfile */
	struct mutex		lock;

	/* rmap records generated from primary metadata */
	struct xfbtree		rtrmap_btree;

	struct xfs_scrub	*sc;

	/* bitmap of old rtrmapbt blocks */
	struct xfsb_bitmap	old_rtrmapbt_blocks;

	/* Hooks into rtrmap update code. */
	struct xfs_rmap_hook	rhook;

	/* inode scan cursor */
	struct xchk_iscan	iscan;

	/* in-memory btree cursor for the ->get_records walk */
	struct xfs_btree_cur	*mcur;

	/* Number of records we're staging in the new btree. */
	uint64_t		nr_records;
};

/* Set us up to repair rt reverse mapping btrees. */
int
xrep_setup_rtrmapbt(
	struct xfs_scrub	*sc)
{
	struct xrep_rtrmap	*rr;
	char			*descr;
	int			error;

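	/*
	 * Turn on the rmap update hooks; the live scan uses them to keep its
	 * in-memory copy of the rtrmap records up to date.
	 */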
	xchk_fsgates_enable(sc, XCHK_FSGATES_RMAP);

	descr = xchk_xfile_rtgroup_descr(sc, "reverse mapping records");
	error = xrep_setup_xfbtree(sc, descr);
	kfree(descr);
	if (error)
		return error;

	rr = kzalloc(sizeof(struct xrep_rtrmap), XCHK_GFP_FLAGS);
	if (!rr)
		return -ENOMEM;

	rr->sc = sc;
	sc->buf = rr;
	return 0;
}

/* Make sure there's nothing funny about this mapping. */
STATIC int
xrep_rtrmap_check_mapping(
	struct xfs_scrub		*sc,
	const struct xfs_rmap_irec	*rec)
{
	if (xfs_rtrmap_check_irec(sc->sr.rtg, rec) != NULL)
		return -EFSCORRUPTED;

	/* Make sure this isn't free space. */
	return xrep_require_rtext_inuse(sc, rec->rm_startblock,
			rec->rm_blockcount);
}

/* Store a reverse-mapping record. */
static inline int
xrep_rtrmap_stash(
	struct xrep_rtrmap	*rr,
	xfs_rgblock_t		startblock,
	xfs_extlen_t		blockcount,
	uint64_t		owner,
	uint64_t		offset,
	unsigned int		flags)
{
	struct xfs_rmap_irec	rmap = {
		.rm_startblock	= startblock,
		.rm_blockcount	= blockcount,
		.rm_owner	= owner,
		.rm_offset	= offset,
		.rm_flags	= flags,
	};
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_btree_cur	*mcur;
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

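	/*
	 * If a live update hook has already failed, the in-memory btree no
	 * longer reflects the filesystem, so stop collecting records.
	 */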
	if (xchk_iscan_aborted(&rr->iscan))
		return -EFSCORRUPTED;

	trace_xrep_rtrmap_found(sc->mp, &rmap);

	/* Add entry to in-memory btree. */
	mutex_lock(&rr->lock);
	mcur = xfs_rtrmapbt_mem_cursor(sc->sr.rtg, sc->tp, &rr->rtrmap_btree);
	error = xfs_rmap_map_raw(mcur, &rmap);
	xfs_btree_del_cursor(mcur, error);
	if (error)
		goto out_cancel;

	error = xfbtree_trans_commit(&rr->rtrmap_btree, sc->tp);
	if (error)
		goto out_abort;

	mutex_unlock(&rr->lock);
	return 0;

out_cancel:
	xfbtree_trans_cancel(&rr->rtrmap_btree, sc->tp);
out_abort:
	xchk_iscan_abort(&rr->iscan);
	mutex_unlock(&rr->lock);
	return error;
}

/* Finding all file and bmbt extents. */

/* Context for accumulating rmaps for an inode fork. */
struct xrep_rtrmap_ifork {
	/*
	 * Accumulate rmap data here to turn multiple adjacent bmaps into a
	 * single rmap.
	 */
	struct xfs_rmap_irec	accum;

	struct xrep_rtrmap	*rr;
};

/* Stash an rmap that we accumulated while walking an inode fork. */
STATIC int
xrep_rtrmap_stash_accumulated(
	struct xrep_rtrmap_ifork	*rf)
{
	if (rf->accum.rm_blockcount == 0)
		return 0;

	return xrep_rtrmap_stash(rf->rr, rf->accum.rm_startblock,
			rf->accum.rm_blockcount, rf->accum.rm_owner,
			rf->accum.rm_offset, rf->accum.rm_flags);
}

/* Accumulate a bmbt record. */
STATIC int
xrep_rtrmap_visit_bmbt(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*rec,
	void			*priv)
{
	struct xrep_rtrmap_ifork	*rf = priv;
	struct xfs_rmap_irec		*accum = &rf->accum;
	struct xfs_mount		*mp = rf->rr->sc->mp;
	xfs_rgblock_t			rgbno;
	unsigned int			rmap_flags = 0;
	int				error;

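	/* Skip mappings that are not in the rtgroup we are repairing. */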
	if (xfs_rtb_to_rgno(mp, rec->br_startblock) !=
	    rtg_rgno(rf->rr->sc->sr.rtg))
		return 0;

	if (rec->br_state == XFS_EXT_UNWRITTEN)
		rmap_flags |= XFS_RMAP_UNWRITTEN;

	/* If this bmap is adjacent to the previous one, just add it. */
	rgbno = xfs_rtb_to_rgbno(mp, rec->br_startblock);
	if (accum->rm_blockcount > 0 &&
	    rec->br_startoff == accum->rm_offset + accum->rm_blockcount &&
	    rgbno == accum->rm_startblock + accum->rm_blockcount &&
	    rmap_flags == accum->rm_flags) {
		accum->rm_blockcount += rec->br_blockcount;
		return 0;
	}

	/* Otherwise stash the old rmap and start accumulating a new one. */
	error = xrep_rtrmap_stash_accumulated(rf);
	if (error)
		return error;

	accum->rm_startblock = rgbno;
	accum->rm_blockcount = rec->br_blockcount;
	accum->rm_offset = rec->br_startoff;
	accum->rm_flags = rmap_flags;
	return 0;
}

/*
 * Iterate the block mapping btree to collect rmap records for anything in
 * this fork that maps to the rt volume.  Sets @mappings_done to true if we've
 * scanned the block mappings in this fork.
 */
STATIC int
xrep_rtrmap_scan_bmbt(
	struct xrep_rtrmap_ifork	*rf,
	struct xfs_inode		*ip,
	bool				*mappings_done)
{
	struct xrep_rtrmap	*rr = rf->rr;
	struct xfs_btree_cur	*cur;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	int			error = 0;

	*mappings_done = false;

	/*
	 * If the incore extent cache is already loaded, we'll just use the
	 * incore extent scanner to record mappings.  Don't bother walking the
	 * ondisk extent tree.
	 */
	if (!xfs_need_iread_extents(ifp))
		return 0;

	/* Accumulate all the mappings in the bmap btree. */
	cur = xfs_bmbt_init_cursor(rr->sc->mp, rr->sc->tp, ip, XFS_DATA_FORK);
	error = xfs_bmap_query_all(cur, xrep_rtrmap_visit_bmbt, rf);
	xfs_btree_del_cursor(cur, error);
	if (error)
		return error;

	/* Stash any remaining accumulated rmaps and exit. */
	*mappings_done = true;
	return xrep_rtrmap_stash_accumulated(rf);
}

/*
 * Iterate the in-core extent cache to collect rmap records for anything in
 * this fork that maps to this realtime group.
 */
STATIC int
xrep_rtrmap_scan_iext(
	struct xrep_rtrmap_ifork	*rf,
	struct xfs_ifork		*ifp)
{
	struct xfs_bmbt_irec	rec;
	struct xfs_iext_cursor	icur;
	int			error;

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		error = xrep_rtrmap_visit_bmbt(NULL, &rec, rf);
		if (error)
			return error;
	}

	return xrep_rtrmap_stash_accumulated(rf);
}

/* Find all the extents on the realtime device mapped by an inode fork. */
STATIC int
xrep_rtrmap_scan_dfork(
	struct xrep_rtrmap	*rr,
	struct xfs_inode	*ip)
{
	struct xrep_rtrmap_ifork	rf = {
		.accum		= { .rm_owner = ip->i_ino, },
		.rr		= rr,
	};
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	int			error = 0;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		bool		mappings_done;

		/*
		 * Scan the bmbt for mappings.  If the incore extent tree is
		 * loaded, we want to scan the cached mappings since that's
		 * faster when the extent counts are very high.
		 */
		error = xrep_rtrmap_scan_bmbt(&rf, ip, &mappings_done);
		if (error || mappings_done)
			return error;
	} else if (ifp->if_format != XFS_DINODE_FMT_EXTENTS) {
		/* realtime data forks should only be extents or btree */
		return -EFSCORRUPTED;
	}

	/* Scan incore extent cache. */
	return xrep_rtrmap_scan_iext(&rf, ifp);
}

/* Record reverse mappings for a file. */
STATIC int
xrep_rtrmap_scan_inode(
	struct xrep_rtrmap	*rr,
	struct xfs_inode	*ip)
{
	unsigned int		lock_mode;
	int			error = 0;

	/* Skip the rt rmap btree inode. */
	if (rr->sc->ip == ip)
		return 0;

	lock_mode = xfs_ilock_data_map_shared(ip);

	/* Check the data fork if it's on the realtime device. */
	if (XFS_IS_REALTIME_INODE(ip)) {
		error = xrep_rtrmap_scan_dfork(rr, ip);
		if (error)
			goto out_unlock;
	}

	xchk_iscan_mark_visited(&rr->iscan, ip);
out_unlock:
	xfs_iunlock(ip, lock_mode);
	return error;
}

/* Record extents that belong to the realtime rmap inode. */
STATIC int
xrep_rtrmap_walk_rmap(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xrep_rtrmap	*rr = priv;
	int			error = 0;

	if (xchk_should_terminate(rr->sc, &error))
		return error;

	/* Skip extents which are not owned by this inode and fork. */
	if (rec->rm_owner != rr->sc->ip->i_ino)
		return 0;

	error = xrep_check_ino_btree_mapping(rr->sc, rec);
	if (error)
		return error;

	return xfsb_bitmap_set(&rr->old_rtrmapbt_blocks,
			xfs_gbno_to_fsb(cur->bc_group, rec->rm_startblock),
			rec->rm_blockcount);
}

/* Scan one AG for reverse mappings for the realtime rmap btree. */
STATIC int
xrep_rtrmap_scan_ag(
	struct xrep_rtrmap	*rr,
	struct xfs_perag	*pag)
{
	struct xfs_scrub	*sc = rr->sc;
	int			error;

	error = xrep_ag_init(sc, pag, &sc->sa);
	if (error)
		return error;

	error = xfs_rmap_query_all(sc->sa.rmap_cur, xrep_rtrmap_walk_rmap, rr);
	xchk_ag_free(sc, &sc->sa);
	return error;
}

struct xrep_rtrmap_stash_run {
	struct xrep_rtrmap	*rr;
	uint64_t		owner;
};

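/* Emit an rtrmap record for each run of set bits in a bitmap walk. */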
static int
xrep_rtrmap_stash_run(
	uint32_t			start,
	uint32_t			len,
	void				*priv)
{
	struct xrep_rtrmap_stash_run	*rsr = priv;
	struct xrep_rtrmap		*rr = rsr->rr;
	xfs_rgblock_t			rgbno = start;

	return xrep_rtrmap_stash(rr, rgbno, len, rsr->owner, 0, 0);
}

/*
 * Emit rmaps for every extent of bits set in the bitmap.  Caller must ensure
 * that the ranges are in units of rtgroup blocks.
 */
STATIC int
xrep_rtrmap_stash_bitmap(
	struct xrep_rtrmap		*rr,
	struct xrgb_bitmap		*bitmap,
	const struct xfs_owner_info	*oinfo)
{
	struct xrep_rtrmap_stash_run	rsr = {
		.rr			= rr,
		.owner			= oinfo->oi_owner,
	};

	return xrgb_bitmap_walk(bitmap, xrep_rtrmap_stash_run, &rsr);
}

/* Record a CoW staging extent. */
STATIC int
xrep_rtrmap_walk_cowblocks(
	struct xfs_btree_cur		*cur,
	const struct xfs_refcount_irec	*irec,
	void				*priv)
{
	struct xrgb_bitmap		*bitmap = priv;

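	/*
	 * We queried only the CoW staging domain, so any record that is not a
	 * well-formed CoW record means the refcount btree is corrupt.
	 */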
	if (!xfs_refcount_check_domain(irec) ||
	    irec->rc_domain != XFS_REFC_DOMAIN_COW)
		return -EFSCORRUPTED;

	return xrgb_bitmap_set(bitmap, irec->rc_startblock,
			irec->rc_blockcount);
}

/*
 * Collect rmaps for all the CoW staging extents recorded in the realtime
 * refcount btree.
 */
STATIC int
xrep_rtrmap_find_refcount_rmaps(
	struct xrep_rtrmap	*rr)
{
	struct xrgb_bitmap	cow_blocks;		/* COWBIT */
	struct xfs_refcount_irec low = {
		.rc_startblock	= 0,
		.rc_domain	= XFS_REFC_DOMAIN_COW,
	};
	struct xfs_refcount_irec high = {
		.rc_startblock	= -1U,
		.rc_domain	= XFS_REFC_DOMAIN_COW,
	};
	struct xfs_scrub	*sc = rr->sc;
	int			error;

	if (!xfs_has_rtreflink(sc->mp))
		return 0;

	xrgb_bitmap_init(&cow_blocks);

	/* Collect rmaps for CoW staging extents. */
	error = xfs_refcount_query_range(sc->sr.refc_cur, &low, &high,
			xrep_rtrmap_walk_cowblocks, &cow_blocks);
	if (error)
		goto out_bitmap;

	/* Generate rmaps for all the CoW staging extents we found. */
	error = xrep_rtrmap_stash_bitmap(rr, &cow_blocks, &XFS_RMAP_OINFO_COW);
	if (error)
		goto out_bitmap;

out_bitmap:
	xrgb_bitmap_destroy(&cow_blocks);
	return error;
}

/* Count and check all collected records. */
STATIC int
xrep_rtrmap_check_record(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xrep_rtrmap	*rr = priv;
	int			error;

	error = xrep_rtrmap_check_mapping(rr->sc, rec);
	if (error)
		return error;

	rr->nr_records++;
	return 0;
}

/* Generate all the reverse-mappings for the realtime device. */
STATIC int
xrep_rtrmap_find_rmaps(
	struct xrep_rtrmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_perag	*pag = NULL;
	struct xfs_inode	*ip;
	struct xfs_btree_cur	*mcur;
	int			error;

	/* Generate rmaps for the realtime superblock */
	if (xfs_has_rtsb(sc->mp) && rtg_rgno(rr->sc->sr.rtg) == 0) {
		error = xrep_rtrmap_stash(rr, 0, sc->mp->m_sb.sb_rextsize,
				XFS_RMAP_OWN_FS, 0, 0);
		if (error)
			return error;
	}

	/* Find CoW staging extents. */
	xrep_rtgroup_btcur_init(sc, &sc->sr);
	error = xrep_rtrmap_find_refcount_rmaps(rr);
	xchk_rtgroup_btcur_free(&sc->sr);
	if (error)
		return error;

	/*
	 * Set up for a potentially lengthy filesystem scan by reducing our
	 * transaction resource usage for the duration.  Specifically:
	 *
	 * Unlock the realtime metadata inodes and cancel the transaction to
	 * release the log grant space while we scan the filesystem.
	 *
	 * Create a new empty transaction to eliminate the possibility of the
	 * inode scan deadlocking on cyclical metadata.
	 *
	 * We pass the empty transaction to the file scanning function to avoid
	 * repeatedly cycling empty transactions.  This can be done even though
	 * we take the IOLOCK to quiesce the file because empty transactions
	 * do not take sb_internal.
	 */
	xchk_trans_cancel(sc);
	xchk_rtgroup_unlock(&sc->sr);
	error = xchk_trans_alloc_empty(sc);
	if (error)
		return error;

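	/* Scan every realtime file in the filesystem for reverse mappings. */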
	while ((error = xchk_iscan_iter(&rr->iscan, &ip)) == 1) {
		error = xrep_rtrmap_scan_inode(rr, ip);
		xchk_irele(sc, ip);
		if (error)
			break;

		if (xchk_should_terminate(sc, &error))
			break;
	}
	xchk_iscan_iter_finish(&rr->iscan);
	if (error)
		return error;

	/*
	 * Switch out for a real transaction and lock the RT metadata in
	 * preparation for building a new tree.
	 */
	xchk_trans_cancel(sc);
	error = xchk_setup_rt(sc);
	if (error)
		return error;
	error = xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL);
	if (error)
		return error;

	/*
	 * If a hook failed to update the in-memory btree, we lack the data to
	 * continue the repair.
	 */
	if (xchk_iscan_aborted(&rr->iscan))
		return -EFSCORRUPTED;

	/* Scan for old rtrmap blocks. */
	while ((pag = xfs_perag_next(sc->mp, pag))) {
		error = xrep_rtrmap_scan_ag(rr, pag);
		if (error) {
			xfs_perag_rele(pag);
			return error;
		}
	}

	/*
	 * Now that we have everything locked again, we need to count the
	 * number of rmap records stashed in the btree.  This should reflect
	 * all actively-owned rt files in the filesystem.  At the same time,
	 * check all our records before we start building a new btree, which
	 * requires the rtbitmap lock.
	 */
	mcur = xfs_rtrmapbt_mem_cursor(rr->sc->sr.rtg, NULL, &rr->rtrmap_btree);
	rr->nr_records = 0;
	error = xfs_rmap_query_all(mcur, xrep_rtrmap_check_record, rr);
	xfs_btree_del_cursor(mcur, error);

	return error;
}

/* Building the new rtrmap btree. */

/* Retrieve rtrmapbt data for bulk load. */
STATIC int
xrep_rtrmap_get_records(
	struct xfs_btree_cur	*cur,
	unsigned int		idx,
	struct xfs_btree_block	*block,
	unsigned int		nr_wanted,
	void			*priv)
{
	struct xrep_rtrmap	*rr = priv;
	union xfs_btree_rec	*block_rec;
	unsigned int		loaded;
	int			error;

	for (loaded = 0; loaded < nr_wanted; loaded++, idx++) {
		int		stat = 0;

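		/*
		 * Advance to the next record stashed in the in-memory btree;
		 * the cursor was positioned at the left edge before the bulk
		 * load started, so this walks the records in order.
		 */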
		error = xfs_btree_increment(rr->mcur, 0, &stat);
		if (error)
			return error;
		if (!stat)
			return -EFSCORRUPTED;

		error = xfs_rmap_get_rec(rr->mcur, &cur->bc_rec.r, &stat);
		if (error)
			return error;
		if (!stat)
			return -EFSCORRUPTED;

		block_rec = xfs_btree_rec_addr(cur, idx, block);
		cur->bc_ops->init_rec_from_cur(cur, block_rec);
	}

	return loaded;
}

/* Feed one of the new btree blocks to the bulk loader. */
STATIC int
xrep_rtrmap_claim_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	void			*priv)
{
	struct xrep_rtrmap	*rr = priv;

	return xrep_newbt_claim_block(cur, &rr->new_btree, ptr);
}

/* Figure out how much space we need to create the incore btree root block. */
STATIC size_t
xrep_rtrmap_iroot_size(
	struct xfs_btree_cur	*cur,
	unsigned int		level,
	unsigned int		nr_this_level,
	void			*priv)
{
	return xfs_rtrmap_broot_space_calc(cur->bc_mp, level, nr_this_level);
}

/*
 * Use the collected rmap information to stage a new rmap btree.  If this is
 * successful we'll return with the new btree root information logged to the
 * repair transaction but not yet committed.
 */
STATIC int
xrep_rtrmap_build_new_tree(
	struct xrep_rtrmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_rtgroup	*rtg = sc->sr.rtg;
	struct xfs_btree_cur	*rmap_cur;
	int			error;

	/*
	 * Prepare to construct the new btree by reserving disk space for the
	 * new btree and setting up all the accounting information we'll need
	 * to root the new btree while it's under construction and before we
	 * attach it to the realtime rmapbt inode.
	 */
	error = xrep_newbt_init_metadir_inode(&rr->new_btree, sc);
	if (error)
		return error;

	rr->new_btree.bload.get_records = xrep_rtrmap_get_records;
	rr->new_btree.bload.claim_block = xrep_rtrmap_claim_block;
	rr->new_btree.bload.iroot_size = xrep_rtrmap_iroot_size;

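	/*
	 * Set up a staging cursor with a fake in-core root so that the bulk
	 * loader builds the new btree without touching the live rtrmap inode.
	 */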
	rmap_cur = xfs_rtrmapbt_init_cursor(NULL, rtg);
	xfs_btree_stage_ifakeroot(rmap_cur, &rr->new_btree.ifake);

	/* Compute how many blocks we'll need for the rmaps collected. */
	error = xfs_btree_bload_compute_geometry(rmap_cur,
			&rr->new_btree.bload, rr->nr_records);
	if (error)
		goto err_cur;

	/* Last chance to abort before we start committing fixes. */
	if (xchk_should_terminate(sc, &error))
		goto err_cur;

	/*
	 * Guess how many blocks we're going to need to rebuild an entire
	 * rtrmapbt from the number of extents we found, and pump up our
	 * transaction to have sufficient block reservation.  We're allowed
	 * to exceed quota to repair inconsistent metadata, though this is
	 * unlikely.
	 */
	error = xfs_trans_reserve_more_inode(sc->tp, rtg_rmap(rtg),
			rr->new_btree.bload.nr_blocks, 0, true);
	if (error)
		goto err_cur;

	/* Reserve the space we'll need for the new btree. */
	error = xrep_newbt_alloc_blocks(&rr->new_btree,
			rr->new_btree.bload.nr_blocks);
	if (error)
		goto err_cur;

	/*
	 * Create a cursor to the in-memory btree so that we can bulk load the
	 * new btree.
	 */
	rr->mcur = xfs_rtrmapbt_mem_cursor(sc->sr.rtg, NULL, &rr->rtrmap_btree);
	error = xfs_btree_goto_left_edge(rr->mcur);
	if (error)
		goto err_mcur;

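	/*
	 * The new btree is rooted in the rtrmap inode's data fork, so set the
	 * fake root's fork format before loading any records into it.
	 */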
	/* Add all observed rmap records. */
	rr->new_btree.ifake.if_fork->if_format = XFS_DINODE_FMT_META_BTREE;
	error = xfs_btree_bload(rmap_cur, &rr->new_btree.bload, rr);
	if (error)
		goto err_mcur;

	/*
	 * Install the new rtrmap btree in the inode.  After this point the old
	 * btree is no longer accessible, the new tree is live, and we can
	 * delete the cursor.
	 */
	xfs_rtrmapbt_commit_staged_btree(rmap_cur, sc->tp);
	xrep_inode_set_nblocks(rr->sc, rr->new_btree.ifake.if_blocks);
	xfs_btree_del_cursor(rmap_cur, 0);
	xfs_btree_del_cursor(rr->mcur, 0);
	rr->mcur = NULL;

	/*
	 * Now that we've written the new btree to disk, we don't need to keep
	 * updating the in-memory btree.  Abort the scan to stop live updates.
	 */
	xchk_iscan_abort(&rr->iscan);

	/* Dispose of any unused blocks and the accounting information. */
	error = xrep_newbt_commit(&rr->new_btree);
	if (error)
		return error;

	return xrep_roll_trans(sc);

err_mcur:
	xfs_btree_del_cursor(rr->mcur, error);
err_cur:
	xfs_btree_del_cursor(rmap_cur, error);
	xrep_newbt_cancel(&rr->new_btree);
	return error;
}

/* Reaping the old btree. */

/* Reap the old rtrmapbt blocks. */
STATIC int
xrep_rtrmap_remove_old_tree(
	struct xrep_rtrmap	*rr)
{
	int			error;

	/*
	 * Free all the extents that were allocated to the former rtrmapbt and
	 * aren't cross-linked with something else.
	 */
	error = xrep_reap_metadir_fsblocks(rr->sc, &rr->old_rtrmapbt_blocks);
	if (error)
		return error;

	/*
	 * Ensure the proper reservation for the rtrmap inode so that we don't
	 * fail to expand the new btree.
	 */
	return xrep_reset_metafile_resv(rr->sc);
}

static inline bool
xrep_rtrmapbt_want_live_update(
	struct xchk_iscan		*iscan,
	const struct xfs_owner_info	*oi)
{
	if (xchk_iscan_aborted(iscan))
		return false;

	/*
	 * We scanned the CoW staging extents before we started the iscan, so
	 * we need all the updates.
	 */
	if (XFS_RMAP_NON_INODE_OWNER(oi->oi_owner))
		return true;

	/* Ignore updates to files that the scanner hasn't visited yet. */
	return xchk_iscan_want_live_update(iscan, oi->oi_owner);
}

/*
 * Apply a rtrmapbt update from the regular filesystem into our shadow btree.
 * We're running from the thread that owns the rtrmap ILOCK and is generating
 * the update, so we must be careful about which parts of the struct
 * xrep_rtrmap that we change.
 */
static int
xrep_rtrmapbt_live_update(
	struct notifier_block		*nb,
	unsigned long			action,
	void				*data)
{
	struct xfs_rmap_update_params	*p = data;
	struct xrep_rtrmap		*rr;
	struct xfs_mount		*mp;
	struct xfs_btree_cur		*mcur;
	struct xfs_trans		*tp;
	void				*txcookie;
	int				error;

	rr = container_of(nb, struct xrep_rtrmap, rhook.rmap_hook.nb);
	mp = rr->sc->mp;

	if (!xrep_rtrmapbt_want_live_update(&rr->iscan, &p->oinfo))
		goto out_unlock;

	trace_xrep_rmap_live_update(rtg_group(rr->sc->sr.rtg), action, p);

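	/*
	 * The in-memory btree is backed by the buffer cache, so we need a
	 * dummy transaction context to update it even though we are not
	 * changing any on-disk metadata here.
	 */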
	error = xrep_trans_alloc_hook_dummy(mp, &txcookie, &tp);
	if (error)
		goto out_abort;

	mutex_lock(&rr->lock);
	mcur = xfs_rtrmapbt_mem_cursor(rr->sc->sr.rtg, tp, &rr->rtrmap_btree);
	error = __xfs_rmap_finish_intent(mcur, action, p->startblock,
			p->blockcount, &p->oinfo, p->unwritten);
	xfs_btree_del_cursor(mcur, error);
	if (error)
		goto out_cancel;

	error = xfbtree_trans_commit(&rr->rtrmap_btree, tp);
	if (error)
		goto out_cancel;

	xrep_trans_cancel_hook_dummy(&txcookie, tp);
	mutex_unlock(&rr->lock);
	return NOTIFY_DONE;

out_cancel:
	xfbtree_trans_cancel(&rr->rtrmap_btree, tp);
	xrep_trans_cancel_hook_dummy(&txcookie, tp);
out_abort:
	xchk_iscan_abort(&rr->iscan);
	mutex_unlock(&rr->lock);
out_unlock:
	return NOTIFY_DONE;
}

/* Set up the filesystem scan components. */
STATIC int
xrep_rtrmap_setup_scan(
	struct xrep_rtrmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	int			error;

	mutex_init(&rr->lock);
	xfsb_bitmap_init(&rr->old_rtrmapbt_blocks);

	/* Set up some storage */
	error = xfs_rtrmapbt_mem_init(sc->mp, &rr->rtrmap_btree, sc->xmbtp,
			rtg_rgno(sc->sr.rtg));
	if (error)
		goto out_bitmap;

	/* Retry iget every tenth of a second for up to 30 seconds. */
	xchk_iscan_start(sc, 30000, 100, &rr->iscan);

	/*
	 * Hook into live rtrmap operations so that we can update our in-memory
	 * btree to reflect live changes on the filesystem.  Since we drop the
	 * rtrmap ILOCK to scan all the inodes, we need this piece to avoid
	 * installing a stale btree.
	 */
	ASSERT(sc->flags & XCHK_FSGATES_RMAP);
	xfs_rmap_hook_setup(&rr->rhook, xrep_rtrmapbt_live_update);
	error = xfs_rmap_hook_add(rtg_group(sc->sr.rtg), &rr->rhook);
	if (error)
		goto out_iscan;
	return 0;

out_iscan:
	xchk_iscan_teardown(&rr->iscan);
	xfbtree_destroy(&rr->rtrmap_btree);
out_bitmap:
	xfsb_bitmap_destroy(&rr->old_rtrmapbt_blocks);
	mutex_destroy(&rr->lock);
	return error;
}

/* Tear down scan components. */
STATIC void
xrep_rtrmap_teardown(
	struct xrep_rtrmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;

	xchk_iscan_abort(&rr->iscan);
	xfs_rmap_hook_del(rtg_group(sc->sr.rtg), &rr->rhook);
	xchk_iscan_teardown(&rr->iscan);
	xfbtree_destroy(&rr->rtrmap_btree);
	xfsb_bitmap_destroy(&rr->old_rtrmapbt_blocks);
	mutex_destroy(&rr->lock);
}

/* Repair the realtime rmap btree. */
int
xrep_rtrmapbt(
	struct xfs_scrub	*sc)
{
	struct xrep_rtrmap	*rr = sc->buf;
	int			error;

	/* Make sure any problems with the fork are fixed. */
	error = xrep_metadata_inode_forks(sc);
	if (error)
		return error;

	error = xrep_rtrmap_setup_scan(rr);
	if (error)
		return error;

	/* Collect rmaps for realtime files. */
	error = xrep_rtrmap_find_rmaps(rr);
	if (error)
		goto out_records;

	xfs_trans_ijoin(sc->tp, sc->ip, 0);

	/* Rebuild the rtrmap information. */
	error = xrep_rtrmap_build_new_tree(rr);
	if (error)
		goto out_records;

	/* Kill the old tree. */
	error = xrep_rtrmap_remove_old_tree(rr);
	if (error)
		goto out_records;

out_records:
	xrep_rtrmap_teardown(rr);
	return error;
}