Lines Matching +full:ip +full:- +full:blocks
(search-result fragments of the XFS reverse-mapping btree repair code; each line carries its source line number and, where applicable, the enclosing function)

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (c) 2018-2024 Oracle. All Rights Reserved.
68 * I) Reverse mappings for all non-space metadata and file data are collected
72 * 1.1. Create a bitmap BMBIT to track bmbt blocks if necessary.
77 * just visit the bmbt blocks to set the corresponding BMBIT areas.
99 * 2. Estimate the number of rmapbt blocks needed to store NR records. (= RMB)
100 * 3. Reserve RMB blocks through the newbt using the allocator in normap mode.
105 * 8. Estimate the number of rmapbt blocks needed for NR + AGNR rmaps. (= RMB')
106 * 9. If RMB' >= RMB, reserve RMB' - RMB more newbt blocks, set RMB = RMB',
115 * IV) Reap the old btree blocks.
123 * that they were the old rmapbt blocks.
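Steps 2-3 and 8-9 above form a fixpoint loop: reserving blocks for the new btree creates more OWN_AG rmaps, which can enlarge the btree again, so the estimate is repeated until it stops growing (this is what xrep_rmap_try_reserve() and xrep_rmap_reserve_space() further down implement). A minimal userspace sketch of that convergence; estimate_rmapbt_blocks() and its fanout of 250 records per block are illustrative stand-ins for the kernel's xfs_btree_bload_compute_geometry(), not the real geometry code:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for xfs_btree_bload_compute_geometry(). */
static unsigned long long
estimate_rmapbt_blocks(unsigned long long nr_records)
{
	unsigned long long blocks = 0, level_recs = nr_records;

	do {
		level_recs = (level_recs + 249) / 250;	/* made-up fanout */
		blocks += level_recs;
	} while (level_recs > 1);
	return blocks;
}

int main(void)
{
	unsigned long long nr = 1000000;	/* NR: collected rmap records */
	unsigned long long rmb = estimate_rmapbt_blocks(nr);	/* step 2 */
	unsigned long long reserved = 0;
	bool done = false;

	while (!done) {
		reserved = rmb;		/* steps 3/9: top up the reservation */
		/*
		 * Step 8: each reserved block adds (at worst) one more
		 * OWN_AG rmap, so re-estimate with those included.
		 */
		unsigned long long rmb_new =
				estimate_rmapbt_blocks(nr + reserved);

		done = reserved >= rmb_new;	/* RMB stopped growing */
		rmb = rmb_new;
	}
	printf("records=%llu, rmapbt blocks reserved=%llu\n", nr, reserved);
	return 0;
}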
142 /* in-memory btree cursor for the xfs_btree_bload iteration */
151 /* Number of non-freespace records found. */
180 return -ENOMEM; in xrep_setup_ag_rmapbt()
182 rr->sc = sc; in xrep_setup_ag_rmapbt()
183 sc->buf = rr; in xrep_setup_ag_rmapbt()
196 if (xfs_rmap_check_irec(sc->sa.pag, rec) != NULL) in xrep_rmap_check_mapping()
197 return -EFSCORRUPTED; in xrep_rmap_check_mapping()
200 error = xfs_alloc_has_records(sc->sa.bno_cur, rec->rm_startblock, in xrep_rmap_check_mapping()
201 rec->rm_blockcount, &outcome); in xrep_rmap_check_mapping()
205 return -EFSCORRUPTED; in xrep_rmap_check_mapping()
210 /* Store a reverse-mapping record. */
227 struct xfs_scrub *sc = rr->sc; in xrep_rmap_stash()
234 if (xchk_iscan_aborted(&rr->iscan)) in xrep_rmap_stash()
235 return -EFSCORRUPTED; in xrep_rmap_stash()
237 trace_xrep_rmap_found(sc->sa.pag, &rmap); in xrep_rmap_stash()
239 mutex_lock(&rr->lock); in xrep_rmap_stash()
240 mcur = xfs_rmapbt_mem_cursor(sc->sa.pag, sc->tp, &rr->rmap_btree); in xrep_rmap_stash()
246 error = xfbtree_trans_commit(&rr->rmap_btree, sc->tp); in xrep_rmap_stash()
250 mutex_unlock(&rr->lock); in xrep_rmap_stash()
254 xfbtree_trans_cancel(&rr->rmap_btree, sc->tp); in xrep_rmap_stash()
256 xchk_iscan_abort(&rr->iscan); in xrep_rmap_stash()
257 mutex_unlock(&rr->lock); in xrep_rmap_stash()
274 struct xrep_rmap *rr = rsr->rr; in xrep_rmap_stash_run()
276 return xrep_rmap_stash(rr, start, len, rsr->owner, 0, rsr->rmap_flags); in xrep_rmap_stash_run()
281 * that the ranges are in units of FS blocks.
291 .owner = oinfo->oi_owner, in xrep_rmap_stash_bitmap()
295 if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK) in xrep_rmap_stash_bitmap()
297 if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK) in xrep_rmap_stash_bitmap()
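xrep_rmap_stash_bitmap() hands each contiguous run of set bits to xrep_rmap_stash_run(), so every maximal run becomes one rmap record for the same owner. A toy run-walker in the same spirit; the kernel iterates an xagb_bitmap, for which a plain bool array stands in here:

#include <stdint.h>
#include <stdio.h>

/*
 * Emit one (agbno, len) record per maximal run of set bits, all with the
 * same owner -- a simplified model of the stash_bitmap/stash_run pair.
 */
static void stash_runs(const _Bool *map, unsigned nbits, uint64_t owner)
{
	unsigned start = 0, len = 0;

	for (unsigned i = 0; i <= nbits; i++) {
		if (i < nbits && map[i]) {
			if (len++ == 0)
				start = i;
		} else if (len) {
			printf("rmap: owner=%llu agbno=%u len=%u\n",
					(unsigned long long)owner, start, len);
			len = 0;
		}
	}
}

int main(void)
{
	_Bool map[10] = { 0, 1, 1, 0, 1, 1, 1, 0, 0, 1 };

	stash_runs(map, 10, 42);	/* emits runs [1,2], [4,3], [9,1] */
	return 0;
}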
313 /* Bitmap of bmbt blocks in this AG. */
327 if (rf->accum.rm_blockcount == 0) in xrep_rmap_stash_accumulated()
330 return xrep_rmap_stash(rf->rr, rf->accum.rm_startblock, in xrep_rmap_stash_accumulated()
331 rf->accum.rm_blockcount, rf->accum.rm_owner, in xrep_rmap_stash_accumulated()
332 rf->accum.rm_offset, rf->accum.rm_flags); in xrep_rmap_stash_accumulated()
343 struct xfs_mount *mp = rf->rr->sc->mp; in xrep_rmap_visit_bmbt()
344 struct xfs_rmap_irec *accum = &rf->accum; in xrep_rmap_visit_bmbt()
349 if (XFS_FSB_TO_AGNO(mp, rec->br_startblock) != in xrep_rmap_visit_bmbt()
350 pag_agno(rf->rr->sc->sa.pag)) in xrep_rmap_visit_bmbt()
353 agbno = XFS_FSB_TO_AGBNO(mp, rec->br_startblock); in xrep_rmap_visit_bmbt()
354 if (rf->whichfork == XFS_ATTR_FORK) in xrep_rmap_visit_bmbt()
356 if (rec->br_state == XFS_EXT_UNWRITTEN) in xrep_rmap_visit_bmbt()
360 if (accum->rm_blockcount > 0 && in xrep_rmap_visit_bmbt()
361 rec->br_startoff == accum->rm_offset + accum->rm_blockcount && in xrep_rmap_visit_bmbt()
362 agbno == accum->rm_startblock + accum->rm_blockcount && in xrep_rmap_visit_bmbt()
363 rmap_flags == accum->rm_flags) { in xrep_rmap_visit_bmbt()
364 accum->rm_blockcount += rec->br_blockcount; in xrep_rmap_visit_bmbt()
373 accum->rm_startblock = agbno; in xrep_rmap_visit_bmbt()
374 accum->rm_blockcount = rec->br_blockcount; in xrep_rmap_visit_bmbt()
375 accum->rm_offset = rec->br_startoff; in xrep_rmap_visit_bmbt()
376 accum->rm_flags = rmap_flags; in xrep_rmap_visit_bmbt()
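The merge test above only extends the accumulated rmap when the new mapping is contiguous in both file-offset space and AG-block space and carries identical flags; otherwise the accumulator is flushed and restarted. A simplified userspace restatement, with struct names standing in for the kernel's xfs_rmap_irec and xfs_bmbt_irec:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for xfs_rmap_irec / xfs_bmbt_irec. */
struct rmap_rec {
	uint32_t	startblock;	/* AG block */
	uint32_t	blockcount;
	uint64_t	offset;		/* file offset, in blocks */
	unsigned	flags;
};

struct bmap_rec {
	uint32_t	agbno;
	uint32_t	blockcount;
	uint64_t	startoff;
	unsigned	flags;
};

/* Mirror of the contiguity check in xrep_rmap_visit_bmbt(). */
static bool
try_merge(struct rmap_rec *acc, const struct bmap_rec *rec)
{
	if (acc->blockcount > 0 &&
	    rec->startoff == acc->offset + acc->blockcount &&
	    rec->agbno == acc->startblock + acc->blockcount &&
	    rec->flags == acc->flags) {
		acc->blockcount += rec->blockcount;
		return true;
	}
	return false;
}

int main(void)
{
	struct rmap_rec acc = { .startblock = 100, .blockcount = 8,
				.offset = 0, .flags = 0 };
	struct bmap_rec next = { .agbno = 108, .blockcount = 4,
				 .startoff = 8, .flags = 0 };

	/* Contiguous in both spaces with equal flags: merges into one rmap. */
	printf("merged=%d blockcount=%u\n", try_merge(&acc, &next),
			acc.blockcount);
	return 0;
}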
396 fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)); in xrep_rmap_visit_iroot_btree_block()
397 if (XFS_FSB_TO_AGNO(cur->bc_mp, fsbno) != pag_agno(rf->rr->sc->sa.pag)) in xrep_rmap_visit_iroot_btree_block()
400 agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno); in xrep_rmap_visit_iroot_btree_block()
401 return xagb_bitmap_set(&rf->bmbt_blocks, agbno, 1); in xrep_rmap_visit_iroot_btree_block()
414 struct xrep_rmap *rr = rf->rr; in xrep_rmap_scan_iroot_btree()
417 xagb_bitmap_init(&rf->bmbt_blocks); in xrep_rmap_scan_iroot_btree()
419 /* Record all the blocks in the btree itself. */ in xrep_rmap_scan_iroot_btree()
425 /* Emit rmaps for the btree blocks. */ in xrep_rmap_scan_iroot_btree()
426 xfs_rmap_ino_bmbt_owner(&oinfo, rf->accum.rm_owner, rf->whichfork); in xrep_rmap_scan_iroot_btree()
427 error = xrep_rmap_stash_bitmap(rr, &rf->bmbt_blocks, &oinfo); in xrep_rmap_scan_iroot_btree()
434 xagb_bitmap_destroy(&rf->bmbt_blocks); in xrep_rmap_scan_iroot_btree()
446 struct xfs_inode *ip, in xrep_rmap_scan_bmbt() argument
449 struct xrep_rmap *rr = rf->rr; in xrep_rmap_scan_bmbt()
455 ifp = xfs_ifork_ptr(ip, rf->whichfork); in xrep_rmap_scan_bmbt()
456 cur = xfs_bmbt_init_cursor(rr->sc->mp, rr->sc->tp, ip, rf->whichfork); in xrep_rmap_scan_bmbt()
458 if (!xfs_ifork_is_realtime(ip, rf->whichfork) && in xrep_rmap_scan_bmbt()
474 /* Scan for the bmbt blocks, which always live on the data device. */ in xrep_rmap_scan_bmbt()
482 * Iterate the in-core extent cache to collect rmap records for anything in
508 struct xfs_inode *ip) in xrep_rmap_scan_meta_btree() argument
510 struct xfs_scrub *sc = rf->rr->sc; in xrep_rmap_scan_meta_btree()
516 if (rf->whichfork != XFS_DATA_FORK) in xrep_rmap_scan_meta_btree()
517 return -EFSCORRUPTED; in xrep_rmap_scan_meta_btree()
519 switch (ip->i_metatype) { in xrep_rmap_scan_meta_btree()
528 return -EFSCORRUPTED; in xrep_rmap_scan_meta_btree()
531 while ((rtg = xfs_rtgroup_next(sc->mp, rtg))) { in xrep_rmap_scan_meta_btree()
532 if (ip == rtg->rtg_inodes[type]) in xrep_rmap_scan_meta_btree()
538 * associated with an rtgroup yet has ondisk blocks allocated to it. in xrep_rmap_scan_meta_btree()
540 if (ip->i_nblocks) { in xrep_rmap_scan_meta_btree()
542 return -EFSCORRUPTED; in xrep_rmap_scan_meta_btree()
548 switch (ip->i_metatype) { in xrep_rmap_scan_meta_btree()
550 cur = xfs_rtrmapbt_init_cursor(sc->tp, rtg); in xrep_rmap_scan_meta_btree()
553 cur = xfs_rtrefcountbt_init_cursor(sc->tp, rtg); in xrep_rmap_scan_meta_btree()
557 error = -EFSCORRUPTED; in xrep_rmap_scan_meta_btree()
572 struct xfs_inode *ip, in xrep_rmap_scan_ifork() argument
576 .accum = { .rm_owner = ip->i_ino, }, in xrep_rmap_scan_ifork()
580 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); in xrep_rmap_scan_ifork()
587 switch (ifp->if_format) { in xrep_rmap_scan_ifork()
591 * the btree blocks themselves, even if this is a realtime in xrep_rmap_scan_ifork()
594 error = xrep_rmap_scan_bmbt(&rf, ip, &mappings_done); in xrep_rmap_scan_ifork()
600 if (xfs_ifork_is_realtime(ip, whichfork)) in xrep_rmap_scan_ifork()
605 return xrep_rmap_scan_meta_btree(&rf, ip); in xrep_rmap_scan_ifork()
619 struct xfs_inode *ip) in xrep_rmap_scan_ilock() argument
623 if (xfs_need_iread_extents(&ip->i_df)) { in xrep_rmap_scan_ilock()
628 if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af)) in xrep_rmap_scan_ilock()
632 xfs_ilock(ip, lock_mode); in xrep_rmap_scan_ilock()
640 struct xfs_inode *ip) in xrep_rmap_scan_inode() argument
642 unsigned int lock_mode = xrep_rmap_scan_ilock(ip); in xrep_rmap_scan_inode()
646 error = xrep_rmap_scan_ifork(rr, ip, XFS_DATA_FORK); in xrep_rmap_scan_inode()
651 error = xrep_rmap_scan_ifork(rr, ip, XFS_ATTR_FORK); in xrep_rmap_scan_inode()
657 xchk_iscan_mark_visited(&rr->iscan, ip); in xrep_rmap_scan_inode()
659 xfs_iunlock(ip, lock_mode); in xrep_rmap_scan_inode()
680 struct xfs_mount *mp = cur->bc_mp; in xrep_rmap_walk_inobt()
688 /* Record the inobt blocks. */ in xrep_rmap_walk_inobt()
689 error = xagb_bitmap_set_btcur_path(&ri->inobt_blocks, cur); in xrep_rmap_walk_inobt()
694 if (xfs_inobt_check_irec(to_perag(cur->bc_group), &irec) != NULL) in xrep_rmap_walk_inobt()
695 return -EFSCORRUPTED; in xrep_rmap_walk_inobt()
699 /* Record a non-sparse inode chunk. */ in xrep_rmap_walk_inobt()
703 XFS_INODES_PER_CHUNK / mp->m_sb.sb_inopblock); in xrep_rmap_walk_inobt()
705 return xagb_bitmap_set(&ri->ichunk_blocks, agbno, aglen); in xrep_rmap_walk_inobt()
709 iperhole = max_t(xfs_agino_t, mp->m_sb.sb_inopblock, in xrep_rmap_walk_inobt()
711 aglen = iperhole / mp->m_sb.sb_inopblock; in xrep_rmap_walk_inobt()
721 error = xagb_bitmap_set(&ri->ichunk_blocks, agbno, aglen); in xrep_rmap_walk_inobt()
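To make the chunk arithmetic above concrete: XFS_INODES_PER_CHUNK is fixed at 64, so with the common 4096-byte block / 512-byte inode geometry (sb_inopblock = 8) a fully allocated chunk spans 64 / 8 = 8 AG blocks, while sparse chunks are walked in iperhole-sized strides. A quick check of those numbers:

#include <stdio.h>

#define XFS_INODES_PER_CHUNK	64	/* fixed by the on-disk format */

int main(void)
{
	unsigned int blocksize = 4096;	/* assumed geometry */
	unsigned int inodesize = 512;
	unsigned int inopblock = blocksize / inodesize;	/* sb_inopblock = 8 */

	/* A fully allocated chunk covers this many AG blocks: */
	unsigned int aglen = XFS_INODES_PER_CHUNK / inopblock;

	printf("inodes/block=%u, blocks per 64-inode chunk=%u\n",
			inopblock, aglen);	/* prints 8 and 8 */
	return 0;
}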
729 /* Collect rmaps for the blocks containing inode btrees and the inode chunks. */
737 struct xfs_scrub *sc = rr->sc; in xrep_rmap_find_inode_rmaps()
745 * chunks and the blocks in the inobt itself. in xrep_rmap_find_inode_rmaps()
747 error = xfs_btree_query_all(sc->sa.ino_cur, xrep_rmap_walk_inobt, &ri); in xrep_rmap_find_inode_rmaps()
756 struct xfs_agi *agi = sc->sa.agi_bp->b_addr; in xrep_rmap_find_inode_rmaps()
759 be32_to_cpu(agi->agi_root), 1); in xrep_rmap_find_inode_rmaps()
765 if (xfs_has_finobt(sc->mp)) { in xrep_rmap_find_inode_rmaps()
767 sc->sa.fino_cur); in xrep_rmap_find_inode_rmaps()
796 irec->rc_domain != XFS_REFC_DOMAIN_COW) in xrep_rmap_walk_cowblocks()
797 return -EFSCORRUPTED; in xrep_rmap_walk_cowblocks()
799 return xagb_bitmap_set(bitmap, irec->rc_startblock, irec->rc_blockcount); in xrep_rmap_walk_cowblocks()
803 * Collect rmaps for the blocks containing the refcount btree, and all CoW
817 .rc_startblock = -1U, in xrep_rmap_find_refcount_rmaps()
820 struct xfs_scrub *sc = rr->sc; in xrep_rmap_find_refcount_rmaps()
823 if (!xfs_has_reflink(sc->mp)) in xrep_rmap_find_refcount_rmaps()
830 error = xagb_bitmap_set_btblocks(&refcountbt_blocks, sc->sa.refc_cur); in xrep_rmap_find_refcount_rmaps()
835 error = xfs_refcount_query_range(sc->sa.refc_cur, &low, &high, in xrep_rmap_find_refcount_rmaps()
858 struct xfs_scrub *sc = rr->sc; in xrep_rmap_find_agheader_rmaps()
860 /* Create a record for the AG sb->agfl. */ in xrep_rmap_find_agheader_rmaps()
861 return xrep_rmap_stash(rr, XFS_SB_BLOCK(sc->mp), in xrep_rmap_find_agheader_rmaps()
862 XFS_AGFL_BLOCK(sc->mp) - XFS_SB_BLOCK(sc->mp) + 1, in xrep_rmap_find_agheader_rmaps()
871 struct xfs_scrub *sc = rr->sc; in xrep_rmap_find_log_rmaps()
873 if (!xfs_ag_contains_log(sc->mp, pag_agno(sc->sa.pag))) in xrep_rmap_find_log_rmaps()
877 XFS_FSB_TO_AGBNO(sc->mp, sc->mp->m_sb.sb_logstart), in xrep_rmap_find_log_rmaps()
878 sc->mp->m_sb.sb_logblocks, XFS_RMAP_OWN_LOG, 0, 0); in xrep_rmap_find_log_rmaps()
891 error = xrep_rmap_check_mapping(rr->sc, rec); in xrep_rmap_check_record()
895 rr->nr_records++; in xrep_rmap_check_record()
900 * Generate all the reverse-mappings for this AG, a list of the old rmapbt
901 * blocks, and the new btreeblks count. Figure out if we have enough free
909 struct xfs_scrub *sc = rr->sc; in xrep_rmap_find_rmaps()
910 struct xchk_ag *sa = &sc->sa; in xrep_rmap_find_rmaps()
911 struct xfs_inode *ip; in xrep_rmap_find_rmaps() local
915 /* Find all the per-AG metadata. */ in xrep_rmap_find_rmaps()
916 xrep_ag_btcur_init(sc, &sc->sa); in xrep_rmap_find_rmaps()
932 xchk_ag_btcur_free(&sc->sa); in xrep_rmap_find_rmaps()
951 sa->agf_bp = NULL; in xrep_rmap_find_rmaps()
952 sa->agi_bp = NULL; in xrep_rmap_find_rmaps()
957 while ((error = xchk_iscan_iter(&rr->iscan, &ip)) == 1) { in xrep_rmap_find_rmaps()
958 error = xrep_rmap_scan_inode(rr, ip); in xrep_rmap_find_rmaps()
959 xchk_irele(sc, ip); in xrep_rmap_find_rmaps()
966 xchk_iscan_iter_finish(&rr->iscan); in xrep_rmap_find_rmaps()
983 * If a hook failed to update the in-memory btree, we lack the data to in xrep_rmap_find_rmaps()
986 if (xchk_iscan_aborted(&rr->iscan)) in xrep_rmap_find_rmaps()
987 return -EFSCORRUPTED; in xrep_rmap_find_rmaps()
992 * all actively-owned space in the filesystem. At the same time, check in xrep_rmap_find_rmaps()
996 mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, &rr->rmap_btree); in xrep_rmap_find_rmaps()
997 sc->sa.bno_cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, in xrep_rmap_find_rmaps()
998 sc->sa.pag); in xrep_rmap_find_rmaps()
1000 rr->nr_records = 0; in xrep_rmap_find_rmaps()
1003 xfs_btree_del_cursor(sc->sa.bno_cur, error); in xrep_rmap_find_rmaps()
1004 sc->sa.bno_cur = NULL; in xrep_rmap_find_rmaps()
1026 return xagb_bitmap_set(ra->bitmap, agbno, 1); in xrep_rmap_walk_agfl()
1031 * number of blocks needed to store the previously observed rmapbt records and
1033 * blocks, return a bitmap of OWN_AG extents in @freesp_blocks and set @done to
1046 .agno = pag_agno(rr->sc->sa.pag), in xrep_rmap_try_reserve()
1048 struct xfs_scrub *sc = rr->sc; in xrep_rmap_try_reserve()
1050 struct xfs_agf *agf = sc->sa.agf_bp->b_addr; in xrep_rmap_try_reserve()
1058 * this function to reflect however many btree blocks we need to store in xrep_rmap_try_reserve()
1060 * made to support the new rmapbt blocks), so we save the old value in xrep_rmap_try_reserve()
1061 * here so we can decide if we've reserved enough blocks. in xrep_rmap_try_reserve()
1063 nr_blocks = rr->new_btree.bload.nr_blocks; in xrep_rmap_try_reserve()
1071 error = xrep_newbt_alloc_blocks(&rr->new_btree, in xrep_rmap_try_reserve()
1072 nr_blocks - *blocks_reserved); in xrep_rmap_try_reserve()
1076 *blocks_reserved = rr->new_btree.bload.nr_blocks; in xrep_rmap_try_reserve()
1081 /* Set all the bnobt blocks in the bitmap. */ in xrep_rmap_try_reserve()
1082 sc->sa.bno_cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, in xrep_rmap_try_reserve()
1083 sc->sa.pag); in xrep_rmap_try_reserve()
1084 error = xagb_bitmap_set_btblocks(freesp_blocks, sc->sa.bno_cur); in xrep_rmap_try_reserve()
1085 xfs_btree_del_cursor(sc->sa.bno_cur, error); in xrep_rmap_try_reserve()
1086 sc->sa.bno_cur = NULL; in xrep_rmap_try_reserve()
1090 /* Set all the cntbt blocks in the bitmap. */ in xrep_rmap_try_reserve()
1091 sc->sa.cnt_cur = xfs_cntbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, in xrep_rmap_try_reserve()
1092 sc->sa.pag); in xrep_rmap_try_reserve()
1093 error = xagb_bitmap_set_btblocks(freesp_blocks, sc->sa.cnt_cur); in xrep_rmap_try_reserve()
1094 xfs_btree_del_cursor(sc->sa.cnt_cur, error); in xrep_rmap_try_reserve()
1095 sc->sa.cnt_cur = NULL; in xrep_rmap_try_reserve()
1100 rr->freesp_btblocks = xagb_bitmap_hweight(freesp_blocks) - 2; in xrep_rmap_try_reserve()
1102 /* Set all the new rmapbt blocks in the bitmap. */ in xrep_rmap_try_reserve()
1103 list_for_each_entry_safe(resv, n, &rr->new_btree.resv_list, list) { in xrep_rmap_try_reserve()
1104 error = xagb_bitmap_set(freesp_blocks, resv->agbno, resv->len); in xrep_rmap_try_reserve()
1109 /* Set all the AGFL blocks in the bitmap. */ in xrep_rmap_try_reserve()
1110 error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp); in xrep_rmap_try_reserve()
1114 error = xfs_agfl_walk(sc->mp, agf, agfl_bp, xrep_rmap_walk_agfl, &ra); in xrep_rmap_try_reserve()
1121 /* Compute how many blocks we'll need for all the rmaps. */ in xrep_rmap_try_reserve()
1123 &rr->new_btree.bload, rr->nr_records + freesp_records); in xrep_rmap_try_reserve()
1127 /* We're done when we don't need more blocks. */ in xrep_rmap_try_reserve()
1128 *done = nr_blocks >= rr->new_btree.bload.nr_blocks; in xrep_rmap_try_reserve()
1146 /* Compute how many blocks we'll need for the rmaps collected so far. */ in xrep_rmap_reserve_space()
1148 &rr->new_btree.bload, rr->nr_records); in xrep_rmap_reserve_space()
1153 if (xchk_should_terminate(rr->sc, &error)) in xrep_rmap_reserve_space()
1160 * number of blocks needed to store the previously observed rmapbt in xrep_rmap_reserve_space()
1162 * Finish when we don't need more blocks. in xrep_rmap_reserve_space()
1172 xrep_ag_btcur_init(rr->sc, &rr->sc->sa); in xrep_rmap_reserve_space()
1174 xchk_ag_btcur_free(&rr->sc->sa); in xrep_rmap_reserve_space()
1188 struct xfs_scrub *sc = rr->sc; in xrep_rmap_reset_counters()
1189 struct xfs_perag *pag = sc->sa.pag; in xrep_rmap_reset_counters()
1190 struct xfs_agf *agf = sc->sa.agf_bp->b_addr; in xrep_rmap_reset_counters()
1197 rmap_btblocks = rr->new_btree.afake.af_blocks - 1; in xrep_rmap_reset_counters()
1198 agf->agf_btreeblks = cpu_to_be32(rr->freesp_btblocks + rmap_btblocks); in xrep_rmap_reset_counters()
1199 xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_BTREEBLKS); in xrep_rmap_reset_counters()
1203 * process to reap the old btree blocks will race with the AIL trying in xrep_rmap_reset_counters()
1204 * to checkpoint the old btree blocks into the filesystem. If the new in xrep_rmap_reset_counters()
1209 * height values before re-initializing the perag info from the updated in xrep_rmap_reset_counters()
1212 pag->pagf_repair_rmap_level = pag->pagf_rmap_level; in xrep_rmap_reset_counters()
1235 error = xfs_btree_increment(rr->mcur, 0, &stat); in xrep_rmap_get_records()
1239 return -EFSCORRUPTED; in xrep_rmap_get_records()
1241 error = xfs_rmap_get_rec(rr->mcur, &cur->bc_rec.r, &stat); in xrep_rmap_get_records()
1245 return -EFSCORRUPTED; in xrep_rmap_get_records()
1248 cur->bc_ops->init_rec_from_cur(cur, block_rec); in xrep_rmap_get_records()
1254 /* Feed one of the new btree blocks to the bulk loader. */
1263 return xrep_newbt_claim_block(cur, &rr->new_btree, ptr); in xrep_rmap_claim_block()
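The two callbacks above follow the xfs_btree_bload() contract: the loader claims blocks from the pre-reserved pool and pulls sorted records to pack into them. A toy restatement of that leaf-filling division of labor; the fanout of 4 and all names here are illustrative, not the real loader:

#include <stdio.h>

#define MAXRECS	4	/* records per leaf block, made-up fanout */

int main(void)
{
	const int recs[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23 };
	const unsigned nrecs = sizeof(recs) / sizeof(recs[0]);
	unsigned next_blk = 100;	/* pool of pre-reserved AG blocks */
	unsigned i = 0;

	while (i < nrecs) {
		unsigned blk = next_blk++;	/* ->claim_block */

		printf("leaf block %u:", blk);
		for (unsigned n = 0; n < MAXRECS && i < nrecs; n++, i++)
			printf(" %d", recs[i]);	/* ->get_records */
		printf("\n");
	}
	return 0;
}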
1277 * compute the OWN_AG records /after/ allocating blocks for the records in xrep_rmap_alloc_vextent()
1280 * for new AGFL blocks. in xrep_rmap_alloc_vextent()
1287 * If xrep_fix_freelist fixed the freelist by moving blocks from the in xrep_rmap_alloc_vextent()
1288 * free space btrees or by removing blocks from the AGFL and queueing in xrep_rmap_alloc_vextent()
1294 * btree's blocks, which means that we can't have EFIs for former AGFL in xrep_rmap_alloc_vextent()
1295 * blocks attached to the repair transaction when we commit the new in xrep_rmap_alloc_vextent()
1338 struct xfs_scrub *sc = rr->sc; in xrep_rmap_build_new_tree()
1339 struct xfs_perag *pag = sc->sa.pag; in xrep_rmap_build_new_tree()
1340 struct xfs_agf *agf = sc->sa.agf_bp->b_addr; in xrep_rmap_build_new_tree()
1346 * per-AG rmapbt reservation after we commit the new btree root and in xrep_rmap_build_new_tree()
1347 * want to dispose of the old btree blocks. in xrep_rmap_build_new_tree()
1349 rr->old_rmapbt_fsbcount = be32_to_cpu(agf->agf_rmap_blocks); in xrep_rmap_build_new_tree()
1355 * attach it to the AG header. The new blocks are accounted to the in xrep_rmap_build_new_tree()
1356 * rmapbt per-AG reservation, which we will adjust further after in xrep_rmap_build_new_tree()
1359 xrep_newbt_init_ag(&rr->new_btree, sc, &XFS_RMAP_OINFO_SKIP_UPDATE, in xrep_rmap_build_new_tree()
1360 xfs_agbno_to_fsb(pag, XFS_RMAP_BLOCK(sc->mp)), in xrep_rmap_build_new_tree()
1362 rr->new_btree.bload.get_records = xrep_rmap_get_records; in xrep_rmap_build_new_tree()
1363 rr->new_btree.bload.claim_block = xrep_rmap_claim_block; in xrep_rmap_build_new_tree()
1364 rr->new_btree.alloc_vextent = xrep_rmap_alloc_vextent; in xrep_rmap_build_new_tree()
1365 rmap_cur = xfs_rmapbt_init_cursor(sc->mp, NULL, NULL, pag); in xrep_rmap_build_new_tree()
1366 xfs_btree_stage_afakeroot(rmap_cur, &rr->new_btree.afake); in xrep_rmap_build_new_tree()
1369 * Initialize @rr->new_btree, reserve space for the new rmapbt, in xrep_rmap_build_new_tree()
1380 rr->mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, in xrep_rmap_build_new_tree()
1381 &rr->rmap_btree); in xrep_rmap_build_new_tree()
1383 error = xrep_rmap_count_records(rr->mcur, &rr->nr_records); in xrep_rmap_build_new_tree()
1390 * that we don't trip the verifiers when writing the new btree blocks in xrep_rmap_build_new_tree()
1393 pag->pagf_repair_rmap_level = rr->new_btree.bload.btree_height; in xrep_rmap_build_new_tree()
1397 * increment in ->get_records positions us at the first record. in xrep_rmap_build_new_tree()
1399 error = xfs_btree_goto_left_edge(rr->mcur); in xrep_rmap_build_new_tree()
1404 error = xfs_btree_bload(rmap_cur, &rr->new_btree.bload, rr); in xrep_rmap_build_new_tree()
1412 xfs_rmapbt_commit_staged_btree(rmap_cur, sc->tp, sc->sa.agf_bp); in xrep_rmap_build_new_tree()
1414 xfs_btree_del_cursor(rr->mcur, 0); in xrep_rmap_build_new_tree()
1415 rr->mcur = NULL; in xrep_rmap_build_new_tree()
1419 * updating the in-memory btree. Abort the scan to stop live updates. in xrep_rmap_build_new_tree()
1421 xchk_iscan_abort(&rr->iscan); in xrep_rmap_build_new_tree()
1424 * The newly committed rmap recordset includes mappings for the blocks in xrep_rmap_build_new_tree()
1429 rr->new_btree.oinfo = XFS_RMAP_OINFO_AG; in xrep_rmap_build_new_tree()
1436 /* Dispose of any unused blocks and the accounting information. */ in xrep_rmap_build_new_tree()
1437 error = xrep_newbt_commit(&rr->new_btree); in xrep_rmap_build_new_tree()
1444 pag->pagf_repair_rmap_level = 0; in xrep_rmap_build_new_tree()
1446 xfs_btree_del_cursor(rr->mcur, error); in xrep_rmap_build_new_tree()
1450 xrep_newbt_cancel(&rr->new_btree); in xrep_rmap_build_new_tree()
1470 return xagb_bitmap_clear(&rfg->rmap_gaps, rec->ar_startblock, in xrep_rmap_find_freesp()
1471 rec->ar_blockcount); in xrep_rmap_find_freesp()
1484 if (rec->rm_startblock > rfg->next_agbno) { in xrep_rmap_find_gaps()
1485 error = xagb_bitmap_set(&rfg->rmap_gaps, rfg->next_agbno, in xrep_rmap_find_gaps()
1486 rec->rm_startblock - rfg->next_agbno); in xrep_rmap_find_gaps()
1491 rfg->next_agbno = max_t(xfs_agblock_t, rfg->next_agbno, in xrep_rmap_find_gaps()
1492 rec->rm_startblock + rec->rm_blockcount); in xrep_rmap_find_gaps()
1497 * Reap the old rmapbt blocks. Now that the rmapbt is fully rebuilt, we make
1509 struct xfs_scrub *sc = rr->sc; in xrep_rmap_remove_old_tree()
1510 struct xfs_agf *agf = sc->sa.agf_bp->b_addr; in xrep_rmap_remove_old_tree()
1511 struct xfs_perag *pag = sc->sa.pag; in xrep_rmap_remove_old_tree()
1519 mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, &rr->rmap_btree); in xrep_rmap_remove_old_tree()
1527 agend = be32_to_cpu(agf->agf_length); in xrep_rmap_remove_old_tree()
1530 agend - rfg.next_agbno); in xrep_rmap_remove_old_tree()
1536 sc->sa.bno_cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp, in xrep_rmap_remove_old_tree()
1537 sc->sa.pag); in xrep_rmap_remove_old_tree()
1538 error = xfs_alloc_query_all(sc->sa.bno_cur, xrep_rmap_find_freesp, in xrep_rmap_remove_old_tree()
1540 xfs_btree_del_cursor(sc->sa.bno_cur, error); in xrep_rmap_remove_old_tree()
1541 sc->sa.bno_cur = NULL; in xrep_rmap_remove_old_tree()
1546 * Free the "free" blocks that the new rmapbt knows about but the bnobt in xrep_rmap_remove_old_tree()
1547 * doesn't--these are the old rmapbt blocks. Credit the old rmapbt in xrep_rmap_remove_old_tree()
1548 * block usage count back to the per-AG rmapbt reservation (and not in xrep_rmap_remove_old_tree()
1558 * Now that we've zapped all the old rmapbt blocks we can turn off in xrep_rmap_remove_old_tree()
1559 * the alternate height mechanism and reset the per-AG space in xrep_rmap_remove_old_tree()
1562 pag->pagf_repair_rmap_level = 0; in xrep_rmap_remove_old_tree()
1563 sc->flags |= XREP_RESET_PERAG_RESV; in xrep_rmap_remove_old_tree()
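The reap logic above boils down to set arithmetic on two bitmaps: mark every AG block not covered by a new rmap record, then clear everything the bnobt says is free; whatever remains is owned by nobody yet not free, i.e. the old rmapbt blocks. A toy illustration with a bool array standing in for the kernel's xagb_bitmap:

#include <stdio.h>
#include <stdbool.h>

#define AG_BLOCKS 64

static bool gaps[AG_BLOCKS];

static void set_range(bool *map, unsigned s, unsigned len, bool v)
{
	for (unsigned i = s; i < s + len && i < AG_BLOCKS; i++)
		map[i] = v;
}

int main(void)
{
	/* Suppose the new rmap records cover [0,40) and [44,64). */
	set_range(gaps, 40, 4, true);	/* gap between rmap records */

	/* Suppose the bnobt says only [42,44) is free space. */
	set_range(gaps, 42, 2, false);

	/* Blocks 40-41 remain set: the old rmapbt blocks, ready to reap. */
	for (unsigned i = 0; i < AG_BLOCKS; i++)
		if (gaps[i])
			printf("old rmapbt block at agbno %u\n", i);
	return 0;
}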
1580 * metadata. IOWs, the in-memory btree knows about the AG headers, the in xrep_rmapbt_want_live_update()
1583 * the in-memory rmap btree. in xrep_rmapbt_want_live_update()
1586 * have re-locked the AGF and are ready to reserve space for the new in xrep_rmapbt_want_live_update()
1589 if (XFS_RMAP_NON_INODE_OWNER(oi->oi_owner)) in xrep_rmapbt_want_live_update()
1590 return oi->oi_owner != XFS_RMAP_OWN_AG; in xrep_rmapbt_want_live_update()
1593 return xchk_iscan_want_live_update(iscan, oi->oi_owner); in xrep_rmapbt_want_live_update()
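Restated, the predicate above drops OWN_AG updates (those rmaps are regenerated from the free-space btrees after the AGF is re-locked), takes every other non-inode owner, and takes inode owners only once the scan has already visited that inode. A self-contained sketch; the OWN_AG sentinel value and the visited-set stub are illustrative, not the kernel's definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RMAP_OWN_AG	(~0ULL - 4)	/* illustrative sentinel only */

/* Hypothetical stand-in for xchk_iscan_want_live_update(). */
static bool iscan_already_visited(uint64_t ino)
{
	return ino < 1000;	/* pretend the scan cursor passed ino 1000 */
}

static bool want_live_update(bool owner_is_inode, uint64_t owner)
{
	if (!owner_is_inode)
		return owner != RMAP_OWN_AG;	/* skip only OWN_AG */
	return iscan_already_visited(owner);	/* defer to the inode scan */
}

int main(void)
{
	printf("OWN_AG update: %d\n", want_live_update(false, RMAP_OWN_AG));
	printf("visited inode 42: %d\n", want_live_update(true, 42));
	printf("unvisited inode 5000: %d\n", want_live_update(true, 5000));
	return 0;
}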
1616 mp = rr->sc->mp; in xrep_rmapbt_live_update()
1618 if (!xrep_rmapbt_want_live_update(&rr->iscan, &p->oinfo)) in xrep_rmapbt_live_update()
1621 trace_xrep_rmap_live_update(pag_group(rr->sc->sa.pag), action, p); in xrep_rmapbt_live_update()
1625 mutex_lock(&rr->lock); in xrep_rmapbt_live_update()
1626 mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, tp, &rr->rmap_btree); in xrep_rmapbt_live_update()
1627 error = __xfs_rmap_finish_intent(mcur, action, p->startblock, in xrep_rmapbt_live_update()
1628 p->blockcount, &p->oinfo, p->unwritten); in xrep_rmapbt_live_update()
1633 error = xfbtree_trans_commit(&rr->rmap_btree, tp); in xrep_rmapbt_live_update()
1638 mutex_unlock(&rr->lock); in xrep_rmapbt_live_update()
1642 xfbtree_trans_cancel(&rr->rmap_btree, tp); in xrep_rmapbt_live_update()
1644 mutex_unlock(&rr->lock); in xrep_rmapbt_live_update()
1645 xchk_iscan_abort(&rr->iscan); in xrep_rmapbt_live_update()
1655 struct xfs_scrub *sc = rr->sc; in xrep_rmap_setup_scan()
1658 mutex_init(&rr->lock); in xrep_rmap_setup_scan()
1660 /* Set up in-memory rmap btree */ in xrep_rmap_setup_scan()
1661 error = xfs_rmapbt_mem_init(sc->mp, &rr->rmap_btree, sc->xmbtp, in xrep_rmap_setup_scan()
1662 pag_agno(sc->sa.pag)); in xrep_rmap_setup_scan()
1667 xchk_iscan_start(sc, 30000, 100, &rr->iscan); in xrep_rmap_setup_scan()
1670 * Hook into live rmap operations so that we can update our in-memory in xrep_rmap_setup_scan()
1675 ASSERT(sc->flags & XCHK_FSGATES_RMAP); in xrep_rmap_setup_scan()
1676 xfs_rmap_hook_setup(&rr->rhook, xrep_rmapbt_live_update); in xrep_rmap_setup_scan()
1677 error = xfs_rmap_hook_add(pag_group(sc->sa.pag), &rr->rhook); in xrep_rmap_setup_scan()
1683 xchk_iscan_teardown(&rr->iscan); in xrep_rmap_setup_scan()
1684 xfbtree_destroy(&rr->rmap_btree); in xrep_rmap_setup_scan()
1686 mutex_destroy(&rr->lock); in xrep_rmap_setup_scan()
1695 struct xfs_scrub *sc = rr->sc; in xrep_rmap_teardown()
1697 xchk_iscan_abort(&rr->iscan); in xrep_rmap_teardown()
1698 xfs_rmap_hook_del(pag_group(sc->sa.pag), &rr->rhook); in xrep_rmap_teardown()
1699 xchk_iscan_teardown(&rr->iscan); in xrep_rmap_teardown()
1700 xfbtree_destroy(&rr->rmap_btree); in xrep_rmap_teardown()
1701 mutex_destroy(&rr->lock); in xrep_rmap_teardown()
1709 struct xrep_rmap *rr = sc->buf; in xrep_rmapbt()
1718 * These rmaps won't change even as we try to allocate blocks. in xrep_rmapbt()