Lines Matching +full:sub +full:- +full:block
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
50 * told us to fix it. This function returns -EAGAIN to mean "re-run scrub",
61 trace_xrep_attempt(XFS_I(file_inode(sc->file)), sc->sm, error); in xrep_attempt()
63 xchk_ag_btcur_free(&sc->sa); in xrep_attempt()
66 ASSERT(sc->ops->repair); in xrep_attempt()
67 run->repair_attempted = true; in xrep_attempt()
69 error = sc->ops->repair(sc); in xrep_attempt()
70 trace_xrep_done(XFS_I(file_inode(sc->file)), sc->sm, error); in xrep_attempt()
71 run->repair_ns += xchk_stats_elapsed_ns(repair_start); in xrep_attempt()
78 sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT; in xrep_attempt()
79 sc->flags |= XREP_ALREADY_FIXED; in xrep_attempt()
80 run->repair_succeeded = true; in xrep_attempt()
81 return -EAGAIN; in xrep_attempt()
82 case -ECHRNG: in xrep_attempt()
83 sc->flags |= XCHK_NEED_DRAIN; in xrep_attempt()
84 run->retries++; in xrep_attempt()
85 return -EAGAIN; in xrep_attempt()
86 case -EDEADLOCK: in xrep_attempt()
88 if (!(sc->flags & XCHK_TRY_HARDER)) { in xrep_attempt()
89 sc->flags |= XCHK_TRY_HARDER; in xrep_attempt()
90 run->retries++; in xrep_attempt()
91 return -EAGAIN; in xrep_attempt()
101 * EAGAIN tells the caller to re-scrub, so we cannot return in xrep_attempt()
104 ASSERT(error != -EAGAIN); in xrep_attempt()
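The matches above only show xrep_attempt()'s -EAGAIN exits; the loop that consumes them lives in the caller, xfs_scrub_metadata(). A minimal sketch of that side of the protocol, assuming the xchk_stats_run argument shown above; setup/teardown plumbing is omitted:

	error = xrep_attempt(sc, &run);
	if (error == -EAGAIN) {
		/*
		 * Repair either fixed something or wants its resources
		 * re-acquired: tear down, set up, and scrub this object again.
		 */
		goto retry_op;
	}
	/* Any other nonzero return is a hard error for this object. */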
113 * administrator isn't running xfs_scrub in no-repairs mode.
127 * Repair probe -- userspace uses this to probe if we're willing to repair a
161 if (sc->sa.agi_bp) { in xrep_roll_ag_trans()
162 xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp, XFS_AGI_MAGICNUM); in xrep_roll_ag_trans()
163 xfs_trans_bhold(sc->tp, sc->sa.agi_bp); in xrep_roll_ag_trans()
166 if (sc->sa.agf_bp) { in xrep_roll_ag_trans()
167 xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_MAGICNUM); in xrep_roll_ag_trans()
168 xfs_trans_bhold(sc->tp, sc->sa.agf_bp); in xrep_roll_ag_trans()
177 error = xfs_trans_roll(&sc->tp); in xrep_roll_ag_trans()
182 if (sc->sa.agi_bp) in xrep_roll_ag_trans()
183 xfs_trans_bjoin(sc->tp, sc->sa.agi_bp); in xrep_roll_ag_trans()
184 if (sc->sa.agf_bp) in xrep_roll_ag_trans()
185 xfs_trans_bjoin(sc->tp, sc->sa.agf_bp); in xrep_roll_ag_trans()
195 if (!sc->ip) in xrep_roll_trans()
197 return xfs_trans_roll_inode(&sc->tp, sc->ip); in xrep_roll_trans()
216 if (sc->sa.agi_bp) { in xrep_defer_finish()
217 xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp, XFS_AGI_MAGICNUM); in xrep_defer_finish()
218 xfs_trans_bhold(sc->tp, sc->sa.agi_bp); in xrep_defer_finish()
221 if (sc->sa.agf_bp) { in xrep_defer_finish()
222 xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_MAGICNUM); in xrep_defer_finish()
223 xfs_trans_bhold(sc->tp, sc->sa.agf_bp); in xrep_defer_finish()
233 error = xfs_defer_finish(&sc->tp); in xrep_defer_finish()
242 if (sc->sa.agi_bp) in xrep_defer_finish()
243 xfs_trans_bhold_release(sc->tp, sc->sa.agi_bp); in xrep_defer_finish()
244 if (sc->sa.agf_bp) in xrep_defer_finish()
245 xfs_trans_bhold_release(sc->tp, sc->sa.agf_bp); in xrep_defer_finish()
263 pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks; in xrep_ag_has_space()
269 * any type of per-AG btree.
275 struct xfs_mount *mp = sc->mp; in xrep_calc_ag_resblks()
276 struct xfs_scrub_metadata *sm = sc->sm; in xrep_calc_ag_resblks()
289 if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)) in xrep_calc_ag_resblks()
292 pag = xfs_perag_get(mp, sm->sm_agno); in xrep_calc_ag_resblks()
294 /* Use in-core icount if possible. */ in xrep_calc_ag_resblks()
295 icount = pag->pagi_count; in xrep_calc_ag_resblks()
300 icount = pag->pagi_count; in xrep_calc_ag_resblks()
305 /* Now grab the block counters from the AGF. */ in xrep_calc_ag_resblks()
308 aglen = pag->block_count; in xrep_calc_ag_resblks()
312 struct xfs_agf *agf = bp->b_addr; in xrep_calc_ag_resblks()
314 aglen = be32_to_cpu(agf->agf_length); in xrep_calc_ag_resblks()
315 freelen = be32_to_cpu(agf->agf_freeblks); in xrep_calc_ag_resblks()
316 usedlen = aglen - freelen; in xrep_calc_ag_resblks()
320 /* If the icount is impossible, make some worst-case assumptions. */ in xrep_calc_ag_resblks()
323 icount = pag->agino_max - pag->agino_min + 1; in xrep_calc_ag_resblks()
326 /* If the block counts are impossible, make worst-case assumptions. */ in xrep_calc_ag_resblks()
328 aglen != pag->block_count || in xrep_calc_ag_resblks()
330 aglen = pag->block_count; in xrep_calc_ag_resblks()
336 trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen, in xrep_calc_ag_resblks()
360 * For non-reflink filesystems we can't have more records than in xrep_calc_ag_resblks()
362 * more than one rmap record per AG block. We don't know how in xrep_calc_ag_resblks()
364 * what we hope is a generous over-estimation. in xrep_calc_ag_resblks()
375 trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz, in xrep_calc_ag_resblks()
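The sizing arithmetic between the two trace calls is elided by the match filter, but conceptually each rebuildable btree (pair) gets a worst-case block estimate and the reservation is the largest of them. A rough sketch of that selection, ignoring the sparse-inode, finobt, and reflink refinements (the *_calc_size() helpers are the libxfs estimators assumed here):

	bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);	/* bnobt + cntbt */
	inobt_sz = xfs_iallocbt_calc_size(mp, icount / XFS_INODES_PER_CHUNK);
	rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen);
	refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);

	return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));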
382 * Reconstructing per-AG Btrees
415 args.mp = sc->mp; in xrep_fix_freelist()
416 args.tp = sc->tp; in xrep_fix_freelist()
417 args.agno = sc->sa.pag->pag_agno; in xrep_fix_freelist()
419 args.pag = sc->sa.pag; in xrep_fix_freelist()
425 * Finding per-AG Btree Roots for AGF/AGI Reconstruction
443 * read each block referenced by the rmap record. If the block is a btree
444 * block from this filesystem matching any of the magic numbers and has a
445 * level higher than what we've already seen, remember the block and the
446 * height of the tree required to have such a block. When the call completes,
447 * we return the highest block we've found for each btree description; those
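The fab-> fields referenced in the matches below come from the caller-supplied array of btree descriptions. A sketch of one descriptor, limited to the members visible in this listing (the real struct xrep_find_ag_btree in repair.h may carry more, such as a maximum-height bound):

struct xrep_find_ag_btree {
	uint64_t			rmap_owner;	/* in: rmap owner to match */
	const struct xfs_buf_ops	*buf_ops;	/* in: magic/verifier for this btree */
	xfs_agblock_t			root;		/* out: root candidate, or NULLAGBLOCK */
	unsigned int			height;		/* out: tree height implied by that root */
};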
458 /* See if our block is in the AGFL. */
467 return (*agbno == bno) ? -ECANCELED : 0; in xrep_findroot_agfl_walk()
470 /* Does this block match the btree information passed in? */
479 struct xfs_mount *mp = ri->sc->mp; in xrep_findroot_block()
486 daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.pag->pag_agno, agbno); in xrep_findroot_block()
495 error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp, in xrep_findroot_block()
497 if (error == -ECANCELED) in xrep_findroot_block()
521 error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr, in xrep_findroot_block()
522 mp->m_bsize, 0, &bp, NULL); in xrep_findroot_block()
526 /* Ensure the block magic matches the btree type we're looking for. */ in xrep_findroot_block()
528 ASSERT(fab->buf_ops->magic[1] != 0); in xrep_findroot_block()
529 if (btblock->bb_magic != fab->buf_ops->magic[1]) in xrep_findroot_block()
534 * this btree type, we know this block doesn't match the btree and we in xrep_findroot_block()
538 * the block for us, so we can move on to checking if this is a root in xrep_findroot_block()
539 * block candidate. in xrep_findroot_block()
544 * if it's a root block candidate. If there is no match, bail out. in xrep_findroot_block()
546 if (bp->b_ops) { in xrep_findroot_block()
547 if (bp->b_ops != fab->buf_ops) in xrep_findroot_block()
551 if (!uuid_equal(&btblock->bb_u.s.bb_uuid, in xrep_findroot_block()
552 &mp->m_sb.sb_meta_uuid)) in xrep_findroot_block()
559 bp->b_ops = fab->buf_ops; in xrep_findroot_block()
560 fab->buf_ops->verify_read(bp); in xrep_findroot_block()
561 if (bp->b_error) { in xrep_findroot_block()
562 bp->b_ops = NULL; in xrep_findroot_block()
563 bp->b_error = 0; in xrep_findroot_block()
574 * This block passes the magic/uuid and verifier tests for this btree in xrep_findroot_block()
580 * Compare this btree block's level to the height of the current in xrep_findroot_block()
581 * candidate root block. in xrep_findroot_block()
587 * ignore this block. in xrep_findroot_block()
590 if (block_level + 1 == fab->height) { in xrep_findroot_block()
591 fab->root = NULLAGBLOCK; in xrep_findroot_block()
593 } else if (block_level < fab->height) { in xrep_findroot_block()
598 * This is the highest block in the tree that we've found so far. in xrep_findroot_block()
600 * block. in xrep_findroot_block()
602 fab->height = block_level + 1; in xrep_findroot_block()
605 * If this block doesn't have sibling pointers, then it's the new root in xrep_findroot_block()
606 * block candidate. Otherwise, the root will be found farther up the in xrep_findroot_block()
609 if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) && in xrep_findroot_block()
610 btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK)) in xrep_findroot_block()
611 fab->root = agbno; in xrep_findroot_block()
613 fab->root = NULLAGBLOCK; in xrep_findroot_block()
615 trace_xrep_findroot_block(mp, ri->sc->sa.pag->pag_agno, agbno, in xrep_findroot_block()
616 be32_to_cpu(btblock->bb_magic), fab->height - 1); in xrep_findroot_block()
618 xfs_trans_brelse(ri->sc->tp, bp); in xrep_findroot_block()
639 if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner)) in xrep_findroot_rmap()
642 /* Otherwise scan each block + btree type. */ in xrep_findroot_rmap()
643 for (b = 0; b < rec->rm_blockcount; b++) { in xrep_findroot_rmap()
645 for (fab = ri->btree_info; fab->buf_ops; fab++) { in xrep_findroot_rmap()
646 if (rec->rm_owner != fab->rmap_owner) in xrep_findroot_rmap()
649 rec->rm_owner, rec->rm_startblock + b, in xrep_findroot_rmap()
661 /* Find the roots of the per-AG btrees described in btree_info. */
669 struct xfs_mount *mp = sc->mp; in xrep_find_ag_btree_roots()
680 ri.agf = agf_bp->b_addr; in xrep_find_ag_btree_roots()
682 for (fab = btree_info; fab->buf_ops; fab++) { in xrep_find_ag_btree_roots()
683 ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG); in xrep_find_ag_btree_roots()
684 ASSERT(XFS_RMAP_NON_INODE_OWNER(fab->rmap_owner)); in xrep_find_ag_btree_roots()
685 fab->root = NULLAGBLOCK; in xrep_find_ag_btree_roots()
686 fab->height = 0; in xrep_find_ag_btree_roots()
689 cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag); in xrep_find_ag_btree_roots()
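The matches stop at the rmapbt cursor setup; presumably the rest of xrep_find_ag_btree_roots() pushes every reverse mapping through xrep_findroot_rmap() and then releases the cursor, along these lines:

	error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
	xfs_btree_del_cursor(cur, error);

	return error;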
704 struct xfs_mount *mp = sc->mp; in xrep_update_qflags()
707 mutex_lock(&mp->m_quotainfo->qi_quotaofflock); in xrep_update_qflags()
708 if ((mp->m_qflags & clear_flags) == 0 && in xrep_update_qflags()
709 (mp->m_qflags & set_flags) == set_flags) in xrep_update_qflags()
712 mp->m_qflags &= ~clear_flags; in xrep_update_qflags()
713 mp->m_qflags |= set_flags; in xrep_update_qflags()
715 spin_lock(&mp->m_sb_lock); in xrep_update_qflags()
716 mp->m_sb.sb_qflags &= ~clear_flags; in xrep_update_qflags()
717 mp->m_sb.sb_qflags |= set_flags; in xrep_update_qflags()
718 spin_unlock(&mp->m_sb_lock); in xrep_update_qflags()
726 bp = xfs_trans_getsb(sc->tp); in xrep_update_qflags()
727 xfs_sb_to_disk(bp->b_addr, &mp->m_sb); in xrep_update_qflags()
728 xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_SB_BUF); in xrep_update_qflags()
729 xfs_trans_log_buf(sc->tp, bp, 0, sizeof(struct xfs_dsb) - 1); in xrep_update_qflags()
732 mutex_unlock(&mp->m_quotainfo->qi_quotaofflock); in xrep_update_qflags()
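The next match (line 744) sits inside the helper that forces a quotacheck; its surrounding lines do not match the filter. A hedged reconstruction of what that helper likely amounts to, built only on xrep_update_qflags() above and the standard quota-checked flags:

static void
xrep_force_quotacheck(
	struct xfs_scrub	*sc,
	xfs_dqtype_t		type)
{
	uint			flag;

	flag = xfs_quota_chkd_flag(type);
	if (!(flag & sc->mp->m_qflags))
		return;

	/* Clearing the CHKD flag makes the next mount re-run quotacheck. */
	xrep_update_qflags(sc, flag, 0);
}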
744 if (!(flag & sc->mp->m_qflags)) in xrep_force_quotacheck()
754 * We cannot allow the dquot code to allocate an on-disk dquot block here
755 * because we're already in transaction context. The on-disk dquot should
766 ASSERT(sc->tp != NULL); in xrep_ino_dqattach()
767 ASSERT(sc->ip != NULL); in xrep_ino_dqattach()
769 error = xfs_qm_dqattach(sc->ip); in xrep_ino_dqattach()
771 case -EFSBADCRC: in xrep_ino_dqattach()
772 case -EFSCORRUPTED: in xrep_ino_dqattach()
773 case -ENOENT: in xrep_ino_dqattach()
774 xfs_err_ratelimited(sc->mp, in xrep_ino_dqattach()
776 (unsigned long long)sc->ip->i_ino, error); in xrep_ino_dqattach()
777 if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot) in xrep_ino_dqattach()
779 if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot) in xrep_ino_dqattach()
781 if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot) in xrep_ino_dqattach()
784 case -ESRCH: in xrep_ino_dqattach()
809 inode_has_nrext64 = xfs_inode_has_large_extent_counts(sc->ip); in xrep_ino_ensure_extent_count()
814 return -EFSCORRUPTED; in xrep_ino_ensure_extent_count()
815 if (!xfs_has_large_extent_counts(sc->mp)) in xrep_ino_ensure_extent_count()
816 return -EFSCORRUPTED; in xrep_ino_ensure_extent_count()
820 return -EFSCORRUPTED; in xrep_ino_ensure_extent_count()
822 sc->ip->i_diflags2 |= XFS_DIFLAG2_NREXT64; in xrep_ino_ensure_extent_count()
823 xfs_trans_log_inode(sc->tp, sc->ip, XFS_ILOG_CORE); in xrep_ino_ensure_extent_count()
836 struct xfs_mount *mp = sc->mp; in xrep_ag_btcur_init()
838 /* Set up a bnobt cursor for cross-referencing. */ in xrep_ag_btcur_init()
839 if (sc->sm->sm_type != XFS_SCRUB_TYPE_BNOBT && in xrep_ag_btcur_init()
840 sc->sm->sm_type != XFS_SCRUB_TYPE_CNTBT) { in xrep_ag_btcur_init()
841 sa->bno_cur = xfs_bnobt_init_cursor(mp, sc->tp, sa->agf_bp, in xrep_ag_btcur_init()
842 sc->sa.pag); in xrep_ag_btcur_init()
843 sa->cnt_cur = xfs_cntbt_init_cursor(mp, sc->tp, sa->agf_bp, in xrep_ag_btcur_init()
844 sc->sa.pag); in xrep_ag_btcur_init()
847 /* Set up an inobt cursor for cross-referencing. */ in xrep_ag_btcur_init()
848 if (sc->sm->sm_type != XFS_SCRUB_TYPE_INOBT && in xrep_ag_btcur_init()
849 sc->sm->sm_type != XFS_SCRUB_TYPE_FINOBT) { in xrep_ag_btcur_init()
850 sa->ino_cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, in xrep_ag_btcur_init()
851 sa->agi_bp); in xrep_ag_btcur_init()
853 sa->fino_cur = xfs_finobt_init_cursor(sc->sa.pag, in xrep_ag_btcur_init()
854 sc->tp, sa->agi_bp); in xrep_ag_btcur_init()
857 /* Set up a rmapbt cursor for cross-referencing. */ in xrep_ag_btcur_init()
858 if (sc->sm->sm_type != XFS_SCRUB_TYPE_RMAPBT && in xrep_ag_btcur_init()
860 sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp, in xrep_ag_btcur_init()
861 sc->sa.pag); in xrep_ag_btcur_init()
863 /* Set up a refcountbt cursor for cross-referencing. */ in xrep_ag_btcur_init()
864 if (sc->sm->sm_type != XFS_SCRUB_TYPE_REFCNTBT && in xrep_ag_btcur_init()
866 sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp, in xrep_ag_btcur_init()
867 sa->agf_bp, sc->sa.pag); in xrep_ag_btcur_init()
871 * Reinitialize the in-core AG state after a repair by rereading the AGF
879 struct xfs_perag *pag = sc->sa.pag; in xrep_reinit_pagf()
886 clear_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate); in xrep_reinit_pagf()
887 error = xfs_alloc_read_agf(pag, sc->tp, 0, &bp); in xrep_reinit_pagf()
891 if (bp != sc->sa.agf_bp) { in xrep_reinit_pagf()
892 ASSERT(bp == sc->sa.agf_bp); in xrep_reinit_pagf()
893 return -EFSCORRUPTED; in xrep_reinit_pagf()
900 * Reinitialize the in-core AG state after a repair by rereading the AGI
908 struct xfs_perag *pag = sc->sa.pag; in xrep_reinit_pagi()
915 clear_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate); in xrep_reinit_pagi()
916 error = xfs_ialloc_read_agi(pag, sc->tp, 0, &bp); in xrep_reinit_pagi()
920 if (bp != sc->sa.agi_bp) { in xrep_reinit_pagi()
921 ASSERT(bp == sc->sa.agi_bp); in xrep_reinit_pagi()
922 return -EFSCORRUPTED; in xrep_reinit_pagi()
930 * This should only be called to scan an AG while repairing file-based metadata.
940 ASSERT(!sa->pag); in xrep_ag_init()
942 error = xfs_ialloc_read_agi(pag, sc->tp, 0, &sa->agi_bp); in xrep_ag_init()
946 error = xfs_alloc_read_agf(pag, sc->tp, 0, &sa->agf_bp); in xrep_ag_init()
951 sa->pag = xfs_perag_hold(pag); in xrep_ag_init()
956 /* Reinitialize the per-AG block reservation for the AG we just fixed. */
963 if (!(sc->flags & XREP_RESET_PERAG_RESV)) in xrep_reset_perag_resv()
966 ASSERT(sc->sa.pag != NULL); in xrep_reset_perag_resv()
967 ASSERT(sc->ops->type == ST_PERAG); in xrep_reset_perag_resv()
968 ASSERT(sc->tp); in xrep_reset_perag_resv()
970 sc->flags &= ~XREP_RESET_PERAG_RESV; in xrep_reset_perag_resv()
971 xfs_ag_resv_free(sc->sa.pag); in xrep_reset_perag_resv()
972 error = xfs_ag_resv_init(sc->sa.pag, sc->tp); in xrep_reset_perag_resv()
973 if (error == -ENOSPC) { in xrep_reset_perag_resv()
974 xfs_err(sc->mp, in xrep_reset_perag_resv()
975 "Insufficient free space to reset per-AG reservation for AG %u after repair.", in xrep_reset_perag_resv()
976 sc->sa.pag->pag_agno); in xrep_reset_perag_resv()
989 if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_FORCE_REBUILD) in xrep_will_attempt()
993 if (XFS_TEST_ERROR(false, sc->mp, XFS_ERRTAG_FORCE_SCRUB_REPAIR)) in xrep_will_attempt()
996 /* Metadata is corrupt or failed cross-referencing. */ in xrep_will_attempt()
997 if (xchk_needs_repair(sc->sm)) in xrep_will_attempt()
1009 struct xfs_scrub_subord *sub; in xrep_metadata_inode_subtype() local
1018 sub = xchk_scrub_create_subord(sc, scrub_type); in xrep_metadata_inode_subtype()
1019 error = sub->sc.ops->scrub(&sub->sc); in xrep_metadata_inode_subtype()
1022 if (!xrep_will_attempt(&sub->sc)) in xrep_metadata_inode_subtype()
1029 error = sub->sc.ops->repair(&sub->sc); in xrep_metadata_inode_subtype()
1038 error = xfs_defer_finish(&sub->sc.tp); in xrep_metadata_inode_subtype()
1041 error = xfs_trans_roll(&sub->sc.tp); in xrep_metadata_inode_subtype()
1046 * Clear the corruption flags and re-check the metadata that we just in xrep_metadata_inode_subtype()
1049 sub->sc.sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT; in xrep_metadata_inode_subtype()
1050 error = sub->sc.ops->scrub(&sub->sc); in xrep_metadata_inode_subtype()
1055 if (xchk_needs_repair(sub->sc.sm)) { in xrep_metadata_inode_subtype()
1056 error = -EFSCORRUPTED; in xrep_metadata_inode_subtype()
1060 xchk_scrub_free_subord(sub); in xrep_metadata_inode_subtype()
1066 * sc->ip points to the metadata inode and the ILOCK is held on that inode.
1087 if (xfs_inode_hasattr(sc->ip)) { in xrep_metadata_inode_forks()
1094 if (xfs_is_reflink_inode(sc->ip)) { in xrep_metadata_inode_forks()
1096 xfs_trans_ijoin(sc->tp, sc->ip, 0); in xrep_metadata_inode_forks()
1097 error = xfs_reflink_clear_inode_flag(sc->ip, &sc->tp); in xrep_metadata_inode_forks()
1103 if (xfs_inode_hasattr(sc->ip)) { in xrep_metadata_inode_forks()
1106 xfs_trans_ijoin(sc->tp, sc->ip, 0); in xrep_metadata_inode_forks()
1118 error = xfs_trans_roll(&sc->tp); in xrep_metadata_inode_forks()
1128 * Set up an in-memory buffer cache so that we can use the xfbtree. Allocating
1138 ASSERT(sc->tp == NULL); in xrep_setup_xfbtree()
1140 return xmbuf_alloc(sc->mp, descr, &sc->xmbtp); in xrep_setup_xfbtree()
1156 *cookiep = current->journal_info; in xrep_trans_alloc_hook_dummy()
1157 current->journal_info = NULL; in xrep_trans_alloc_hook_dummy()
1163 current->journal_info = *cookiep; in xrep_trans_alloc_hook_dummy()
1175 current->journal_info = *cookiep; in xrep_trans_cancel_hook_dummy()
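The two *_hook_dummy() helpers above stash and restore current->journal_info so that a repair hook can allocate and cancel its own transaction while another transaction is already attached to the task. A usage sketch, assuming the signatures implied by the fragments:

	void			*cookie;
	struct xfs_trans	*tp;
	int			error;

	error = xrep_trans_alloc_hook_dummy(mp, &cookie, &tp);
	if (error)
		return error;

	/* ... log updates to the in-memory btree under @tp ... */

	xrep_trans_cancel_hook_dummy(&cookie, tp);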
1180 * See if this buffer can pass the given ->verify_struct() function.
1192 const struct xfs_buf_ops *old_ops = bp->b_ops; in xrep_buf_verify_struct()
1201 old_error = bp->b_error; in xrep_buf_verify_struct()
1202 bp->b_ops = ops; in xrep_buf_verify_struct()
1203 fa = bp->b_ops->verify_struct(bp); in xrep_buf_verify_struct()
1204 bp->b_ops = old_ops; in xrep_buf_verify_struct()
1205 bp->b_error = old_error; in xrep_buf_verify_struct()
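xrep_buf_verify_struct() temporarily points b_ops at the candidate verifier, runs ->verify_struct(), then restores the buffer's original ops and error state; the elided tail presumably reports whether the verifier found a fault. A hedged usage sketch (xfs_bnobt_buf_ops is just one example of a candidate ops table):

	/* Does this freshly read block look structurally like a bnobt block? */
	if (xrep_buf_verify_struct(bp, &xfs_bnobt_buf_ops)) {
		/* plausible by-bno btree block; safe to reinterpret */
	}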