xref: /linux/fs/xfs/scrub/reap.c (revision b477ff98d903618a1ab8247861f2ea6e70c0f0f8)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2022-2023 Oracle.  All Rights Reserved.
4  * Author: Darrick J. Wong <djwong@kernel.org>
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_mount.h"
12 #include "xfs_btree.h"
13 #include "xfs_log_format.h"
14 #include "xfs_trans.h"
15 #include "xfs_sb.h"
16 #include "xfs_inode.h"
17 #include "xfs_alloc.h"
18 #include "xfs_alloc_btree.h"
19 #include "xfs_ialloc.h"
20 #include "xfs_ialloc_btree.h"
21 #include "xfs_rmap.h"
22 #include "xfs_rmap_btree.h"
23 #include "xfs_refcount.h"
24 #include "xfs_refcount_btree.h"
25 #include "xfs_extent_busy.h"
26 #include "xfs_ag.h"
27 #include "xfs_ag_resv.h"
28 #include "xfs_quota.h"
29 #include "xfs_qm.h"
30 #include "xfs_bmap.h"
31 #include "xfs_da_format.h"
32 #include "xfs_da_btree.h"
33 #include "xfs_attr.h"
34 #include "xfs_attr_remote.h"
35 #include "xfs_defer.h"
36 #include "xfs_metafile.h"
37 #include "xfs_rtgroup.h"
38 #include "xfs_rtrmap_btree.h"
39 #include "scrub/scrub.h"
40 #include "scrub/common.h"
41 #include "scrub/trace.h"
42 #include "scrub/repair.h"
43 #include "scrub/bitmap.h"
44 #include "scrub/agb_bitmap.h"
45 #include "scrub/fsb_bitmap.h"
46 #include "scrub/rtb_bitmap.h"
47 #include "scrub/reap.h"
48 
49 /*
50  * Disposal of Blocks from Old Metadata
51  *
52  * Now that we've constructed a new btree to replace the damaged one, we want
53  * to dispose of the blocks that (we think) the old btree was using.
54  * Previously, we used the rmapbt to collect the extents (bitmap) with the
55  * rmap owner corresponding to the tree we rebuilt, collected extents for any
56  * blocks with the same rmap owner that are owned by another data structure
57  * (sublist), and subtracted sublist from bitmap.  In theory the extents
58  * remaining in bitmap are the old btree's blocks.
59  *
60  * Unfortunately, it's possible that the btree was crosslinked with other
61  * blocks on disk.  The rmap data can tell us if there are multiple owners, so
62  * if the rmapbt says there is an owner of this block other than @oinfo, then
63  * the block is crosslinked.  Remove the reverse mapping and continue.
64  *
65  * If there is one rmap record, we can free the block, which removes the
66  * reverse mapping but doesn't add the block to the free space.  Our repair
67  * strategy is to hope the other metadata objects crosslinked on this block
68  * will be rebuilt (atop different blocks), thereby removing all the cross
69  * links.
70  *
71  * If there are no rmap records at all, we also free the block.  If the btree
72  * being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't
73  * supposed to be an rmap record and everything is ok.  For other btrees there
74  * had to have been an rmap entry for the block to have ended up on @bitmap,
75  * so if it's gone now there's something wrong and the fs will shut down.
76  *
77  * Note: If there are multiple rmap records with only the same rmap owner as
78  * the btree we're trying to rebuild and the block is indeed owned by another
79  * data structure with the same rmap owner, then the block will be in sublist
80  * and therefore doesn't need disposal.  If there are multiple rmap records
81  * with only the same rmap owner but the block is not owned by something with
82  * the same rmap owner, the block will be freed.
83  *
84  * The caller is responsible for locking the AG headers/inode for the entire
85  * rebuild operation so that nothing else can sneak in and change the incore
86  * state while we're not looking.  We must also invalidate any buffers
87  * associated with @bitmap.
88  */
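
/*
 * Illustrative sketch (added; not part of the original file): a btree
 * repair function might hand the old blocks to this module as follows.
 * The enclosing function and bitmap name are hypothetical, but
 * xrep_reap_agblocks(), XFS_RMAP_OINFO_INOBT, and XFS_AG_RESV_NONE are
 * real symbols declared in scrub/reap.h and xfs_rmap.h.
 */
static inline int
xrep_example_reap_old_inobt(
	struct xfs_scrub	*sc,
	struct xagb_bitmap	*old_inobt_blocks)	/* hypothetical */
{
	/* Free or unmap everything we think the old inobt owned. */
	return xrep_reap_agblocks(sc, old_inobt_blocks,
			&XFS_RMAP_OINFO_INOBT, XFS_AG_RESV_NONE);
}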
89 
90 /* Information about reaping extents after a repair. */
91 struct xreap_state {
92 	struct xfs_scrub		*sc;
93 
94 	/* Reverse mapping owner and metadata reservation type. */
95 	const struct xfs_owner_info	*oinfo;
96 	enum xfs_ag_resv_type		resv;
97 
98 	/* If true, roll the transaction before reaping the next extent. */
99 	bool				force_roll;
100 
101 	/* Number of deferred reaps attached to the current transaction. */
102 	unsigned int			deferred;
103 
104 	/* Number of invalidated buffers logged to the current transaction. */
105 	unsigned int			invalidated;
106 
107 	/* Number of deferred reaps queued during the whole reap sequence. */
108 	unsigned long long		total_deferred;
109 };
110 
111 /* Put a block back on the AGFL. */
112 STATIC int
113 xreap_put_freelist(
114 	struct xfs_scrub	*sc,
115 	xfs_agblock_t		agbno)
116 {
117 	struct xfs_buf		*agfl_bp;
118 	int			error;
119 
120 	/* Make sure there's space on the freelist. */
121 	error = xrep_fix_freelist(sc, 0);
122 	if (error)
123 		return error;
124 
125 	/*
126 	 * Since we're "freeing" a lost block onto the AGFL, we have to
127 	 * create an rmap for the block prior to merging it or else other
128 	 * parts will break.
129 	 */
130 	error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno, 1,
131 			&XFS_RMAP_OINFO_AG);
132 	if (error)
133 		return error;
134 
135 	/* Put the block on the AGFL. */
136 	error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
137 	if (error)
138 		return error;
139 
140 	error = xfs_alloc_put_freelist(sc->sa.pag, sc->tp, sc->sa.agf_bp,
141 			agfl_bp, agbno, 0);
142 	if (error)
143 		return error;
144 	xfs_extent_busy_insert(sc->tp, pag_group(sc->sa.pag), agbno, 1,
145 			XFS_EXTENT_BUSY_SKIP_DISCARD);
146 
147 	return 0;
148 }
149 
150 /* Are there any uncommitted reap operations? */
151 static inline bool xreap_dirty(const struct xreap_state *rs)
152 {
153 	if (rs->force_roll)
154 		return true;
155 	if (rs->deferred)
156 		return true;
157 	if (rs->invalidated)
158 		return true;
159 	if (rs->total_deferred)
160 		return true;
161 	return false;
162 }
163 
164 #define XREAP_MAX_BINVAL	(2048)
165 
166 /*
167  * Decide if we want to roll the transaction after reaping an extent.  We don't
168  * want to overrun the transaction reservation, so we prohibit more than
169  * 128 EFIs per transaction.  For the same reason, we limit the number
170  * of buffer invalidations to 2048.
171  */
172 static inline bool xreap_want_roll(const struct xreap_state *rs)
173 {
174 	if (rs->force_roll)
175 		return true;
176 	if (rs->deferred > XREP_MAX_ITRUNCATE_EFIS)
177 		return true;
178 	if (rs->invalidated > XREAP_MAX_BINVAL)
179 		return true;
180 	return false;
181 }
182 
183 static inline void xreap_reset(struct xreap_state *rs)
184 {
185 	rs->total_deferred += rs->deferred;
186 	rs->deferred = 0;
187 	rs->invalidated = 0;
188 	rs->force_roll = false;
189 }
190 
191 #define XREAP_MAX_DEFER_CHAIN		(2048)
192 
193 /*
194  * Decide if we want to finish the deferred ops that are attached to the scrub
195  * transaction.  We don't want to queue huge chains of deferred ops because
196  * that can consume a lot of log space and kernel memory.  Hence we trigger an
197  * xfs_defer_finish if there are more than 2048 deferred reap operations or the
198  * caller did some real work.
199  */
200 static inline bool
201 xreap_want_defer_finish(const struct xreap_state *rs)
202 {
203 	if (rs->force_roll)
204 		return true;
205 	if (rs->total_deferred > XREAP_MAX_DEFER_CHAIN)
206 		return true;
207 	return false;
208 }
209 
210 static inline void xreap_defer_finish_reset(struct xreap_state *rs)
211 {
212 	rs->total_deferred = 0;
213 	rs->deferred = 0;
214 	rs->invalidated = 0;
215 	rs->force_roll = false;
216 }
217 
218 /*
219  * Compute the maximum length of a buffer cache scan (in units of sectors),
220  * given a quantity of fs blocks.
221  */
222 xfs_daddr_t
223 xrep_bufscan_max_sectors(
224 	struct xfs_mount	*mp,
225 	xfs_extlen_t		fsblocks)
226 {
227 	int			max_fsbs;
228 
229 	/* Remote xattr values are the largest buffers that we support. */
230 	max_fsbs = xfs_attr3_max_rmt_blocks(mp);
231 
232 	return XFS_FSB_TO_BB(mp, min_t(xfs_extlen_t, fsblocks, max_fsbs));
233 }
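
/*
 * Worked example (added for illustration; the numbers assume 4k fs blocks
 * and 512-byte sectors): xfs_attr3_max_rmt_blocks() caps the scan at the
 * size of the largest remote xattr value buffer, roughly 17 fs blocks for
 * a 64k value once per-block headers are counted.  Scanning from one daddr
 * of a 100-block extent therefore probes at most XFS_FSB_TO_BB(mp, 17) ==
 * 136 sectors rather than 800.
 */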
234 
235 /*
236  * Return an incore buffer from a sector scan, or NULL if there are no buffers
237  * left to return.
238  */
239 struct xfs_buf *
240 xrep_bufscan_advance(
241 	struct xfs_mount	*mp,
242 	struct xrep_bufscan	*scan)
243 {
244 	scan->__sector_count += scan->daddr_step;
245 	while (scan->__sector_count <= scan->max_sectors) {
246 		struct xfs_buf	*bp = NULL;
247 		int		error;
248 
249 		error = xfs_buf_incore(mp->m_ddev_targp, scan->daddr,
250 				scan->__sector_count, XBF_LIVESCAN, &bp);
251 		if (!error)
252 			return bp;
253 
254 		scan->__sector_count += scan->daddr_step;
255 	}
256 
257 	return NULL;
258 }
259 
260 /* Try to invalidate the incore buffers for an extent that we're freeing. */
261 STATIC void
262 xreap_agextent_binval(
263 	struct xreap_state	*rs,
264 	xfs_agblock_t		agbno,
265 	xfs_extlen_t		*aglenp)
266 {
267 	struct xfs_scrub	*sc = rs->sc;
268 	struct xfs_perag	*pag = sc->sa.pag;
269 	struct xfs_mount	*mp = sc->mp;
270 	xfs_agblock_t		agbno_next = agbno + *aglenp;
271 	xfs_agblock_t		bno = agbno;
272 
273 	/*
274 	 * Avoid invalidating AG headers and post-EOFS blocks because we never
275 	 * own those.
276 	 */
277 	if (!xfs_verify_agbno(pag, agbno) ||
278 	    !xfs_verify_agbno(pag, agbno_next - 1))
279 		return;
280 
281 	/*
282 	 * If there are incore buffers for these blocks, invalidate them.  We
283 	 * assume that the lack of any other known owners means that the buffer
284 	 * can be locked without risk of deadlocking.  The buffer cache cannot
285 	 * detect aliasing, so employ nested loops to scan for incore buffers
286 	 * of any plausible size.
287 	 */
288 	while (bno < agbno_next) {
289 		struct xrep_bufscan	scan = {
290 			.daddr		= xfs_agbno_to_daddr(pag, bno),
291 			.max_sectors	= xrep_bufscan_max_sectors(mp,
292 							agbno_next - bno),
293 			.daddr_step	= XFS_FSB_TO_BB(mp, 1),
294 		};
295 		struct xfs_buf	*bp;
296 
297 		while ((bp = xrep_bufscan_advance(mp, &scan)) != NULL) {
298 			xfs_trans_bjoin(sc->tp, bp);
299 			xfs_trans_binval(sc->tp, bp);
300 			rs->invalidated++;
301 
302 			/*
303 			 * Stop invalidating if we've hit the limit; we should
304 			 * still have enough reservation left to free however
305 			 * far we've gotten.
306 			 */
307 			if (rs->invalidated > XREAP_MAX_BINVAL) {
308 				*aglenp -= agbno_next - bno;
309 				goto out;
310 			}
311 		}
312 
313 		bno++;
314 	}
315 
316 out:
317 	trace_xreap_agextent_binval(pag_group(sc->sa.pag), agbno, *aglenp);
318 }
319 
320 /*
321  * Figure out the longest run of blocks that we can dispose of with a single
322  * call.  Cross-linked blocks should have their reverse mappings removed, but
323  * single-owner extents can be freed.  AGFL blocks can only be put back one at
324  * a time.
325  */
326 STATIC int
327 xreap_agextent_select(
328 	struct xreap_state	*rs,
329 	xfs_agblock_t		agbno,
330 	xfs_agblock_t		agbno_next,
331 	bool			*crosslinked,
332 	xfs_extlen_t		*aglenp)
333 {
334 	struct xfs_scrub	*sc = rs->sc;
335 	struct xfs_btree_cur	*cur;
336 	xfs_agblock_t		bno = agbno + 1;
337 	xfs_extlen_t		len = 1;
338 	int			error;
339 
340 	/*
341 	 * Determine if there are any other rmap records covering the first
342 	 * block of this extent.  If so, the block is crosslinked.
343 	 */
344 	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
345 			sc->sa.pag);
346 	error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo,
347 			crosslinked);
348 	if (error)
349 		goto out_cur;
350 
351 	/* AGFL blocks can only be dealt with one at a time. */
352 	if (rs->resv == XFS_AG_RESV_AGFL)
353 		goto out_found;
354 
355 	/*
356 	 * Figure out how many of the subsequent blocks have the same crosslink
357 	 * status.
358 	 */
359 	while (bno < agbno_next) {
360 		bool		also_crosslinked;
361 
362 		error = xfs_rmap_has_other_keys(cur, bno, 1, rs->oinfo,
363 				&also_crosslinked);
364 		if (error)
365 			goto out_cur;
366 
367 		if (*crosslinked != also_crosslinked)
368 			break;
369 
370 		len++;
371 		bno++;
372 	}
373 
374 out_found:
375 	*aglenp = len;
376 	trace_xreap_agextent_select(pag_group(sc->sa.pag), agbno, len,
377 			*crosslinked);
378 out_cur:
379 	xfs_btree_del_cursor(cur, error);
380 	return error;
381 }
382 
383 /*
384  * Dispose of as much of the beginning of this AG extent as possible.  The
385  * number of blocks disposed of will be returned in @aglenp.
386  */
387 STATIC int
388 xreap_agextent_iter(
389 	struct xreap_state	*rs,
390 	xfs_agblock_t		agbno,
391 	xfs_extlen_t		*aglenp,
392 	bool			crosslinked)
393 {
394 	struct xfs_scrub	*sc = rs->sc;
395 	xfs_fsblock_t		fsbno;
396 	int			error = 0;
397 
398 	ASSERT(rs->resv != XFS_AG_RESV_METAFILE);
399 
400 	fsbno = xfs_agbno_to_fsb(sc->sa.pag, agbno);
401 
402 	/*
403 	 * If there are other rmappings, this block is cross linked and must
404 	 * not be freed.  Remove the reverse mapping and move on.  Otherwise,
405 	 * we were the only owner of the block, so free the extent, which will
406 	 * also remove the rmap.
407 	 *
408 	 * XXX: XFS doesn't support detecting the case where a single block
409 	 * metadata structure is crosslinked with a multi-block structure
410 	 * because the buffer cache doesn't detect aliasing problems, so we
411 	 * can't fix 100% of crosslinking problems (yet).  The verifiers will
412 	 * blow up on writeout, the filesystem will shut down, and the admin gets
413 	 * to run xfs_repair.
414 	 */
415 	if (crosslinked) {
416 		trace_xreap_dispose_unmap_extent(pag_group(sc->sa.pag), agbno,
417 				*aglenp);
418 
419 		rs->force_roll = true;
420 
421 		if (rs->oinfo == &XFS_RMAP_OINFO_COW) {
422 			/*
423 			 * If we're unmapping CoW staging extents, remove the
424 			 * records from the refcountbt, which will remove the
425 			 * rmap record as well.
426 			 */
427 			xfs_refcount_free_cow_extent(sc->tp, false, fsbno,
428 					*aglenp);
429 			return 0;
430 		}
431 
432 		return xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno,
433 				*aglenp, rs->oinfo);
434 	}
435 
436 	trace_xreap_dispose_free_extent(pag_group(sc->sa.pag), agbno, *aglenp);
437 
438 	/*
439 	 * Invalidate as many buffers as we can, starting at agbno.  If this
440 	 * function sets *aglenp to zero, the transaction is full of logged
441 	 * buffer invalidations, so we need to return early so that we can
442 	 * roll and retry.
443 	 */
444 	xreap_agextent_binval(rs, agbno, aglenp);
445 	if (*aglenp == 0) {
446 		ASSERT(xreap_want_roll(rs));
447 		return 0;
448 	}
449 
450 	/*
451 	 * If we're getting rid of CoW staging extents, use deferred work items
452 	 * to remove the refcountbt records (which removes the rmap records)
453 	 * and free the extent.  We're not worried about the system going down
454 	 * here because log recovery walks the refcount btree to clean out the
455 	 * CoW staging extents.
456 	 */
457 	if (rs->oinfo == &XFS_RMAP_OINFO_COW) {
458 		ASSERT(rs->resv == XFS_AG_RESV_NONE);
459 
460 		xfs_refcount_free_cow_extent(sc->tp, false, fsbno, *aglenp);
461 		error = xfs_free_extent_later(sc->tp, fsbno, *aglenp, NULL,
462 				rs->resv, XFS_FREE_EXTENT_SKIP_DISCARD);
463 		if (error)
464 			return error;
465 
466 		rs->force_roll = true;
467 		return 0;
468 	}
469 
470 	/* Put blocks back on the AGFL one at a time. */
471 	if (rs->resv == XFS_AG_RESV_AGFL) {
472 		ASSERT(*aglenp == 1);
473 		error = xreap_put_freelist(sc, agbno);
474 		if (error)
475 			return error;
476 
477 		rs->force_roll = true;
478 		return 0;
479 	}
480 
481 	/*
482 	 * Use deferred frees to get rid of the old btree blocks to try to
483 	 * minimize the window in which we could crash and lose the old blocks.
484 	 * Add a defer ops barrier every other extent to avoid stressing the
485 	 * system with large EFIs.
486 	 */
487 	error = xfs_free_extent_later(sc->tp, fsbno, *aglenp, rs->oinfo,
488 			rs->resv, XFS_FREE_EXTENT_SKIP_DISCARD);
489 	if (error)
490 		return error;
491 
492 	rs->deferred++;
493 	if (rs->deferred % 2 == 0)
494 		xfs_defer_add_barrier(sc->tp);
495 	return 0;
496 }
497 
498 /*
499  * Break an AG metadata extent into sub-extents by fate (crosslinked, not
500  * crosslinked), and dispose of each sub-extent separately.
501  */
502 STATIC int
503 xreap_agmeta_extent(
504 	uint32_t		agbno,
505 	uint32_t		len,
506 	void			*priv)
507 {
508 	struct xreap_state	*rs = priv;
509 	struct xfs_scrub	*sc = rs->sc;
510 	xfs_agblock_t		agbno_next = agbno + len;
511 	int			error = 0;
512 
513 	ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
514 	ASSERT(sc->ip == NULL);
515 
516 	while (agbno < agbno_next) {
517 		xfs_extlen_t	aglen;
518 		bool		crosslinked;
519 
520 		error = xreap_agextent_select(rs, agbno, agbno_next,
521 				&crosslinked, &aglen);
522 		if (error)
523 			return error;
524 
525 		error = xreap_agextent_iter(rs, agbno, &aglen, crosslinked);
526 		if (error)
527 			return error;
528 
529 		if (xreap_want_defer_finish(rs)) {
530 			error = xrep_defer_finish(sc);
531 			if (error)
532 				return error;
533 			xreap_defer_finish_reset(rs);
534 		} else if (xreap_want_roll(rs)) {
535 			error = xrep_roll_ag_trans(sc);
536 			if (error)
537 				return error;
538 			xreap_reset(rs);
539 		}
540 
541 		agbno += aglen;
542 	}
543 
544 	return 0;
545 }
546 
547 /* Dispose of every block of every AG metadata extent in the bitmap. */
548 int
549 xrep_reap_agblocks(
550 	struct xfs_scrub		*sc,
551 	struct xagb_bitmap		*bitmap,
552 	const struct xfs_owner_info	*oinfo,
553 	enum xfs_ag_resv_type		type)
554 {
555 	struct xreap_state		rs = {
556 		.sc			= sc,
557 		.oinfo			= oinfo,
558 		.resv			= type,
559 	};
560 	int				error;
561 
562 	ASSERT(xfs_has_rmapbt(sc->mp));
563 	ASSERT(sc->ip == NULL);
564 
565 	error = xagb_bitmap_walk(bitmap, xreap_agmeta_extent, &rs);
566 	if (error)
567 		return error;
568 
569 	if (xreap_dirty(&rs))
570 		return xrep_defer_finish(sc);
571 
572 	return 0;
573 }
574 
575 /*
576  * Break a file metadata extent into sub-extents by fate (crosslinked, not
577  * crosslinked), and dispose of each sub-extent separately.  The extent must
578  * not cross an AG boundary.
579  */
580 STATIC int
581 xreap_fsmeta_extent(
582 	uint64_t		fsbno,
583 	uint64_t		len,
584 	void			*priv)
585 {
586 	struct xreap_state	*rs = priv;
587 	struct xfs_scrub	*sc = rs->sc;
588 	xfs_agnumber_t		agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
589 	xfs_agblock_t		agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
590 	xfs_agblock_t		agbno_next = agbno + len;
591 	int			error = 0;
592 
593 	ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
594 	ASSERT(sc->ip != NULL);
595 	ASSERT(!sc->sa.pag);
596 
597 	/*
598 	 * We're reaping blocks after repairing file metadata, which means that
599 	 * we have to init the xchk_ag structure ourselves.
600 	 */
601 	sc->sa.pag = xfs_perag_get(sc->mp, agno);
602 	if (!sc->sa.pag)
603 		return -EFSCORRUPTED;
604 
605 	error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &sc->sa.agf_bp);
606 	if (error)
607 		goto out_pag;
608 
609 	while (agbno < agbno_next) {
610 		xfs_extlen_t	aglen;
611 		bool		crosslinked;
612 
613 		error = xreap_agextent_select(rs, agbno, agbno_next,
614 				&crosslinked, &aglen);
615 		if (error)
616 			goto out_agf;
617 
618 		error = xreap_agextent_iter(rs, agbno, &aglen, crosslinked);
619 		if (error)
620 			goto out_agf;
621 
622 		if (xreap_want_defer_finish(rs)) {
623 			/*
624 			 * xrep_defer_finish() holds the AGF buffer across the
625 			 * deferred chain processing.
626 			 */
627 			error = xrep_defer_finish(sc);
628 			if (error)
629 				goto out_agf;
630 			xreap_defer_finish_reset(rs);
631 		} else if (xreap_want_roll(rs)) {
632 			/*
633 			 * Hold the AGF buffer across the transaction roll so
634 			 * that we don't have to reattach it to the scrub
635 			 * context.
636 			 */
637 			xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
638 			error = xfs_trans_roll_inode(&sc->tp, sc->ip);
639 			xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
640 			if (error)
641 				goto out_agf;
642 			xreap_reset(rs);
643 		}
644 
645 		agbno += aglen;
646 	}
647 
648 out_agf:
649 	xfs_trans_brelse(sc->tp, sc->sa.agf_bp);
650 	sc->sa.agf_bp = NULL;
651 out_pag:
652 	xfs_perag_put(sc->sa.pag);
653 	sc->sa.pag = NULL;
654 	return error;
655 }
656 
657 /*
658  * Dispose of every block of every fs metadata extent in the bitmap.
659  * Do not use this to dispose of the mappings in an ondisk inode fork.
660  */
661 int
662 xrep_reap_fsblocks(
663 	struct xfs_scrub		*sc,
664 	struct xfsb_bitmap		*bitmap,
665 	const struct xfs_owner_info	*oinfo)
666 {
667 	struct xreap_state		rs = {
668 		.sc			= sc,
669 		.oinfo			= oinfo,
670 		.resv			= XFS_AG_RESV_NONE,
671 	};
672 	int				error;
673 
674 	ASSERT(xfs_has_rmapbt(sc->mp));
675 	ASSERT(sc->ip != NULL);
676 
677 	error = xfsb_bitmap_walk(bitmap, xreap_fsmeta_extent, &rs);
678 	if (error)
679 		return error;
680 
681 	if (xreap_dirty(&rs))
682 		return xrep_defer_finish(sc);
683 
684 	return 0;
685 }
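
/*
 * Usage sketch (added; illustrative): CoW fork repair is the sort of
 * caller this is written for.  Given a populated bitmap of staging
 * extents (the bitmap name here is hypothetical), it would do:
 *
 *	error = xrep_reap_fsblocks(sc, &old_cow_blocks, &XFS_RMAP_OINFO_COW);
 */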
686 
687 #ifdef CONFIG_XFS_RT
688 /*
689  * Figure out the longest run of blocks that we can dispose of with a single
690  * call.  Cross-linked blocks should have their reverse mappings removed, but
691  * single-owner extents can be freed.  Units are rt blocks, not rt extents.
692  */
693 STATIC int
694 xreap_rgextent_select(
695 	struct xreap_state	*rs,
696 	xfs_rgblock_t		rgbno,
697 	xfs_rgblock_t		rgbno_next,
698 	bool			*crosslinked,
699 	xfs_extlen_t		*rglenp)
700 {
701 	struct xfs_scrub	*sc = rs->sc;
702 	struct xfs_btree_cur	*cur;
703 	xfs_rgblock_t		bno = rgbno + 1;
704 	xfs_extlen_t		len = 1;
705 	int			error;
706 
707 	/*
708 	 * Determine if there are any other rmap records covering the first
709 	 * block of this extent.  If so, the block is crosslinked.
710 	 */
711 	cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg);
712 	error = xfs_rmap_has_other_keys(cur, rgbno, 1, rs->oinfo,
713 			crosslinked);
714 	if (error)
715 		goto out_cur;
716 
717 	/*
718 	 * Figure out how many of the subsequent blocks have the same crosslink
719 	 * status.
720 	 */
721 	while (bno < rgbno_next) {
722 		bool		also_crosslinked;
723 
724 		error = xfs_rmap_has_other_keys(cur, bno, 1, rs->oinfo,
725 				&also_crosslinked);
726 		if (error)
727 			goto out_cur;
728 
729 		if (*crosslinked != also_crosslinked)
730 			break;
731 
732 		len++;
733 		bno++;
734 	}
735 
736 	*rglenp = len;
737 	trace_xreap_agextent_select(rtg_group(sc->sr.rtg), rgbno, len,
738 			*crosslinked);
739 out_cur:
740 	xfs_btree_del_cursor(cur, error);
741 	return error;
742 }
743 
744 /*
745  * Dispose of as much of the beginning of this rtgroup extent as possible.
746  * The number of blocks disposed of will be returned in @rglenp.
747  */
748 STATIC int
749 xreap_rgextent_iter(
750 	struct xreap_state	*rs,
751 	xfs_rgblock_t		rgbno,
752 	xfs_extlen_t		*rglenp,
753 	bool			crosslinked)
754 {
755 	struct xfs_scrub	*sc = rs->sc;
756 	xfs_rtblock_t		rtbno;
757 	int			error;
758 
759 	/*
760 	 * The only caller so far is CoW fork repair, so we only know how to
761 	 * unlink or free CoW staging extents.  Here we don't have to worry
762 	 * about invalidating buffers!
763 	 */
764 	if (rs->oinfo != &XFS_RMAP_OINFO_COW) {
765 		ASSERT(rs->oinfo == &XFS_RMAP_OINFO_COW);
766 		return -EFSCORRUPTED;
767 	}
768 	ASSERT(rs->resv == XFS_AG_RESV_NONE);
769 
770 	rtbno = xfs_rgbno_to_rtb(sc->sr.rtg, rgbno);
771 
772 	/*
773 	 * If there are other rmappings, this block is cross linked and must
774 	 * not be freed.  Remove the forward and reverse mapping and move on.
775 	 */
776 	if (crosslinked) {
777 		trace_xreap_dispose_unmap_extent(rtg_group(sc->sr.rtg), rgbno,
778 				*rglenp);
779 
780 		xfs_refcount_free_cow_extent(sc->tp, true, rtbno, *rglenp);
781 		rs->deferred++;
782 		return 0;
783 	}
784 
785 	trace_xreap_dispose_free_extent(rtg_group(sc->sr.rtg), rgbno, *rglenp);
786 
787 	/*
788 	 * The CoW staging extent is not crosslinked.  Use deferred work items
789 	 * to remove the refcountbt records (which removes the rmap records)
790 	 * and free the extent.  We're not worried about the system going down
791 	 * here because log recovery walks the refcount btree to clean out the
792 	 * CoW staging extents.
793 	 */
794 	xfs_refcount_free_cow_extent(sc->tp, true, rtbno, *rglenp);
795 	error = xfs_free_extent_later(sc->tp, rtbno, *rglenp, NULL,
796 			rs->resv,
797 			XFS_FREE_EXTENT_REALTIME |
798 			XFS_FREE_EXTENT_SKIP_DISCARD);
799 	if (error)
800 		return error;
801 
802 	rs->deferred++;
803 	return 0;
804 }
805 
806 #define XREAP_RTGLOCK_ALL	(XFS_RTGLOCK_BITMAP | \
807 				 XFS_RTGLOCK_RMAP | \
808 				 XFS_RTGLOCK_REFCOUNT)
809 
810 /*
811  * Break a rt file metadata extent into sub-extents by fate (crosslinked, not
812  * crosslinked), and dispose of each sub-extent separately.  The extent must
813  * be aligned to a realtime extent.
814  */
815 STATIC int
816 xreap_rtmeta_extent(
817 	uint64_t		rtbno,
818 	uint64_t		len,
819 	void			*priv)
820 {
821 	struct xreap_state	*rs = priv;
822 	struct xfs_scrub	*sc = rs->sc;
823 	xfs_rgblock_t		rgbno = xfs_rtb_to_rgbno(sc->mp, rtbno);
824 	xfs_rgblock_t		rgbno_next = rgbno + len;
825 	int			error = 0;
826 
827 	ASSERT(sc->ip != NULL);
828 	ASSERT(!sc->sr.rtg);
829 
830 	/*
831 	 * We're reaping blocks after repairing file metadata, which means that
832 	 * we have to init the xchk_ag structure ourselves.
833 	 */
834 	sc->sr.rtg = xfs_rtgroup_get(sc->mp, xfs_rtb_to_rgno(sc->mp, rtbno));
835 	if (!sc->sr.rtg)
836 		return -EFSCORRUPTED;
837 
838 	xfs_rtgroup_lock(sc->sr.rtg, XREAP_RTGLOCK_ALL);
839 
840 	while (rgbno < rgbno_next) {
841 		xfs_extlen_t	rglen;
842 		bool		crosslinked;
843 
844 		error = xreap_rgextent_select(rs, rgbno, rgbno_next,
845 				&crosslinked, &rglen);
846 		if (error)
847 			goto out_unlock;
848 
849 		error = xreap_rgextent_iter(rs, rgbno, &rglen, crosslinked);
850 		if (error)
851 			goto out_unlock;
852 
853 		if (xreap_want_defer_finish(rs)) {
854 			error = xfs_defer_finish(&sc->tp);
855 			if (error)
856 				goto out_unlock;
857 			xreap_defer_finish_reset(rs);
858 		} else if (xreap_want_roll(rs)) {
859 			error = xfs_trans_roll_inode(&sc->tp, sc->ip);
860 			if (error)
861 				goto out_unlock;
862 			xreap_reset(rs);
863 		}
864 
865 		rgbno += rglen;
866 	}
867 
868 out_unlock:
869 	xfs_rtgroup_unlock(sc->sr.rtg, XREAP_RTGLOCK_ALL);
870 	xfs_rtgroup_put(sc->sr.rtg);
871 	sc->sr.rtg = NULL;
872 	return error;
873 }
874 
875 /*
876  * Dispose of every block of every rt metadata extent in the bitmap.
877  * Do not use this to dispose of the mappings in an ondisk inode fork.
878  */
879 int
880 xrep_reap_rtblocks(
881 	struct xfs_scrub		*sc,
882 	struct xrtb_bitmap		*bitmap,
883 	const struct xfs_owner_info	*oinfo)
884 {
885 	struct xreap_state		rs = {
886 		.sc			= sc,
887 		.oinfo			= oinfo,
888 		.resv			= XFS_AG_RESV_NONE,
889 	};
890 	int				error;
891 
892 	ASSERT(xfs_has_rmapbt(sc->mp));
893 	ASSERT(sc->ip != NULL);
894 
895 	error = xrtb_bitmap_walk(bitmap, xreap_rtmeta_extent, &rs);
896 	if (error)
897 		return error;
898 
899 	if (xreap_dirty(&rs))
900 		return xrep_defer_finish(sc);
901 
902 	return 0;
903 }
904 #endif /* CONFIG_XFS_RT */
905 
906 /*
907  * Dispose of every block of an old metadata btree that used to be rooted in a
908  * metadata directory file.
909  */
910 int
911 xrep_reap_metadir_fsblocks(
912 	struct xfs_scrub		*sc,
913 	struct xfsb_bitmap		*bitmap)
914 {
915 	/*
916 	 * Reap old metadir btree blocks with XFS_AG_RESV_NONE because the old
917 	 * blocks are no longer mapped by the inode, and inode metadata space
918 	 * reservations can only account freed space to the i_nblocks.
919 	 */
920 	struct xfs_owner_info		oinfo;
921 	struct xreap_state		rs = {
922 		.sc			= sc,
923 		.oinfo			= &oinfo,
924 		.resv			= XFS_AG_RESV_NONE,
925 	};
926 	int				error;
927 
928 	ASSERT(xfs_has_rmapbt(sc->mp));
929 	ASSERT(sc->ip != NULL);
930 	ASSERT(xfs_is_metadir_inode(sc->ip));
931 
932 	xfs_rmap_ino_bmbt_owner(&oinfo, sc->ip->i_ino, XFS_DATA_FORK);
933 
934 	error = xfsb_bitmap_walk(bitmap, xreap_fsmeta_extent, &rs);
935 	if (error)
936 		return error;
937 
938 	if (xreap_dirty(&rs))
939 		return xrep_defer_finish(sc);
940 
941 	return 0;
942 }
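
/*
 * Usage sketch (added; illustrative): a repair of a metadir-rooted btree
 * such as the rt rmap btree might call this as follows, where the bitmap
 * name is hypothetical:
 *
 *	error = xrep_reap_metadir_fsblocks(sc, &rr->old_rtrmapbt_blocks);
 */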
943 
944 /*
945  * Metadata files are not supposed to share blocks with anything else.
946  * If blocks are shared, we remove the reverse mapping (thus reducing the
947  * crosslink factor); if blocks are not shared, we also need to free them.
948  *
949  * This first step determines the longest subset of the passed-in imap
950  * (starting at its beginning) that is either crosslinked or not crosslinked.
951  * The blockcount will be adjusted down as needed.
952  */
953 STATIC int
954 xreap_bmapi_select(
955 	struct xfs_scrub	*sc,
956 	struct xfs_inode	*ip,
957 	int			whichfork,
958 	struct xfs_bmbt_irec	*imap,
959 	bool			*crosslinked)
960 {
961 	struct xfs_owner_info	oinfo;
962 	struct xfs_btree_cur	*cur;
963 	xfs_filblks_t		len = 1;
964 	xfs_agblock_t		bno;
965 	xfs_agblock_t		agbno;
966 	xfs_agblock_t		agbno_next;
967 	int			error;
968 
969 	agbno = XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock);
970 	agbno_next = agbno + imap->br_blockcount;
971 
972 	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
973 			sc->sa.pag);
974 
975 	xfs_rmap_ino_owner(&oinfo, ip->i_ino, whichfork, imap->br_startoff);
976 	error = xfs_rmap_has_other_keys(cur, agbno, 1, &oinfo, crosslinked);
977 	if (error)
978 		goto out_cur;
979 
980 	bno = agbno + 1;
981 	while (bno < agbno_next) {
982 		bool		also_crosslinked;
983 
984 		oinfo.oi_offset++;
985 		error = xfs_rmap_has_other_keys(cur, bno, 1, &oinfo,
986 				&also_crosslinked);
987 		if (error)
988 			goto out_cur;
989 
990 		if (also_crosslinked != *crosslinked)
991 			break;
992 
993 		len++;
994 		bno++;
995 	}
996 
997 	imap->br_blockcount = len;
998 	trace_xreap_bmapi_select(pag_group(sc->sa.pag), agbno, len,
999 			*crosslinked);
1000 out_cur:
1001 	xfs_btree_del_cursor(cur, error);
1002 	return error;
1003 }
1004 
1005 /*
1006  * Decide if this buffer can be joined to a transaction.  This is true for most
1007  * buffers, but there are two cases that we want to catch: large remote xattr
1008  * value buffers are not logged and can overflow the buffer log item dirty
1009  * bitmap size; and oversized cached buffers if things have really gone
1010  * haywire.
1011  */
1012 static inline bool
1013 xreap_buf_loggable(
1014 	const struct xfs_buf	*bp)
1015 {
1016 	int			i;
1017 
1018 	for (i = 0; i < bp->b_map_count; i++) {
1019 		int		chunks;
1020 		int		map_size;
1021 
1022 		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
1023 				XFS_BLF_CHUNK);
1024 		map_size = DIV_ROUND_UP(chunks, NBWORD);
1025 		if (map_size > XFS_BLF_DATAMAP_SIZE)
1026 			return false;
1027 	}
1028 
1029 	return true;
1030 }
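
/*
 * Worked example (added; assumes the usual constants XFS_BLF_CHUNK == 128,
 * NBWORD == 32, and XFS_BLF_DATAMAP_SIZE == 16): a 64k single-map buffer
 * covers 512 logging chunks and needs 16 dirty bitmap words, so it is just
 * barely loggable.  A 17-block remote xattr value buffer on a 4k-block fs
 * (69632 bytes) needs 17 words, so it must be staled and released instead
 * of logged.
 */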
1031 
1032 /*
1033  * Invalidate any buffers for this file mapping.  The @imap blockcount may be
1034  * adjusted downward if we need to roll the transaction.
1035  */
1036 STATIC int
1037 xreap_bmapi_binval(
1038 	struct xfs_scrub	*sc,
1039 	struct xfs_inode	*ip,
1040 	int			whichfork,
1041 	struct xfs_bmbt_irec	*imap)
1042 {
1043 	struct xfs_mount	*mp = sc->mp;
1044 	struct xfs_perag	*pag = sc->sa.pag;
1045 	int			bmap_flags = xfs_bmapi_aflag(whichfork);
1046 	xfs_fileoff_t		off;
1047 	xfs_fileoff_t		max_off;
1048 	xfs_extlen_t		scan_blocks;
1049 	xfs_agblock_t		bno;
1050 	xfs_agblock_t		agbno;
1051 	xfs_agblock_t		agbno_next;
1052 	unsigned int		invalidated = 0;
1053 	int			error;
1054 
1055 	/*
1056 	 * Avoid invalidating AG headers and post-EOFS blocks because we never
1057 	 * own those.
1058 	 */
1059 	agbno = bno = XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock);
1060 	agbno_next = agbno + imap->br_blockcount;
1061 	if (!xfs_verify_agbno(pag, agbno) ||
1062 	    !xfs_verify_agbno(pag, agbno_next - 1))
1063 		return 0;
1064 
1065 	/*
1066 	 * Buffers for file blocks can span multiple contiguous mappings.  This
1067 	 * means that for each block in the mapping, there could exist an
1068 	 * xfs_buf indexed by that block with any length up to the maximum
1069 	 * buffer size (remote xattr values) or to the next hole in the fork.
1070 	 * To set up our binval scan, first we need to figure out the location
1071 	 * of the next hole.
1072 	 */
1073 	off = imap->br_startoff + imap->br_blockcount;
1074 	max_off = off + xfs_attr3_max_rmt_blocks(mp);
1075 	while (off < max_off) {
1076 		struct xfs_bmbt_irec	hmap;
1077 		int			nhmaps = 1;
1078 
1079 		error = xfs_bmapi_read(ip, off, max_off - off, &hmap,
1080 				&nhmaps, bmap_flags);
1081 		if (error)
1082 			return error;
1083 		if (nhmaps != 1 || hmap.br_startblock == DELAYSTARTBLOCK) {
1084 			ASSERT(0);
1085 			return -EFSCORRUPTED;
1086 		}
1087 
1088 		if (!xfs_bmap_is_real_extent(&hmap))
1089 			break;
1090 
1091 		off = hmap.br_startoff + hmap.br_blockcount;
1092 	}
1093 	scan_blocks = off - imap->br_startoff;
1094 
1095 	trace_xreap_bmapi_binval_scan(sc, imap, scan_blocks);
1096 
1097 	/*
1098 	 * If there are incore buffers for these blocks, invalidate them.  If
1099 	 * we can't (try)lock the buffer we assume it's owned by someone else
1100 	 * and leave it alone.  The buffer cache cannot detect aliasing, so
1101 	 * employ nested loops to detect incore buffers of any plausible size.
1102 	 */
1103 	while (bno < agbno_next) {
1104 		struct xrep_bufscan	scan = {
1105 			.daddr		= xfs_agbno_to_daddr(pag, bno),
1106 			.max_sectors	= xrep_bufscan_max_sectors(mp,
1107 								scan_blocks),
1108 			.daddr_step	= XFS_FSB_TO_BB(mp, 1),
1109 		};
1110 		struct xfs_buf		*bp;
1111 
1112 		while ((bp = xrep_bufscan_advance(mp, &scan)) != NULL) {
1113 			if (xreap_buf_loggable(bp)) {
1114 				xfs_trans_bjoin(sc->tp, bp);
1115 				xfs_trans_binval(sc->tp, bp);
1116 			} else {
1117 				xfs_buf_stale(bp);
1118 				xfs_buf_relse(bp);
1119 			}
1120 			invalidated++;
1121 
1122 			/*
1123 			 * Stop invalidating if we've hit the limit; we should
1124 			 * still have enough reservation left to free however
1125 			 * much of the mapping we've seen so far.
1126 			 */
1127 			if (invalidated > XREAP_MAX_BINVAL) {
1128 				imap->br_blockcount = agbno_next - bno;
1129 				goto out;
1130 			}
1131 		}
1132 
1133 		bno++;
1134 		scan_blocks--;
1135 	}
1136 
1137 out:
1138 	trace_xreap_bmapi_binval(pag_group(sc->sa.pag), agbno,
1139 			imap->br_blockcount);
1140 	return 0;
1141 }
1142 
1143 /*
1144  * Dispose of as much of the beginning of this file fork mapping as possible.
1145  * The number of blocks disposed of is returned in @imap->br_blockcount.
1146  */
1147 STATIC int
1148 xrep_reap_bmapi_iter(
1149 	struct xfs_scrub		*sc,
1150 	struct xfs_inode		*ip,
1151 	int				whichfork,
1152 	struct xfs_bmbt_irec		*imap,
1153 	bool				crosslinked)
1154 {
1155 	int				error;
1156 
1157 	if (crosslinked) {
1158 		/*
1159 		 * If there are other rmappings, this block is cross linked and
1160 		 * must not be freed.  Remove the reverse mapping, leave the
1161 		 * buffer cache in its possibly confused state, and move on.
1162 		 * We don't want to risk discarding valid data buffers from
1163 		 * anybody else who thinks they own the block, even though that
1164 		 * runs the risk of stale buffer warnings in the future.
1165 		 */
1166 		trace_xreap_dispose_unmap_extent(pag_group(sc->sa.pag),
1167 				XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock),
1168 				imap->br_blockcount);
1169 
1170 		/*
1171 		 * Schedule removal of the mapping from the fork.  We use
1172 		 * deferred log intents in this function to control the exact
1173 		 * sequence of metadata updates.
1174 		 */
1175 		xfs_bmap_unmap_extent(sc->tp, ip, whichfork, imap);
1176 		xfs_trans_mod_dquot_byino(sc->tp, ip, XFS_TRANS_DQ_BCOUNT,
1177 				-(int64_t)imap->br_blockcount);
1178 		xfs_rmap_unmap_extent(sc->tp, ip, whichfork, imap);
1179 		return 0;
1180 	}
1181 
1182 	/*
1183 	 * If the block is not crosslinked, we can invalidate all the incore
1184 	 * buffers for the extent, and then free the extent.  This is a bit of
1185 	 * a mess since we don't detect discontiguous buffers that are indexed
1186 	 * by a block starting before the first block of the extent but overlap
1187 	 * anyway.
1188 	 */
1189 	trace_xreap_dispose_free_extent(pag_group(sc->sa.pag),
1190 			XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock),
1191 			imap->br_blockcount);
1192 
1193 	/*
1194 	 * Invalidate as many buffers as we can, starting at the beginning of
1195 	 * this mapping.  If this function sets blockcount to zero, the
1196 	 * transaction is full of logged buffer invalidations, so we need to
1197 	 * return early so that we can roll and retry.
1198 	 */
1199 	error = xreap_bmapi_binval(sc, ip, whichfork, imap);
1200 	if (error || imap->br_blockcount == 0)
1201 		return error;
1202 
1203 	/*
1204 	 * Schedule removal of the mapping from the fork.  We use deferred log
1205 	 * intents in this function to control the exact sequence of metadata
1206 	 * updates.
1207 	 */
1208 	xfs_bmap_unmap_extent(sc->tp, ip, whichfork, imap);
1209 	xfs_trans_mod_dquot_byino(sc->tp, ip, XFS_TRANS_DQ_BCOUNT,
1210 			-(int64_t)imap->br_blockcount);
1211 	return xfs_free_extent_later(sc->tp, imap->br_startblock,
1212 			imap->br_blockcount, NULL, XFS_AG_RESV_NONE,
1213 			XFS_FREE_EXTENT_SKIP_DISCARD);
1214 }
1215 
1216 /*
1217  * Dispose of as much of this file extent as we can.  Upon successful return,
1218  * the imap will reflect the mapping that was removed from the fork.
1219  */
1220 STATIC int
1221 xreap_ifork_extent(
1222 	struct xfs_scrub		*sc,
1223 	struct xfs_inode		*ip,
1224 	int				whichfork,
1225 	struct xfs_bmbt_irec		*imap)
1226 {
1227 	xfs_agnumber_t			agno;
1228 	bool				crosslinked;
1229 	int				error;
1230 
1231 	ASSERT(sc->sa.pag == NULL);
1232 
1233 	trace_xreap_ifork_extent(sc, ip, whichfork, imap);
1234 
1235 	agno = XFS_FSB_TO_AGNO(sc->mp, imap->br_startblock);
1236 	sc->sa.pag = xfs_perag_get(sc->mp, agno);
1237 	if (!sc->sa.pag)
1238 		return -EFSCORRUPTED;
1239 
1240 	error = xfs_alloc_read_agf(sc->sa.pag, sc->tp, 0, &sc->sa.agf_bp);
1241 	if (error)
1242 		goto out_pag;
1243 
1244 	/*
1245 	 * Decide the fate of the blocks at the beginning of the mapping, then
1246 	 * update the mapping to use it with the unmap calls.
1247 	 */
1248 	error = xreap_bmapi_select(sc, ip, whichfork, imap, &crosslinked);
1249 	if (error)
1250 		goto out_agf;
1251 
1252 	error = xrep_reap_bmapi_iter(sc, ip, whichfork, imap, crosslinked);
1253 	if (error)
1254 		goto out_agf;
1255 
1256 out_agf:
1257 	xfs_trans_brelse(sc->tp, sc->sa.agf_bp);
1258 	sc->sa.agf_bp = NULL;
1259 out_pag:
1260 	xfs_perag_put(sc->sa.pag);
1261 	sc->sa.pag = NULL;
1262 	return error;
1263 }
1264 
1265 /*
1266  * Dispose of each block mapped to the given fork of the given file.  Callers
1267  * must hold ILOCK_EXCL, and ip can only be sc->ip or sc->tempip.  The fork
1268  * must not have any delalloc reservations.
1269  */
1270 int
1271 xrep_reap_ifork(
1272 	struct xfs_scrub	*sc,
1273 	struct xfs_inode	*ip,
1274 	int			whichfork)
1275 {
1276 	xfs_fileoff_t		off = 0;
1277 	int			bmap_flags = xfs_bmapi_aflag(whichfork);
1278 	int			error;
1279 
1280 	ASSERT(xfs_has_rmapbt(sc->mp));
1281 	ASSERT(ip == sc->ip || ip == sc->tempip);
1282 	ASSERT(whichfork == XFS_ATTR_FORK || !XFS_IS_REALTIME_INODE(ip));
1283 
1284 	while (off < XFS_MAX_FILEOFF) {
1285 		struct xfs_bmbt_irec	imap;
1286 		int			nimaps = 1;
1287 
1288 		/* Read the next extent, skip past holes and delalloc. */
1289 		error = xfs_bmapi_read(ip, off, XFS_MAX_FILEOFF - off, &imap,
1290 				&nimaps, bmap_flags);
1291 		if (error)
1292 			return error;
1293 		if (nimaps != 1 || imap.br_startblock == DELAYSTARTBLOCK) {
1294 			ASSERT(0);
1295 			return -EFSCORRUPTED;
1296 		}
1297 
1298 		/*
1299 		 * If this is a real space mapping, reap as much of it as we
1300 		 * can in a single transaction.
1301 		 */
1302 		if (xfs_bmap_is_real_extent(&imap)) {
1303 			error = xreap_ifork_extent(sc, ip, whichfork, &imap);
1304 			if (error)
1305 				return error;
1306 
1307 			error = xfs_defer_finish(&sc->tp);
1308 			if (error)
1309 				return error;
1310 		}
1311 
1312 		off = imap.br_startoff + imap.br_blockcount;
1313 	}
1314 
1315 	return 0;
1316 }
1317
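
/*
 * Usage sketch (added; illustrative): the temporary-file repair strategies
 * are the expected callers here, since ip may only be sc->ip or sc->tempip;
 * e.g. disposing of the scratch file's attr fork blocks after an exchange:
 *
 *	error = xrep_reap_ifork(sc, sc->tempip, XFS_ATTR_FORK);
 */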