xref: /linux/fs/xfs/xfs_bmap_util.c (revision 75c8c50fa16a23f8ac89ea74834ae8ddd1558d75)
10b61f8a4SDave Chinner // SPDX-License-Identifier: GPL-2.0
268988114SDave Chinner /*
368988114SDave Chinner  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4c24b5dfaSDave Chinner  * Copyright (c) 2012 Red Hat, Inc.
568988114SDave Chinner  * All Rights Reserved.
668988114SDave Chinner  */
768988114SDave Chinner #include "xfs.h"
868988114SDave Chinner #include "xfs_fs.h"
970a9883cSDave Chinner #include "xfs_shared.h"
10239880efSDave Chinner #include "xfs_format.h"
11239880efSDave Chinner #include "xfs_log_format.h"
12239880efSDave Chinner #include "xfs_trans_resv.h"
1368988114SDave Chinner #include "xfs_bit.h"
1468988114SDave Chinner #include "xfs_mount.h"
153ab78df2SDarrick J. Wong #include "xfs_defer.h"
1668988114SDave Chinner #include "xfs_inode.h"
1768988114SDave Chinner #include "xfs_btree.h"
18239880efSDave Chinner #include "xfs_trans.h"
1968988114SDave Chinner #include "xfs_alloc.h"
2068988114SDave Chinner #include "xfs_bmap.h"
2168988114SDave Chinner #include "xfs_bmap_util.h"
22a4fbe6abSDave Chinner #include "xfs_bmap_btree.h"
2368988114SDave Chinner #include "xfs_rtalloc.h"
2468988114SDave Chinner #include "xfs_error.h"
2568988114SDave Chinner #include "xfs_quota.h"
2668988114SDave Chinner #include "xfs_trans_space.h"
2768988114SDave Chinner #include "xfs_trace.h"
28c24b5dfaSDave Chinner #include "xfs_icache.h"
29f86f4037SDarrick J. Wong #include "xfs_iomap.h"
30f86f4037SDarrick J. Wong #include "xfs_reflink.h"
3168988114SDave Chinner 
3268988114SDave Chinner /* Kernel only BMAP related definitions and functions */
3368988114SDave Chinner 
3468988114SDave Chinner /*
3568988114SDave Chinner  * Convert the given file system block to a disk block.  We have to treat it
3668988114SDave Chinner  * differently based on whether the file is a real time file or not, because the
3768988114SDave Chinner  * bmap code does.
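 * Realtime file blocks are addressed linearly on the realtime device, so a
 * plain FSB-to-basic-block shift is enough there; data device blocks encode
 * an AG number and AG block, so they go through XFS_FSB_TO_DADDR instead.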
3868988114SDave Chinner  */
3968988114SDave Chinner xfs_daddr_t
4068988114SDave Chinner xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
4168988114SDave Chinner {
42ecfc28a4SChristoph Hellwig 	if (XFS_IS_REALTIME_INODE(ip))
43ecfc28a4SChristoph Hellwig 		return XFS_FSB_TO_BB(ip->i_mount, fsb);
44ecfc28a4SChristoph Hellwig 	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
4568988114SDave Chinner }
4668988114SDave Chinner 
4768988114SDave Chinner /*
483fbbbea3SDave Chinner  * Routine to zero an extent on disk allocated to the specific inode.
493fbbbea3SDave Chinner  *
503fbbbea3SDave Chinner  * The VFS functions take a linearised filesystem block offset, so we have to
513fbbbea3SDave Chinner  * convert the sparse xfs fsb to the right format first.
523fbbbea3SDave Chinner  * VFS types are real funky, too.
533fbbbea3SDave Chinner  */
543fbbbea3SDave Chinner int
553fbbbea3SDave Chinner xfs_zero_extent(
563fbbbea3SDave Chinner 	struct xfs_inode	*ip,
573fbbbea3SDave Chinner 	xfs_fsblock_t		start_fsb,
583fbbbea3SDave Chinner 	xfs_off_t		count_fsb)
593fbbbea3SDave Chinner {
603fbbbea3SDave Chinner 	struct xfs_mount	*mp = ip->i_mount;
6130fa529eSChristoph Hellwig 	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
623fbbbea3SDave Chinner 	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
633fbbbea3SDave Chinner 	sector_t		block = XFS_BB_TO_FSBT(mp, sector);
643fbbbea3SDave Chinner 
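	/*
	 * blkdev_issue_zeroout() takes 512-byte sector units, so convert the
	 * filesystem block offset and count by shifting with
	 * (s_blocksize_bits - 9).
	 */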
6530fa529eSChristoph Hellwig 	return blkdev_issue_zeroout(target->bt_bdev,
663dc29161SMatthew Wilcox 		block << (mp->m_super->s_blocksize_bits - 9),
673dc29161SMatthew Wilcox 		count_fsb << (mp->m_super->s_blocksize_bits - 9),
68ee472d83SChristoph Hellwig 		GFP_NOFS, 0);
693fbbbea3SDave Chinner }
703fbbbea3SDave Chinner 
71bb9c2e54SDave Chinner #ifdef CONFIG_XFS_RT
7268988114SDave Chinner int
7368988114SDave Chinner xfs_bmap_rtalloc(
749d5e8492SDarrick J. Wong 	struct xfs_bmalloca	*ap)
7568988114SDave Chinner {
769d5e8492SDarrick J. Wong 	struct xfs_mount	*mp = ap->ip->i_mount;
779d5e8492SDarrick J. Wong 	xfs_fileoff_t		orig_offset = ap->offset;
789d5e8492SDarrick J. Wong 	xfs_rtblock_t		rtb;
7968988114SDave Chinner 	xfs_extlen_t		prod = 0;  /* product factor for allocators */
800703a8e1SDave Chinner 	xfs_extlen_t		mod = 0;   /* offset remainder from alignment */
8168988114SDave Chinner 	xfs_extlen_t		ralen = 0; /* realtime allocation length */
8268988114SDave Chinner 	xfs_extlen_t		align;     /* minimum allocation alignment */
839d5e8492SDarrick J. Wong 	xfs_extlen_t		orig_length = ap->length;
849d5e8492SDarrick J. Wong 	xfs_extlen_t		minlen = mp->m_sb.sb_rextsize;
859d5e8492SDarrick J. Wong 	xfs_extlen_t		raminlen;
869d5e8492SDarrick J. Wong 	bool			rtlocked = false;
87676a659bSDarrick J. Wong 	bool			ignore_locality = false;
889d5e8492SDarrick J. Wong 	int			error;
8968988114SDave Chinner 
9068988114SDave Chinner 	align = xfs_get_extsz_hint(ap->ip);
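	/*
	 * Allocation strategy: first try an allocation aligned to the extent
	 * size hint; if that fails, retry with plain rextsize alignment; if a
	 * locality target still cannot be satisfied, retry once more with
	 * locality ignored (see the retry paths at the bottom).
	 */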
919d5e8492SDarrick J. Wong retry:
9268988114SDave Chinner 	prod = align / mp->m_sb.sb_rextsize;
9368988114SDave Chinner 	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
9468988114SDave Chinner 					align, 1, ap->eof, 0,
9568988114SDave Chinner 					ap->conv, &ap->offset, &ap->length);
9668988114SDave Chinner 	if (error)
9768988114SDave Chinner 		return error;
9868988114SDave Chinner 	ASSERT(ap->length);
9968988114SDave Chinner 	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
10068988114SDave Chinner 
10168988114SDave Chinner 	/*
1029d5e8492SDarrick J. Wong 	 * If we shifted the file offset downward to satisfy an extent size
1039d5e8492SDarrick J. Wong 	 * hint, increase minlen by that amount so that the allocator won't
1049d5e8492SDarrick J. Wong 	 * give us an allocation that's too short to cover at least one of the
1059d5e8492SDarrick J. Wong 	 * blocks that the caller asked for.
1069d5e8492SDarrick J. Wong 	 */
1079d5e8492SDarrick J. Wong 	if (ap->offset != orig_offset)
1089d5e8492SDarrick J. Wong 		minlen += orig_offset - ap->offset;
1099d5e8492SDarrick J. Wong 
1109d5e8492SDarrick J. Wong 	/*
11168988114SDave Chinner 	 * If the offset & length are not perfectly aligned
11268988114SDave Chinner 	 * then kill prod, it will just get us in trouble.
11368988114SDave Chinner 	 */
1140703a8e1SDave Chinner 	div_u64_rem(ap->offset, align, &mod);
1150703a8e1SDave Chinner 	if (mod || ap->length % align)
11668988114SDave Chinner 		prod = 1;
11768988114SDave Chinner 	/*
11868988114SDave Chinner 	 * Set ralen to be the actual requested length in rtextents.
11968988114SDave Chinner 	 */
12068988114SDave Chinner 	ralen = ap->length / mp->m_sb.sb_rextsize;
12168988114SDave Chinner 	/*
12268988114SDave Chinner 	 * If the old value was close enough to MAXEXTLEN that
12368988114SDave Chinner 	 * we rounded up to it, cut it back so it's valid again.
12468988114SDave Chinner 	 * Note that if it's a really large request (bigger than
12568988114SDave Chinner 	 * MAXEXTLEN), we don't hear about that number, and can't
12668988114SDave Chinner 	 * adjust the starting point to match it.
12768988114SDave Chinner 	 */
12868988114SDave Chinner 	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
12968988114SDave Chinner 		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
13068988114SDave Chinner 
13168988114SDave Chinner 	/*
1324b680afbSDave Chinner 	 * Lock out modifications to both the RT bitmap and summary inodes
13368988114SDave Chinner 	 */
1349d5e8492SDarrick J. Wong 	if (!rtlocked) {
135f4a0660dSDarrick J. Wong 		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
13668988114SDave Chinner 		xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
137f4a0660dSDarrick J. Wong 		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
1384b680afbSDave Chinner 		xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
1399d5e8492SDarrick J. Wong 		rtlocked = true;
1409d5e8492SDarrick J. Wong 	}
14168988114SDave Chinner 
14268988114SDave Chinner 	/*
14368988114SDave Chinner 	 * If it's an allocation to an empty file at offset 0,
14468988114SDave Chinner 	 * pick an extent that will space things out in the rt area.
14568988114SDave Chinner 	 */
14668988114SDave Chinner 	if (ap->eof && ap->offset == 0) {
1473f649ab7SKees Cook 		xfs_rtblock_t rtx; /* realtime extent no */
14868988114SDave Chinner 
14968988114SDave Chinner 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
15068988114SDave Chinner 		if (error)
15168988114SDave Chinner 			return error;
15268988114SDave Chinner 		ap->blkno = rtx * mp->m_sb.sb_rextsize;
15368988114SDave Chinner 	} else {
15468988114SDave Chinner 		ap->blkno = 0;
15568988114SDave Chinner 	}
15668988114SDave Chinner 
15768988114SDave Chinner 	xfs_bmap_adjacent(ap);
15868988114SDave Chinner 
15968988114SDave Chinner 	/*
16068988114SDave Chinner 	 * Realtime allocation, done through xfs_rtallocate_extent.
16168988114SDave Chinner 	 */
162676a659bSDarrick J. Wong 	if (ignore_locality)
163676a659bSDarrick J. Wong 		ap->blkno = 0;
164676a659bSDarrick J. Wong 	else
16568988114SDave Chinner 		do_div(ap->blkno, mp->m_sb.sb_rextsize);
16668988114SDave Chinner 	rtb = ap->blkno;
16768988114SDave Chinner 	ap->length = ralen;
1689d5e8492SDarrick J. Wong 	raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
1699d5e8492SDarrick J. Wong 	error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
170089ec2f8SChristoph Hellwig 			&ralen, ap->wasdel, prod, &rtb);
171089ec2f8SChristoph Hellwig 	if (error)
17268988114SDave Chinner 		return error;
173089ec2f8SChristoph Hellwig 
1749d5e8492SDarrick J. Wong 	if (rtb != NULLRTBLOCK) {
1759d5e8492SDarrick J. Wong 		ap->blkno = rtb * mp->m_sb.sb_rextsize;
1769d5e8492SDarrick J. Wong 		ap->length = ralen * mp->m_sb.sb_rextsize;
1779d5e8492SDarrick J. Wong 		ap->ip->i_nblocks += ap->length;
17868988114SDave Chinner 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
17968988114SDave Chinner 		if (ap->wasdel)
1809d5e8492SDarrick J. Wong 			ap->ip->i_delayed_blks -= ap->length;
18168988114SDave Chinner 		/*
18268988114SDave Chinner 		 * Adjust the disk quota also. This was reserved
18368988114SDave Chinner 		 * earlier.
18468988114SDave Chinner 		 */
18568988114SDave Chinner 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
18668988114SDave Chinner 			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
1879d5e8492SDarrick J. Wong 					XFS_TRANS_DQ_RTBCOUNT, ap->length);
1889d5e8492SDarrick J. Wong 		return 0;
18968988114SDave Chinner 	}
1909d5e8492SDarrick J. Wong 
1919d5e8492SDarrick J. Wong 	if (align > mp->m_sb.sb_rextsize) {
1929d5e8492SDarrick J. Wong 		/*
1939d5e8492SDarrick J. Wong 		 * We previously enlarged the request length to try to satisfy
1949d5e8492SDarrick J. Wong 		 * an extent size hint.  The allocator didn't return anything,
1959d5e8492SDarrick J. Wong 		 * so reset the parameters to the original values and try again
1969d5e8492SDarrick J. Wong 		 * without alignment criteria.
1979d5e8492SDarrick J. Wong 		 */
1989d5e8492SDarrick J. Wong 		ap->offset = orig_offset;
1999d5e8492SDarrick J. Wong 		ap->length = orig_length;
2009d5e8492SDarrick J. Wong 		minlen = align = mp->m_sb.sb_rextsize;
2019d5e8492SDarrick J. Wong 		goto retry;
2029d5e8492SDarrick J. Wong 	}
2039d5e8492SDarrick J. Wong 
204676a659bSDarrick J. Wong 	if (!ignore_locality && ap->blkno != 0) {
205676a659bSDarrick J. Wong 		/*
206676a659bSDarrick J. Wong 		 * If we can't allocate near a specific rt extent, try again
207676a659bSDarrick J. Wong 		 * without locality criteria.
208676a659bSDarrick J. Wong 		 */
209676a659bSDarrick J. Wong 		ignore_locality = true;
210676a659bSDarrick J. Wong 		goto retry;
211676a659bSDarrick J. Wong 	}
212676a659bSDarrick J. Wong 
2139d5e8492SDarrick J. Wong 	ap->blkno = NULLFSBLOCK;
2149d5e8492SDarrick J. Wong 	ap->length = 0;
21568988114SDave Chinner 	return 0;
21668988114SDave Chinner }
217bb9c2e54SDave Chinner #endif /* CONFIG_XFS_RT */
21868988114SDave Chinner 
21968988114SDave Chinner /*
22068988114SDave Chinner  * Extent tree block counting routines.
22168988114SDave Chinner  */
22268988114SDave Chinner 
22368988114SDave Chinner /*
224d29cb3e4SDarrick J. Wong  * Count leaf blocks given a range of extent records.  Delayed allocation
225d29cb3e4SDarrick J. Wong  * extents are not counted towards the totals.
22668988114SDave Chinner  */
227e17a5c6fSChristoph Hellwig xfs_extnum_t
22868988114SDave Chinner xfs_bmap_count_leaves(
229d29cb3e4SDarrick J. Wong 	struct xfs_ifork	*ifp,
230e7f5d5caSDarrick J. Wong 	xfs_filblks_t		*count)
23168988114SDave Chinner {
232b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
233e17a5c6fSChristoph Hellwig 	struct xfs_bmbt_irec	got;
234b2b1712aSChristoph Hellwig 	xfs_extnum_t		numrecs = 0;
23568988114SDave Chinner 
236b2b1712aSChristoph Hellwig 	for_each_xfs_iext(ifp, &icur, &got) {
237e17a5c6fSChristoph Hellwig 		if (!isnullstartblock(got.br_startblock)) {
238e17a5c6fSChristoph Hellwig 			*count += got.br_blockcount;
239e17a5c6fSChristoph Hellwig 			numrecs++;
24068988114SDave Chinner 		}
24168988114SDave Chinner 	}
242b2b1712aSChristoph Hellwig 
243e17a5c6fSChristoph Hellwig 	return numrecs;
244d29cb3e4SDarrick J. Wong }
24568988114SDave Chinner 
24668988114SDave Chinner /*
247d29cb3e4SDarrick J. Wong  * Count fsblocks of the given fork.  Delayed allocation extents are
248d29cb3e4SDarrick J. Wong  * not counted towards the totals.
24968988114SDave Chinner  */
250e7f5d5caSDarrick J. Wong int
25168988114SDave Chinner xfs_bmap_count_blocks(
252e7f5d5caSDarrick J. Wong 	struct xfs_trans	*tp,
253e7f5d5caSDarrick J. Wong 	struct xfs_inode	*ip,
254e7f5d5caSDarrick J. Wong 	int			whichfork,
255e7f5d5caSDarrick J. Wong 	xfs_extnum_t		*nextents,
256e7f5d5caSDarrick J. Wong 	xfs_filblks_t		*count)
25768988114SDave Chinner {
258fec40e22SDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
259fec40e22SDarrick J. Wong 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
260fec40e22SDarrick J. Wong 	struct xfs_btree_cur	*cur;
261fec40e22SDarrick J. Wong 	xfs_extlen_t		btblocks = 0;
262e7f5d5caSDarrick J. Wong 	int			error;
26368988114SDave Chinner 
264e7f5d5caSDarrick J. Wong 	*nextents = 0;
265e7f5d5caSDarrick J. Wong 	*count = 0;
266fec40e22SDarrick J. Wong 
267e7f5d5caSDarrick J. Wong 	if (!ifp)
26868988114SDave Chinner 		return 0;
269e7f5d5caSDarrick J. Wong 
270f7e67b20SChristoph Hellwig 	switch (ifp->if_format) {
271e7f5d5caSDarrick J. Wong 	case XFS_DINODE_FMT_BTREE:
272e7f5d5caSDarrick J. Wong 		error = xfs_iread_extents(tp, ip, whichfork);
273e7f5d5caSDarrick J. Wong 		if (error)
274e7f5d5caSDarrick J. Wong 			return error;
27568988114SDave Chinner 
276fec40e22SDarrick J. Wong 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
277fec40e22SDarrick J. Wong 		error = xfs_btree_count_blocks(cur, &btblocks);
278fec40e22SDarrick J. Wong 		xfs_btree_del_cursor(cur, error);
279fec40e22SDarrick J. Wong 		if (error)
280fec40e22SDarrick J. Wong 			return error;
28168988114SDave Chinner 
282fec40e22SDarrick J. Wong 		/*
283fec40e22SDarrick J. Wong 		 * xfs_btree_count_blocks includes the root block contained in
284fec40e22SDarrick J. Wong 		 * the inode fork in @btblocks, so subtract one because we're
285fec40e22SDarrick J. Wong 		 * only interested in allocated disk blocks.
286fec40e22SDarrick J. Wong 		 */
287fec40e22SDarrick J. Wong 		*count += btblocks - 1;
288fec40e22SDarrick J. Wong 
28953004ee7SGustavo A. R. Silva 		fallthrough;
290fec40e22SDarrick J. Wong 	case XFS_DINODE_FMT_EXTENTS:
291fec40e22SDarrick J. Wong 		*nextents = xfs_bmap_count_leaves(ifp, count);
292fec40e22SDarrick J. Wong 		break;
293e7f5d5caSDarrick J. Wong 	}
29468988114SDave Chinner 
29568988114SDave Chinner 	return 0;
29668988114SDave Chinner }
29768988114SDave Chinner 
298abbf9e8aSChristoph Hellwig static int
299abbf9e8aSChristoph Hellwig xfs_getbmap_report_one(
300f86f4037SDarrick J. Wong 	struct xfs_inode	*ip,
301abbf9e8aSChristoph Hellwig 	struct getbmapx		*bmv,
302232b5194SChristoph Hellwig 	struct kgetbmap		*out,
303abbf9e8aSChristoph Hellwig 	int64_t			bmv_end,
304abbf9e8aSChristoph Hellwig 	struct xfs_bmbt_irec	*got)
305f86f4037SDarrick J. Wong {
306232b5194SChristoph Hellwig 	struct kgetbmap		*p = out + bmv->bmv_entries;
307d392bc81SChristoph Hellwig 	bool			shared = false;
308f86f4037SDarrick J. Wong 	int			error;
309f86f4037SDarrick J. Wong 
310d392bc81SChristoph Hellwig 	error = xfs_reflink_trim_around_shared(ip, got, &shared);
311f86f4037SDarrick J. Wong 	if (error)
312f86f4037SDarrick J. Wong 		return error;
313f86f4037SDarrick J. Wong 
314abbf9e8aSChristoph Hellwig 	if (isnullstartblock(got->br_startblock) ||
315abbf9e8aSChristoph Hellwig 	    got->br_startblock == DELAYSTARTBLOCK) {
316f86f4037SDarrick J. Wong 		/*
317abbf9e8aSChristoph Hellwig 		 * Delalloc extents that start beyond EOF can occur due to
318abbf9e8aSChristoph Hellwig 		 * speculative EOF allocation when the delalloc extent is larger
319abbf9e8aSChristoph Hellwig 		 * than the largest freespace extent at conversion time.  These
320abbf9e8aSChristoph Hellwig 		 * extents cannot be converted by data writeback, so can exist
321abbf9e8aSChristoph Hellwig 		 * here even if we are not supposed to be finding delalloc
322abbf9e8aSChristoph Hellwig 		 * extents.
323f86f4037SDarrick J. Wong 		 */
324abbf9e8aSChristoph Hellwig 		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
325abbf9e8aSChristoph Hellwig 			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
326abbf9e8aSChristoph Hellwig 
327abbf9e8aSChristoph Hellwig 		p->bmv_oflags |= BMV_OF_DELALLOC;
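		/*
		 * The getbmap ABI reports delalloc reservations with a block
		 * value of -2; holes use -1 (see xfs_getbmap_report_hole).
		 */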
328abbf9e8aSChristoph Hellwig 		p->bmv_block = -2;
329f86f4037SDarrick J. Wong 	} else {
330abbf9e8aSChristoph Hellwig 		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
331f86f4037SDarrick J. Wong 	}
332f86f4037SDarrick J. Wong 
333abbf9e8aSChristoph Hellwig 	if (got->br_state == XFS_EXT_UNWRITTEN &&
334abbf9e8aSChristoph Hellwig 	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
335abbf9e8aSChristoph Hellwig 		p->bmv_oflags |= BMV_OF_PREALLOC;
336abbf9e8aSChristoph Hellwig 
337abbf9e8aSChristoph Hellwig 	if (shared)
338abbf9e8aSChristoph Hellwig 		p->bmv_oflags |= BMV_OF_SHARED;
339abbf9e8aSChristoph Hellwig 
340abbf9e8aSChristoph Hellwig 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
341abbf9e8aSChristoph Hellwig 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
342abbf9e8aSChristoph Hellwig 
343abbf9e8aSChristoph Hellwig 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
344abbf9e8aSChristoph Hellwig 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
345abbf9e8aSChristoph Hellwig 	bmv->bmv_entries++;
346f86f4037SDarrick J. Wong 	return 0;
347f86f4037SDarrick J. Wong }
348f86f4037SDarrick J. Wong 
349abbf9e8aSChristoph Hellwig static void
350abbf9e8aSChristoph Hellwig xfs_getbmap_report_hole(
351abbf9e8aSChristoph Hellwig 	struct xfs_inode	*ip,
352abbf9e8aSChristoph Hellwig 	struct getbmapx		*bmv,
353232b5194SChristoph Hellwig 	struct kgetbmap		*out,
354abbf9e8aSChristoph Hellwig 	int64_t			bmv_end,
355abbf9e8aSChristoph Hellwig 	xfs_fileoff_t		bno,
356abbf9e8aSChristoph Hellwig 	xfs_fileoff_t		end)
357abbf9e8aSChristoph Hellwig {
358232b5194SChristoph Hellwig 	struct kgetbmap		*p = out + bmv->bmv_entries;
359abbf9e8aSChristoph Hellwig 
360abbf9e8aSChristoph Hellwig 	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
361abbf9e8aSChristoph Hellwig 		return;
362abbf9e8aSChristoph Hellwig 
363abbf9e8aSChristoph Hellwig 	p->bmv_block = -1;
364abbf9e8aSChristoph Hellwig 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
365abbf9e8aSChristoph Hellwig 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
366abbf9e8aSChristoph Hellwig 
367abbf9e8aSChristoph Hellwig 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
368abbf9e8aSChristoph Hellwig 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
369abbf9e8aSChristoph Hellwig 	bmv->bmv_entries++;
370abbf9e8aSChristoph Hellwig }
371abbf9e8aSChristoph Hellwig 
372abbf9e8aSChristoph Hellwig static inline bool
373abbf9e8aSChristoph Hellwig xfs_getbmap_full(
374abbf9e8aSChristoph Hellwig 	struct getbmapx		*bmv)
375abbf9e8aSChristoph Hellwig {
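	/*
	 * bmv_count includes the header structure passed in from userspace,
	 * so only bmv_count - 1 entries of @out are available for extent
	 * records.
	 */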
376abbf9e8aSChristoph Hellwig 	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
377abbf9e8aSChristoph Hellwig }
378abbf9e8aSChristoph Hellwig 
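/*
 * Advance @rec past the part that has just been reported so the caller can
 * report the remainder of the original bmbt record (shared and unshared parts
 * are reported as separate getbmapx entries).  Returns false once the record
 * has been fully consumed, i.e. when it already ends at @total_end.
 */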
379abbf9e8aSChristoph Hellwig static bool
380abbf9e8aSChristoph Hellwig xfs_getbmap_next_rec(
381abbf9e8aSChristoph Hellwig 	struct xfs_bmbt_irec	*rec,
382abbf9e8aSChristoph Hellwig 	xfs_fileoff_t		total_end)
383abbf9e8aSChristoph Hellwig {
384abbf9e8aSChristoph Hellwig 	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;
385abbf9e8aSChristoph Hellwig 
386abbf9e8aSChristoph Hellwig 	if (end == total_end)
387abbf9e8aSChristoph Hellwig 		return false;
388abbf9e8aSChristoph Hellwig 
389abbf9e8aSChristoph Hellwig 	rec->br_startoff += rec->br_blockcount;
390abbf9e8aSChristoph Hellwig 	if (!isnullstartblock(rec->br_startblock) &&
391abbf9e8aSChristoph Hellwig 	    rec->br_startblock != DELAYSTARTBLOCK)
392abbf9e8aSChristoph Hellwig 		rec->br_startblock += rec->br_blockcount;
393abbf9e8aSChristoph Hellwig 	rec->br_blockcount = total_end - end;
394abbf9e8aSChristoph Hellwig 	return true;
395abbf9e8aSChristoph Hellwig }
396abbf9e8aSChristoph Hellwig 
39768988114SDave Chinner /*
39868988114SDave Chinner  * Get inode's extents as described in bmv, and format for output.
39968988114SDave Chinner  * Fills the kernel-internal @out array with one record per extent or hole
40068988114SDave Chinner  * until all extents in the requested range have been mapped or until the
40168988114SDave Chinner  * available bmv->bmv_count output slots have been filled.
40368988114SDave Chinner  */
40468988114SDave Chinner int						/* error code */
40568988114SDave Chinner xfs_getbmap(
406232b5194SChristoph Hellwig 	struct xfs_inode	*ip,
40768988114SDave Chinner 	struct getbmapx		*bmv,		/* user bmap structure */
408232b5194SChristoph Hellwig 	struct kgetbmap		*out)
40968988114SDave Chinner {
410abbf9e8aSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
411abbf9e8aSChristoph Hellwig 	int			iflags = bmv->bmv_iflags;
412232b5194SChristoph Hellwig 	int			whichfork, lock, error = 0;
413abbf9e8aSChristoph Hellwig 	int64_t			bmv_end, max_len;
414abbf9e8aSChristoph Hellwig 	xfs_fileoff_t		bno, first_bno;
415abbf9e8aSChristoph Hellwig 	struct xfs_ifork	*ifp;
416abbf9e8aSChristoph Hellwig 	struct xfs_bmbt_irec	got, rec;
417abbf9e8aSChristoph Hellwig 	xfs_filblks_t		len;
418b2b1712aSChristoph Hellwig 	struct xfs_iext_cursor	icur;
41968988114SDave Chinner 
420232b5194SChristoph Hellwig 	if (bmv->bmv_iflags & ~BMV_IF_VALID)
421232b5194SChristoph Hellwig 		return -EINVAL;
422f86f4037SDarrick J. Wong #ifndef DEBUG
423f86f4037SDarrick J. Wong 	/* Only allow CoW fork queries if we're debugging. */
424f86f4037SDarrick J. Wong 	if (iflags & BMV_IF_COWFORK)
425f86f4037SDarrick J. Wong 		return -EINVAL;
426f86f4037SDarrick J. Wong #endif
427f86f4037SDarrick J. Wong 	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
428f86f4037SDarrick J. Wong 		return -EINVAL;
429f86f4037SDarrick J. Wong 
430abbf9e8aSChristoph Hellwig 	if (bmv->bmv_length < -1)
431abbf9e8aSChristoph Hellwig 		return -EINVAL;
432abbf9e8aSChristoph Hellwig 	bmv->bmv_entries = 0;
433abbf9e8aSChristoph Hellwig 	if (bmv->bmv_length == 0)
434abbf9e8aSChristoph Hellwig 		return 0;
435abbf9e8aSChristoph Hellwig 
436f86f4037SDarrick J. Wong 	if (iflags & BMV_IF_ATTRFORK)
437f86f4037SDarrick J. Wong 		whichfork = XFS_ATTR_FORK;
438f86f4037SDarrick J. Wong 	else if (iflags & BMV_IF_COWFORK)
439f86f4037SDarrick J. Wong 		whichfork = XFS_COW_FORK;
440f86f4037SDarrick J. Wong 	else
441f86f4037SDarrick J. Wong 		whichfork = XFS_DATA_FORK;
442abbf9e8aSChristoph Hellwig 	ifp = XFS_IFORK_PTR(ip, whichfork);
44368988114SDave Chinner 
44468988114SDave Chinner 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
445f86f4037SDarrick J. Wong 	switch (whichfork) {
446abbf9e8aSChristoph Hellwig 	case XFS_ATTR_FORK:
447abbf9e8aSChristoph Hellwig 		if (!XFS_IFORK_Q(ip))
448abbf9e8aSChristoph Hellwig 			goto out_unlock_iolock;
449abbf9e8aSChristoph Hellwig 
450abbf9e8aSChristoph Hellwig 		max_len = 1LL << 32;
451abbf9e8aSChristoph Hellwig 		lock = xfs_ilock_attr_map_shared(ip);
452abbf9e8aSChristoph Hellwig 		break;
453abbf9e8aSChristoph Hellwig 	case XFS_COW_FORK:
454abbf9e8aSChristoph Hellwig 		/* No CoW fork? Just return */
455abbf9e8aSChristoph Hellwig 		if (!ifp)
456abbf9e8aSChristoph Hellwig 			goto out_unlock_iolock;
457abbf9e8aSChristoph Hellwig 
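		/*
		 * With a CoW extent size hint, speculative CoW fork
		 * preallocations can extend beyond EOF, so report out to the
		 * maximum supported file size; otherwise stop at EOF.
		 */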
458abbf9e8aSChristoph Hellwig 		if (xfs_get_cowextsz_hint(ip))
459abbf9e8aSChristoph Hellwig 			max_len = mp->m_super->s_maxbytes;
460abbf9e8aSChristoph Hellwig 		else
461abbf9e8aSChristoph Hellwig 			max_len = XFS_ISIZE(ip);
462abbf9e8aSChristoph Hellwig 
463abbf9e8aSChristoph Hellwig 		lock = XFS_ILOCK_SHARED;
464abbf9e8aSChristoph Hellwig 		xfs_ilock(ip, lock);
465abbf9e8aSChristoph Hellwig 		break;
466f86f4037SDarrick J. Wong 	case XFS_DATA_FORK:
467efa70be1SChristoph Hellwig 		if (!(iflags & BMV_IF_DELALLOC) &&
46813d2c10bSChristoph Hellwig 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
4692451337dSDave Chinner 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
47068988114SDave Chinner 			if (error)
47168988114SDave Chinner 				goto out_unlock_iolock;
472efa70be1SChristoph Hellwig 
47368988114SDave Chinner 			/*
474efa70be1SChristoph Hellwig 			 * Even after flushing the inode, there can still be
475efa70be1SChristoph Hellwig 			 * delalloc blocks on the inode beyond EOF due to
476efa70be1SChristoph Hellwig 			 * speculative preallocation.  These are not removed
477efa70be1SChristoph Hellwig 			 * until the release function is called or the inode
478efa70be1SChristoph Hellwig 			 * is inactivated.  Hence we cannot assert here that
479efa70be1SChristoph Hellwig 			 * ip->i_delayed_blks == 0.
48068988114SDave Chinner 			 */
48168988114SDave Chinner 		}
48268988114SDave Chinner 
483abbf9e8aSChristoph Hellwig 		if (xfs_get_extsz_hint(ip) ||
484db07349dSChristoph Hellwig 		    (ip->i_diflags &
485abbf9e8aSChristoph Hellwig 		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
486abbf9e8aSChristoph Hellwig 			max_len = mp->m_super->s_maxbytes;
487abbf9e8aSChristoph Hellwig 		else
488abbf9e8aSChristoph Hellwig 			max_len = XFS_ISIZE(ip);
489abbf9e8aSChristoph Hellwig 
490309ecac8SChristoph Hellwig 		lock = xfs_ilock_data_map_shared(ip);
491f86f4037SDarrick J. Wong 		break;
492efa70be1SChristoph Hellwig 	}
49368988114SDave Chinner 
494f7e67b20SChristoph Hellwig 	switch (ifp->if_format) {
495abbf9e8aSChristoph Hellwig 	case XFS_DINODE_FMT_EXTENTS:
496abbf9e8aSChristoph Hellwig 	case XFS_DINODE_FMT_BTREE:
497abbf9e8aSChristoph Hellwig 		break;
498abbf9e8aSChristoph Hellwig 	case XFS_DINODE_FMT_LOCAL:
499abbf9e8aSChristoph Hellwig 		/* Local format inode forks report no extents. */
50068988114SDave Chinner 		goto out_unlock_ilock;
501abbf9e8aSChristoph Hellwig 	default:
502abbf9e8aSChristoph Hellwig 		error = -EINVAL;
503abbf9e8aSChristoph Hellwig 		goto out_unlock_ilock;
50468988114SDave Chinner 	}
50568988114SDave Chinner 
506abbf9e8aSChristoph Hellwig 	if (bmv->bmv_length == -1) {
507abbf9e8aSChristoph Hellwig 		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
508abbf9e8aSChristoph Hellwig 		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
509abbf9e8aSChristoph Hellwig 	}
510abbf9e8aSChristoph Hellwig 
511abbf9e8aSChristoph Hellwig 	bmv_end = bmv->bmv_offset + bmv->bmv_length;
512abbf9e8aSChristoph Hellwig 
513abbf9e8aSChristoph Hellwig 	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
514abbf9e8aSChristoph Hellwig 	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
515abbf9e8aSChristoph Hellwig 
516abbf9e8aSChristoph Hellwig 	error = xfs_iread_extents(NULL, ip, whichfork);
517abbf9e8aSChristoph Hellwig 	if (error)
518abbf9e8aSChristoph Hellwig 		goto out_unlock_ilock;
519abbf9e8aSChristoph Hellwig 
520b2b1712aSChristoph Hellwig 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
521abbf9e8aSChristoph Hellwig 		/*
522abbf9e8aSChristoph Hellwig 		 * Report a whole-file hole if the delalloc flag is set to
523abbf9e8aSChristoph Hellwig 		 * stay compatible with the old implementation.
524abbf9e8aSChristoph Hellwig 		 */
525abbf9e8aSChristoph Hellwig 		if (iflags & BMV_IF_DELALLOC)
526abbf9e8aSChristoph Hellwig 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
527abbf9e8aSChristoph Hellwig 					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
528abbf9e8aSChristoph Hellwig 		goto out_unlock_ilock;
529abbf9e8aSChristoph Hellwig 	}
530abbf9e8aSChristoph Hellwig 
531abbf9e8aSChristoph Hellwig 	while (!xfs_getbmap_full(bmv)) {
532abbf9e8aSChristoph Hellwig 		xfs_trim_extent(&got, first_bno, len);
533abbf9e8aSChristoph Hellwig 
534abbf9e8aSChristoph Hellwig 		/*
535abbf9e8aSChristoph Hellwig 		 * Report an entry for a hole if this extent doesn't directly
536abbf9e8aSChristoph Hellwig 		 * follow the previous one.
537abbf9e8aSChristoph Hellwig 		 */
538abbf9e8aSChristoph Hellwig 		if (got.br_startoff > bno) {
539abbf9e8aSChristoph Hellwig 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
540abbf9e8aSChristoph Hellwig 					got.br_startoff);
541abbf9e8aSChristoph Hellwig 			if (xfs_getbmap_full(bmv))
542abbf9e8aSChristoph Hellwig 				break;
543abbf9e8aSChristoph Hellwig 		}
544abbf9e8aSChristoph Hellwig 
545abbf9e8aSChristoph Hellwig 		/*
546abbf9e8aSChristoph Hellwig 		 * In order to report shared extents accurately, we report each
547abbf9e8aSChristoph Hellwig 		 * distinct shared / unshared part of a single bmbt record with
548abbf9e8aSChristoph Hellwig 		 * an individual getbmapx record.
549abbf9e8aSChristoph Hellwig 		 */
550abbf9e8aSChristoph Hellwig 		bno = got.br_startoff + got.br_blockcount;
551abbf9e8aSChristoph Hellwig 		rec = got;
55268988114SDave Chinner 		do {
553abbf9e8aSChristoph Hellwig 			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
554abbf9e8aSChristoph Hellwig 					&rec);
555abbf9e8aSChristoph Hellwig 			if (error || xfs_getbmap_full(bmv))
556abbf9e8aSChristoph Hellwig 				goto out_unlock_ilock;
557abbf9e8aSChristoph Hellwig 		} while (xfs_getbmap_next_rec(&rec, bno));
55868988114SDave Chinner 
559b2b1712aSChristoph Hellwig 		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
560abbf9e8aSChristoph Hellwig 			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
56168988114SDave Chinner 
562abbf9e8aSChristoph Hellwig 			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
56368988114SDave Chinner 
564abbf9e8aSChristoph Hellwig 			if (whichfork != XFS_ATTR_FORK && bno < end &&
565abbf9e8aSChristoph Hellwig 			    !xfs_getbmap_full(bmv)) {
566abbf9e8aSChristoph Hellwig 				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
567abbf9e8aSChristoph Hellwig 						bno, end);
568abbf9e8aSChristoph Hellwig 			}
569abbf9e8aSChristoph Hellwig 			break;
57068988114SDave Chinner 		}
57168988114SDave Chinner 
572abbf9e8aSChristoph Hellwig 		if (bno >= first_bno + len)
573abbf9e8aSChristoph Hellwig 			break;
57468988114SDave Chinner 	}
57568988114SDave Chinner 
57668988114SDave Chinner out_unlock_ilock:
57701f4f327SChristoph Hellwig 	xfs_iunlock(ip, lock);
57868988114SDave Chinner out_unlock_iolock:
57968988114SDave Chinner 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
58068988114SDave Chinner 	return error;
58168988114SDave Chinner }
58268988114SDave Chinner 
58368988114SDave Chinner /*
584e2ac8363SChristoph Hellwig  * Dead simple method of punching delayed allocation blocks from a range in
585e2ac8363SChristoph Hellwig  * the inode.  This will always punch out both the start and end blocks, even
586e2ac8363SChristoph Hellwig  * if the ranges only partially overlap them, so it is up to the caller to
587e2ac8363SChristoph Hellwig  * ensure that partial blocks are not passed in.
58868988114SDave Chinner  */
58968988114SDave Chinner int
59068988114SDave Chinner xfs_bmap_punch_delalloc_range(
59168988114SDave Chinner 	struct xfs_inode	*ip,
59268988114SDave Chinner 	xfs_fileoff_t		start_fsb,
59368988114SDave Chinner 	xfs_fileoff_t		length)
59468988114SDave Chinner {
595e2ac8363SChristoph Hellwig 	struct xfs_ifork	*ifp = &ip->i_df;
596e2ac8363SChristoph Hellwig 	xfs_fileoff_t		end_fsb = start_fsb + length;
597e2ac8363SChristoph Hellwig 	struct xfs_bmbt_irec	got, del;
598e2ac8363SChristoph Hellwig 	struct xfs_iext_cursor	icur;
59968988114SDave Chinner 	int			error = 0;
60068988114SDave Chinner 
601b2197a36SChristoph Hellwig 	ASSERT(!xfs_need_iread_extents(ifp));
60268988114SDave Chinner 
6030065b541SChristoph Hellwig 	xfs_ilock(ip, XFS_ILOCK_EXCL);
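	/*
	 * Walk backwards from the end of the range, trimming and deleting any
	 * delalloc reservation that overlaps it; real extents and extents
	 * outside the range are skipped by stepping back to the previous one.
	 */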
604e2ac8363SChristoph Hellwig 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
605d4380177SChristoph Hellwig 		goto out_unlock;
606e2ac8363SChristoph Hellwig 
607e2ac8363SChristoph Hellwig 	while (got.br_startoff + got.br_blockcount > start_fsb) {
608e2ac8363SChristoph Hellwig 		del = got;
609e2ac8363SChristoph Hellwig 		xfs_trim_extent(&del, start_fsb, length);
610e2ac8363SChristoph Hellwig 
611e2ac8363SChristoph Hellwig 		/*
612e2ac8363SChristoph Hellwig 		 * A delete can push the cursor forward. Step back to the
613e2ac8363SChristoph Hellwig 		 * A delete can push the cursor forward. Step back to the
614e2ac8363SChristoph Hellwig 		 * previous extent for non-delalloc extents or for extents
615e2ac8363SChristoph Hellwig 		 * outside the target range.
616e2ac8363SChristoph Hellwig 		if (!del.br_blockcount ||
617e2ac8363SChristoph Hellwig 		    !isnullstartblock(del.br_startblock)) {
618e2ac8363SChristoph Hellwig 			if (!xfs_iext_prev_extent(ifp, &icur, &got))
619e2ac8363SChristoph Hellwig 				break;
620e2ac8363SChristoph Hellwig 			continue;
621e2ac8363SChristoph Hellwig 		}
622e2ac8363SChristoph Hellwig 
623e2ac8363SChristoph Hellwig 		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
624e2ac8363SChristoph Hellwig 						  &got, &del);
625e2ac8363SChristoph Hellwig 		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
626e2ac8363SChristoph Hellwig 			break;
627e2ac8363SChristoph Hellwig 	}
62868988114SDave Chinner 
629d4380177SChristoph Hellwig out_unlock:
630d4380177SChristoph Hellwig 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
63168988114SDave Chinner 	return error;
63268988114SDave Chinner }
633c24b5dfaSDave Chinner 
634c24b5dfaSDave Chinner /*
635c24b5dfaSDave Chinner  * Test whether it is appropriate to check an inode for and free post EOF
636c24b5dfaSDave Chinner  * blocks. The 'force' parameter determines whether we should also consider
637c24b5dfaSDave Chinner  * regular files that are marked preallocated or append-only.
638c24b5dfaSDave Chinner  */
639c24b5dfaSDave Chinner bool
6407d88329eSDarrick J. Wong xfs_can_free_eofblocks(
6417d88329eSDarrick J. Wong 	struct xfs_inode	*ip,
6427d88329eSDarrick J. Wong 	bool			force)
643c24b5dfaSDave Chinner {
6447d88329eSDarrick J. Wong 	struct xfs_bmbt_irec	imap;
6457d88329eSDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
6467d88329eSDarrick J. Wong 	xfs_fileoff_t		end_fsb;
6477d88329eSDarrick J. Wong 	xfs_fileoff_t		last_fsb;
6487d88329eSDarrick J. Wong 	int			nimaps = 1;
6497d88329eSDarrick J. Wong 	int			error;
6507d88329eSDarrick J. Wong 
6517d88329eSDarrick J. Wong 	/*
6527d88329eSDarrick J. Wong 	 * Caller must either hold the exclusive io lock; or be inactivating
6537d88329eSDarrick J. Wong 	 * the inode, which guarantees there are no other users of the inode.
6547d88329eSDarrick J. Wong 	 */
6557d88329eSDarrick J. Wong 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
6567d88329eSDarrick J. Wong 	       (VFS_I(ip)->i_state & I_FREEING));
6577d88329eSDarrick J. Wong 
658c24b5dfaSDave Chinner 	/* prealloc/delalloc exists only on regular files */
659c19b3b05SDave Chinner 	if (!S_ISREG(VFS_I(ip)->i_mode))
660c24b5dfaSDave Chinner 		return false;
661c24b5dfaSDave Chinner 
662c24b5dfaSDave Chinner 	/*
663c24b5dfaSDave Chinner 	 * Zero sized files with no cached pages and delalloc blocks will not
664c24b5dfaSDave Chinner 	 * have speculative prealloc/delalloc blocks to remove.
665c24b5dfaSDave Chinner 	 */
666c24b5dfaSDave Chinner 	if (VFS_I(ip)->i_size == 0 &&
6672667c6f9SDave Chinner 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
668c24b5dfaSDave Chinner 	    ip->i_delayed_blks == 0)
669c24b5dfaSDave Chinner 		return false;
670c24b5dfaSDave Chinner 
671c24b5dfaSDave Chinner 	/* If we haven't read in the extent list, then don't do it now. */
672b2197a36SChristoph Hellwig 	if (xfs_need_iread_extents(&ip->i_df))
673c24b5dfaSDave Chinner 		return false;
674c24b5dfaSDave Chinner 
675c24b5dfaSDave Chinner 	/*
676c24b5dfaSDave Chinner 	 * Do not free real preallocated or append-only files unless the file
677c24b5dfaSDave Chinner 	 * has delalloc blocks and we are forced to remove them.
678c24b5dfaSDave Chinner 	 */
679db07349dSChristoph Hellwig 	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
680c24b5dfaSDave Chinner 		if (!force || ip->i_delayed_blks == 0)
681c24b5dfaSDave Chinner 			return false;
682c24b5dfaSDave Chinner 
6837d88329eSDarrick J. Wong 	/*
6847d88329eSDarrick J. Wong 	 * Do not try to free post-EOF blocks if EOF is beyond the end of the
6857d88329eSDarrick J. Wong 	 * range supported by the page cache, because the truncation will loop
6867d88329eSDarrick J. Wong 	 * forever.
6877d88329eSDarrick J. Wong 	 */
6887d88329eSDarrick J. Wong 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
6897d88329eSDarrick J. Wong 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
6907d88329eSDarrick J. Wong 	if (last_fsb <= end_fsb)
6917d88329eSDarrick J. Wong 		return false;
6927d88329eSDarrick J. Wong 
6937d88329eSDarrick J. Wong 	/*
6947d88329eSDarrick J. Wong 	 * Look up the mapping for the first block past EOF.  If we can't find
6957d88329eSDarrick J. Wong 	 * it, there's nothing to free.
6967d88329eSDarrick J. Wong 	 */
6977d88329eSDarrick J. Wong 	xfs_ilock(ip, XFS_ILOCK_SHARED);
6987d88329eSDarrick J. Wong 	error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
6997d88329eSDarrick J. Wong 			0);
7007d88329eSDarrick J. Wong 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
7017d88329eSDarrick J. Wong 	if (error || nimaps == 0)
7027d88329eSDarrick J. Wong 		return false;
7037d88329eSDarrick J. Wong 
7047d88329eSDarrick J. Wong 	/*
7057d88329eSDarrick J. Wong 	 * If there's a real mapping there or there are delayed allocation
7067d88329eSDarrick J. Wong 	 * reservations, then we have post-EOF blocks to try to free.
7077d88329eSDarrick J. Wong 	 */
7087d88329eSDarrick J. Wong 	return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
709c24b5dfaSDave Chinner }
710c24b5dfaSDave Chinner 
711c24b5dfaSDave Chinner /*
7123b4683c2SBrian Foster  * This is called to free any blocks beyond eof. The caller must hold
7133b4683c2SBrian Foster  * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
7143b4683c2SBrian Foster  * reference to the inode.
715c24b5dfaSDave Chinner  */
716c24b5dfaSDave Chinner int
717c24b5dfaSDave Chinner xfs_free_eofblocks(
718a36b9261SBrian Foster 	struct xfs_inode	*ip)
719c24b5dfaSDave Chinner {
720a36b9261SBrian Foster 	struct xfs_trans	*tp;
721a36b9261SBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
7227d88329eSDarrick J. Wong 	int			error;
723a36b9261SBrian Foster 
7247d88329eSDarrick J. Wong 	/* Attach the dquots to the inode up front. */
725c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
726c24b5dfaSDave Chinner 	if (error)
727c24b5dfaSDave Chinner 		return error;
728c24b5dfaSDave Chinner 
7297d88329eSDarrick J. Wong 	/* Wait on dio to ensure i_size has settled. */
730e4229d6bSBrian Foster 	inode_dio_wait(VFS_I(ip));
731e4229d6bSBrian Foster 
7327d88329eSDarrick J. Wong 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
733c24b5dfaSDave Chinner 	if (error) {
734*75c8c50fSDave Chinner 		ASSERT(xfs_is_shutdown(mp));
735c24b5dfaSDave Chinner 		return error;
736c24b5dfaSDave Chinner 	}
737c24b5dfaSDave Chinner 
738c24b5dfaSDave Chinner 	xfs_ilock(ip, XFS_ILOCK_EXCL);
739c24b5dfaSDave Chinner 	xfs_trans_ijoin(tp, ip, 0);
740c24b5dfaSDave Chinner 
741c24b5dfaSDave Chinner 	/*
7427d88329eSDarrick J. Wong 	 * Do not update the on-disk file size.  If we update the on-disk file
7437d88329eSDarrick J. Wong 	 * size and then the system crashes before the contents of the file are
7447d88329eSDarrick J. Wong 	 * flushed to disk then the files may be full of holes (ie NULL files
7457d88329eSDarrick J. Wong 	 * bug).
746c24b5dfaSDave Chinner 	 */
7474e529339SBrian Foster 	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
7484e529339SBrian Foster 				XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
7497d88329eSDarrick J. Wong 	if (error)
7507d88329eSDarrick J. Wong 		goto err_cancel;
7517d88329eSDarrick J. Wong 
7527d88329eSDarrick J. Wong 	error = xfs_trans_commit(tp);
7537d88329eSDarrick J. Wong 	if (error)
7547d88329eSDarrick J. Wong 		goto out_unlock;
7557d88329eSDarrick J. Wong 
7567d88329eSDarrick J. Wong 	xfs_inode_clear_eofblocks_tag(ip);
7577d88329eSDarrick J. Wong 	goto out_unlock;
7587d88329eSDarrick J. Wong 
7597d88329eSDarrick J. Wong err_cancel:
760c24b5dfaSDave Chinner 	/*
761c24b5dfaSDave Chinner 	 * If we get an error at this point we simply don't
762c24b5dfaSDave Chinner 	 * bother truncating the file.
763c24b5dfaSDave Chinner 	 */
7644906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
7657d88329eSDarrick J. Wong out_unlock:
766c24b5dfaSDave Chinner 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
767c24b5dfaSDave Chinner 	return error;
768c24b5dfaSDave Chinner }
769c24b5dfaSDave Chinner 
77083aee9e4SChristoph Hellwig int
771c24b5dfaSDave Chinner xfs_alloc_file_space(
77283aee9e4SChristoph Hellwig 	struct xfs_inode	*ip,
773c24b5dfaSDave Chinner 	xfs_off_t		offset,
774c24b5dfaSDave Chinner 	xfs_off_t		len,
7755f8aca8bSChristoph Hellwig 	int			alloc_type)
776c24b5dfaSDave Chinner {
777c24b5dfaSDave Chinner 	xfs_mount_t		*mp = ip->i_mount;
778c24b5dfaSDave Chinner 	xfs_off_t		count;
779c24b5dfaSDave Chinner 	xfs_filblks_t		allocated_fsb;
780c24b5dfaSDave Chinner 	xfs_filblks_t		allocatesize_fsb;
781c24b5dfaSDave Chinner 	xfs_extlen_t		extsz, temp;
782c24b5dfaSDave Chinner 	xfs_fileoff_t		startoffset_fsb;
783e093c4beSMax Reitz 	xfs_fileoff_t		endoffset_fsb;
784c24b5dfaSDave Chinner 	int			nimaps;
785c24b5dfaSDave Chinner 	int			rt;
786c24b5dfaSDave Chinner 	xfs_trans_t		*tp;
787c24b5dfaSDave Chinner 	xfs_bmbt_irec_t		imaps[1], *imapp;
788c24b5dfaSDave Chinner 	int			error;
789c24b5dfaSDave Chinner 
790c24b5dfaSDave Chinner 	trace_xfs_alloc_file_space(ip);
791c24b5dfaSDave Chinner 
792*75c8c50fSDave Chinner 	if (xfs_is_shutdown(mp))
7932451337dSDave Chinner 		return -EIO;
794c24b5dfaSDave Chinner 
795c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
796c24b5dfaSDave Chinner 	if (error)
797c24b5dfaSDave Chinner 		return error;
798c24b5dfaSDave Chinner 
799c24b5dfaSDave Chinner 	if (len <= 0)
8002451337dSDave Chinner 		return -EINVAL;
801c24b5dfaSDave Chinner 
802c24b5dfaSDave Chinner 	rt = XFS_IS_REALTIME_INODE(ip);
803c24b5dfaSDave Chinner 	extsz = xfs_get_extsz_hint(ip);
804c24b5dfaSDave Chinner 
805c24b5dfaSDave Chinner 	count = len;
806c24b5dfaSDave Chinner 	imapp = &imaps[0];
807c24b5dfaSDave Chinner 	nimaps = 1;
808c24b5dfaSDave Chinner 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
809e093c4beSMax Reitz 	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
810e093c4beSMax Reitz 	allocatesize_fsb = endoffset_fsb - startoffset_fsb;
811c24b5dfaSDave Chinner 
812c24b5dfaSDave Chinner 	/*
813c24b5dfaSDave Chinner 	 * Allocate file space until done or until there is an error
814c24b5dfaSDave Chinner 	 */
815c24b5dfaSDave Chinner 	while (allocatesize_fsb && !error) {
816c24b5dfaSDave Chinner 		xfs_fileoff_t	s, e;
8173de4eb10SDarrick J. Wong 		unsigned int	dblocks, rblocks, resblks;
818c24b5dfaSDave Chinner 
819c24b5dfaSDave Chinner 		/*
820c24b5dfaSDave Chinner 		 * Determine space reservations for data/realtime.
821c24b5dfaSDave Chinner 		 */
822c24b5dfaSDave Chinner 		if (unlikely(extsz)) {
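			/*
			 * Round the start of the range down and the end up to
			 * extent size hint boundaries so the reservation
			 * covers the whole aligned allocation.
			 */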
823c24b5dfaSDave Chinner 			s = startoffset_fsb;
824c24b5dfaSDave Chinner 			do_div(s, extsz);
825c24b5dfaSDave Chinner 			s *= extsz;
826c24b5dfaSDave Chinner 			e = startoffset_fsb + allocatesize_fsb;
8270703a8e1SDave Chinner 			div_u64_rem(startoffset_fsb, extsz, &temp);
8280703a8e1SDave Chinner 			if (temp)
829c24b5dfaSDave Chinner 				e += temp;
8300703a8e1SDave Chinner 			div_u64_rem(e, extsz, &temp);
8310703a8e1SDave Chinner 			if (temp)
832c24b5dfaSDave Chinner 				e += extsz - temp;
833c24b5dfaSDave Chinner 		} else {
834c24b5dfaSDave Chinner 			s = 0;
835c24b5dfaSDave Chinner 			e = allocatesize_fsb;
836c24b5dfaSDave Chinner 		}
837c24b5dfaSDave Chinner 
838c24b5dfaSDave Chinner 		/*
839c24b5dfaSDave Chinner 		 * The transaction reservation is limited to a 32-bit block
840c24b5dfaSDave Chinner 		 * count, hence we need to limit the number of blocks we are
841c24b5dfaSDave Chinner 		 * trying to reserve to avoid an overflow. We can't allocate
842c24b5dfaSDave Chinner 		 * more than @nimaps extents, and an extent is limited on disk
843c24b5dfaSDave Chinner 		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
844c24b5dfaSDave Chinner 		 */
845c24b5dfaSDave Chinner 		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
846c24b5dfaSDave Chinner 		if (unlikely(rt)) {
84702b7ee4eSDarrick J. Wong 			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
84802b7ee4eSDarrick J. Wong 			rblocks = resblks;
849c24b5dfaSDave Chinner 		} else {
85002b7ee4eSDarrick J. Wong 			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
85102b7ee4eSDarrick J. Wong 			rblocks = 0;
852c24b5dfaSDave Chinner 		}
853c24b5dfaSDave Chinner 
854c24b5dfaSDave Chinner 		/*
855c24b5dfaSDave Chinner 		 * Allocate and setup the transaction.
856c24b5dfaSDave Chinner 		 */
8573de4eb10SDarrick J. Wong 		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
8583de4eb10SDarrick J. Wong 				dblocks, rblocks, false, &tp);
859c24b5dfaSDave Chinner 		if (error)
8603de4eb10SDarrick J. Wong 			break;
861c24b5dfaSDave Chinner 
862727e1acdSChandan Babu R 		error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
863727e1acdSChandan Babu R 				XFS_IEXT_ADD_NOSPLIT_CNT);
864727e1acdSChandan Babu R 		if (error)
86535b11010SDarrick J. Wong 			goto error;
866727e1acdSChandan Babu R 
867c24b5dfaSDave Chinner 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
868da781e64SBrian Foster 					allocatesize_fsb, alloc_type, 0, imapp,
869da781e64SBrian Foster 					&nimaps);
870f6106efaSEric Sandeen 		if (error)
87135b11010SDarrick J. Wong 			goto error;
872c24b5dfaSDave Chinner 
873c24b5dfaSDave Chinner 		/*
874c24b5dfaSDave Chinner 		 * Complete the transaction
875c24b5dfaSDave Chinner 		 */
87670393313SChristoph Hellwig 		error = xfs_trans_commit(tp);
877c24b5dfaSDave Chinner 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
878f6106efaSEric Sandeen 		if (error)
879c24b5dfaSDave Chinner 			break;
880c24b5dfaSDave Chinner 
881c24b5dfaSDave Chinner 		allocated_fsb = imapp->br_blockcount;
882c24b5dfaSDave Chinner 
883c24b5dfaSDave Chinner 		if (nimaps == 0) {
8842451337dSDave Chinner 			error = -ENOSPC;
885c24b5dfaSDave Chinner 			break;
886c24b5dfaSDave Chinner 		}
887c24b5dfaSDave Chinner 
888c24b5dfaSDave Chinner 		startoffset_fsb += allocated_fsb;
889c24b5dfaSDave Chinner 		allocatesize_fsb -= allocated_fsb;
890c24b5dfaSDave Chinner 	}
891c24b5dfaSDave Chinner 
892c24b5dfaSDave Chinner 	return error;
893c24b5dfaSDave Chinner 
89435b11010SDarrick J. Wong error:
8954906e215SChristoph Hellwig 	xfs_trans_cancel(tp);
896c24b5dfaSDave Chinner 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
897c24b5dfaSDave Chinner 	return error;
898c24b5dfaSDave Chinner }
899c24b5dfaSDave Chinner 
900bdb0d04fSChristoph Hellwig static int
901bdb0d04fSChristoph Hellwig xfs_unmap_extent(
90283aee9e4SChristoph Hellwig 	struct xfs_inode	*ip,
903bdb0d04fSChristoph Hellwig 	xfs_fileoff_t		startoffset_fsb,
904bdb0d04fSChristoph Hellwig 	xfs_filblks_t		len_fsb,
905bdb0d04fSChristoph Hellwig 	int			*done)
906c24b5dfaSDave Chinner {
907bdb0d04fSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
908bdb0d04fSChristoph Hellwig 	struct xfs_trans	*tp;
909bdb0d04fSChristoph Hellwig 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
910bdb0d04fSChristoph Hellwig 	int			error;
911c24b5dfaSDave Chinner 
9123de4eb10SDarrick J. Wong 	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
9133a1af6c3SDarrick J. Wong 			false, &tp);
914bdb0d04fSChristoph Hellwig 	if (error)
9153a1af6c3SDarrick J. Wong 		return error;
916c24b5dfaSDave Chinner 
91785ef08b5SChandan Babu R 	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
91885ef08b5SChandan Babu R 			XFS_IEXT_PUNCH_HOLE_CNT);
91985ef08b5SChandan Babu R 	if (error)
92085ef08b5SChandan Babu R 		goto out_trans_cancel;
92185ef08b5SChandan Babu R 
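	/*
	 * Unmap at most two extents in this transaction; the caller loops
	 * until @done, so large ranges are punched out in bounded steps.
	 */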
9222af52842SBrian Foster 	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
923bdb0d04fSChristoph Hellwig 	if (error)
924c8eac49eSBrian Foster 		goto out_trans_cancel;
925bdb0d04fSChristoph Hellwig 
926bdb0d04fSChristoph Hellwig 	error = xfs_trans_commit(tp);
927bdb0d04fSChristoph Hellwig out_unlock:
928bdb0d04fSChristoph Hellwig 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
929bdb0d04fSChristoph Hellwig 	return error;
930bdb0d04fSChristoph Hellwig 
931bdb0d04fSChristoph Hellwig out_trans_cancel:
932bdb0d04fSChristoph Hellwig 	xfs_trans_cancel(tp);
933bdb0d04fSChristoph Hellwig 	goto out_unlock;
934bdb0d04fSChristoph Hellwig }
935bdb0d04fSChristoph Hellwig 
936249bd908SDave Chinner /* Caller must first wait for the completion of any pending DIOs if required. */
9372c307174SDave Chinner int
938bdb0d04fSChristoph Hellwig xfs_flush_unmap_range(
939bdb0d04fSChristoph Hellwig 	struct xfs_inode	*ip,
940bdb0d04fSChristoph Hellwig 	xfs_off_t		offset,
941bdb0d04fSChristoph Hellwig 	xfs_off_t		len)
942bdb0d04fSChristoph Hellwig {
943bdb0d04fSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
944bdb0d04fSChristoph Hellwig 	struct inode		*inode = VFS_I(ip);
945bdb0d04fSChristoph Hellwig 	xfs_off_t		rounding, start, end;
946bdb0d04fSChristoph Hellwig 	int			error;
947bdb0d04fSChristoph Hellwig 
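	/*
	 * Writeback and page cache invalidation operate on whole pages and
	 * whole filesystem blocks, so round the range out to the larger of
	 * the two granularities.
	 */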
94820bd8e63SDarrick J. Wong 	rounding = max_t(xfs_off_t, mp->m_sb.sb_blocksize, PAGE_SIZE);
949bdb0d04fSChristoph Hellwig 	start = round_down(offset, rounding);
950bdb0d04fSChristoph Hellwig 	end = round_up(offset + len, rounding) - 1;
951bdb0d04fSChristoph Hellwig 
952bdb0d04fSChristoph Hellwig 	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
953c24b5dfaSDave Chinner 	if (error)
954c24b5dfaSDave Chinner 		return error;
955bdb0d04fSChristoph Hellwig 	truncate_pagecache_range(inode, start, end);
956bdb0d04fSChristoph Hellwig 	return 0;
957c24b5dfaSDave Chinner }
958c24b5dfaSDave Chinner 
959c24b5dfaSDave Chinner int
960c24b5dfaSDave Chinner xfs_free_file_space(
961c24b5dfaSDave Chinner 	struct xfs_inode	*ip,
962c24b5dfaSDave Chinner 	xfs_off_t		offset,
963c24b5dfaSDave Chinner 	xfs_off_t		len)
964c24b5dfaSDave Chinner {
965bdb0d04fSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
966c24b5dfaSDave Chinner 	xfs_fileoff_t		startoffset_fsb;
967bdb0d04fSChristoph Hellwig 	xfs_fileoff_t		endoffset_fsb;
9683c2bdc91SChristoph Hellwig 	int			done = 0, error;
969c24b5dfaSDave Chinner 
970c24b5dfaSDave Chinner 	trace_xfs_free_file_space(ip);
971c24b5dfaSDave Chinner 
972c14cfccaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
973c24b5dfaSDave Chinner 	if (error)
974c24b5dfaSDave Chinner 		return error;
975c24b5dfaSDave Chinner 
976c24b5dfaSDave Chinner 	if (len <= 0)	/* if nothing being freed */
977bdb0d04fSChristoph Hellwig 		return 0;
978bdb0d04fSChristoph Hellwig 
979c24b5dfaSDave Chinner 	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
980c24b5dfaSDave Chinner 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
981c24b5dfaSDave Chinner 
982fe341eb1SDarrick J. Wong 	/* We can only free complete realtime extents. */
98325219dbfSDarrick J. Wong 	if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
98425219dbfSDarrick J. Wong 		startoffset_fsb = roundup_64(startoffset_fsb,
98525219dbfSDarrick J. Wong 					     mp->m_sb.sb_rextsize);
98625219dbfSDarrick J. Wong 		endoffset_fsb = rounddown_64(endoffset_fsb,
98725219dbfSDarrick J. Wong 					     mp->m_sb.sb_rextsize);
988fe341eb1SDarrick J. Wong 	}
989fe341eb1SDarrick J. Wong 
990bdb0d04fSChristoph Hellwig 	/*
991daa79baeSChristoph Hellwig 	 * Need to zero the stuff we're not freeing, on disk.
992bdb0d04fSChristoph Hellwig 	 */
9933c2bdc91SChristoph Hellwig 	if (endoffset_fsb > startoffset_fsb) {
9943c2bdc91SChristoph Hellwig 		while (!done) {
995bdb0d04fSChristoph Hellwig 			error = xfs_unmap_extent(ip, startoffset_fsb,
996bdb0d04fSChristoph Hellwig 					endoffset_fsb - startoffset_fsb, &done);
9973c2bdc91SChristoph Hellwig 			if (error)
9983c2bdc91SChristoph Hellwig 				return error;
9993c2bdc91SChristoph Hellwig 		}
1000c24b5dfaSDave Chinner 	}
1001c24b5dfaSDave Chinner 
10023c2bdc91SChristoph Hellwig 	/*
10033c2bdc91SChristoph Hellwig 	 * Now that we've unmapped all full blocks we'll have to zero out any
1004f5c54717SChristoph Hellwig 	 * partial block at the beginning and/or end.  iomap_zero_range is smart
1005f5c54717SChristoph Hellwig 	 * enough to skip any holes, including those we just created, but we
1006f5c54717SChristoph Hellwig 	 * must take care not to zero beyond EOF and enlarge i_size.
10073c2bdc91SChristoph Hellwig 	 */
10083dd09d5aSCalvin Owens 	if (offset >= XFS_ISIZE(ip))
10093dd09d5aSCalvin Owens 		return 0;
10103dd09d5aSCalvin Owens 	if (offset + len > XFS_ISIZE(ip))
10113dd09d5aSCalvin Owens 		len = XFS_ISIZE(ip) - offset;
1012f150b423SChristoph Hellwig 	error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
1013f150b423SChristoph Hellwig 			&xfs_buffered_write_iomap_ops);
1014e53c4b59SDarrick J. Wong 	if (error)
1015e53c4b59SDarrick J. Wong 		return error;
1016e53c4b59SDarrick J. Wong 
1017e53c4b59SDarrick J. Wong 	/*
1018e53c4b59SDarrick J. Wong 	 * If we zeroed right up to EOF and EOF straddles a page boundary we
1019e53c4b59SDarrick J. Wong 	 * must make sure that the post-EOF area is also zeroed because the
1020e53c4b59SDarrick J. Wong 	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
1021e53c4b59SDarrick J. Wong 	 * Writeback of the eof page will do this, albeit clumsily.
1022e53c4b59SDarrick J. Wong 	 */
1023a579121fSDarrick J. Wong 	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
1024e53c4b59SDarrick J. Wong 		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1025a579121fSDarrick J. Wong 				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
1026e53c4b59SDarrick J. Wong 	}
1027e53c4b59SDarrick J. Wong 
1028e53c4b59SDarrick J. Wong 	return error;
1029c24b5dfaSDave Chinner }
1030c24b5dfaSDave Chinner 
103172c1a739Skbuild test robot static int
10324ed36c6bSChristoph Hellwig xfs_prepare_shift(
1033e1d8fb88SNamjae Jeon 	struct xfs_inode	*ip,
10344ed36c6bSChristoph Hellwig 	loff_t			offset)
1035e1d8fb88SNamjae Jeon {
1036d0c22041SBrian Foster 	struct xfs_mount	*mp = ip->i_mount;
1037e1d8fb88SNamjae Jeon 	int			error;
1038f71721d0SBrian Foster 
1039f71721d0SBrian Foster 	/*
1040f71721d0SBrian Foster 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1041f71721d0SBrian Foster 	 * into the accessible region of the file.
1042f71721d0SBrian Foster 	 */
104341b9d726SBrian Foster 	if (xfs_can_free_eofblocks(ip, true)) {
1044a36b9261SBrian Foster 		error = xfs_free_eofblocks(ip);
104541b9d726SBrian Foster 		if (error)
104641b9d726SBrian Foster 			return error;
104741b9d726SBrian Foster 	}
10481669a8caSDave Chinner 
1049f71721d0SBrian Foster 	/*
1050d0c22041SBrian Foster 	 * Shift operations must stabilize the start block offset boundary along
1051d0c22041SBrian Foster 	 * with the full range of the operation. If we don't, a COW writeback
1052d0c22041SBrian Foster 	 * completion could race with an insert, front merge with the start
1053d0c22041SBrian Foster 	 * extent (after split) during the shift and corrupt the file. Start
1054d0c22041SBrian Foster 	 * with the block just prior to the start to stabilize the boundary.
1055d0c22041SBrian Foster 	 */
105620bd8e63SDarrick J. Wong 	offset = round_down(offset, mp->m_sb.sb_blocksize);
1057d0c22041SBrian Foster 	if (offset)
105820bd8e63SDarrick J. Wong 		offset -= mp->m_sb.sb_blocksize;
1059d0c22041SBrian Foster 
1060d0c22041SBrian Foster 	/*
1061f71721d0SBrian Foster 	 * Writeback and invalidate cache for the remainder of the file as we're
1062a904b1caSNamjae Jeon 	 * about to shift down every extent from offset to EOF.
1063f71721d0SBrian Foster 	 */
10647f9f71beSDave Chinner 	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
10651749d1eaSBrian Foster 	if (error)
10661749d1eaSBrian Foster 		return error;
1067e1d8fb88SNamjae Jeon 
1068a904b1caSNamjae Jeon 	/*
10693af423b0SDarrick J. Wong 	 * Clean out anything hanging around in the cow fork now that
10703af423b0SDarrick J. Wong 	 * we've flushed all the dirty data out to disk to avoid having
10713af423b0SDarrick J. Wong 	 * CoW extents at the wrong offsets.
10723af423b0SDarrick J. Wong 	 */
107351d62690SChristoph Hellwig 	if (xfs_inode_has_cow_data(ip)) {
10743af423b0SDarrick J. Wong 		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
10753af423b0SDarrick J. Wong 				true);
10763af423b0SDarrick J. Wong 		if (error)
10773af423b0SDarrick J. Wong 			return error;
10783af423b0SDarrick J. Wong 	}
10793af423b0SDarrick J. Wong 
10804ed36c6bSChristoph Hellwig 	return 0;
1081e1d8fb88SNamjae Jeon }
1082e1d8fb88SNamjae Jeon 
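/*
 * Worked example of the boundary stabilization in xfs_prepare_shift() above
 * (illustrative only; assumes a hypothetical 4096-byte filesystem block size):
 *
 *	offset = 10000
 *	round_down(10000, 4096) = 8192
 *	8192 != 0, so offset becomes 8192 - 4096 = 4096
 *
 * i.e. the flush/invalidate and CoW cancellation start one block before the
 * block containing the caller's offset, pinning the extent boundary at the
 * start of the shift range.
 */
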
1083e1d8fb88SNamjae Jeon /*
1084a904b1caSNamjae Jeon  * xfs_collapse_file_space()
 1085a904b1caSNamjae Jeon  *	This routine frees disk space and shifts extents for the given file.
 1086a904b1caSNamjae Jeon  *	The first thing we do is free the data blocks in the specified range
 1087a904b1caSNamjae Jeon  *	by calling xfs_free_file_space(), which also syncs dirty data
 1088a904b1caSNamjae Jeon  *	and invalidates the page cache over the region the collapse range
 1089a904b1caSNamjae Jeon  *	is working on. Extent records are then shifted left to cover the hole.
1090a904b1caSNamjae Jeon  * RETURNS:
1091a904b1caSNamjae Jeon  *	0 on success
1092a904b1caSNamjae Jeon  *	errno on error
1093a904b1caSNamjae Jeon  *
1094a904b1caSNamjae Jeon  */
1095a904b1caSNamjae Jeon int
1096a904b1caSNamjae Jeon xfs_collapse_file_space(
1097a904b1caSNamjae Jeon 	struct xfs_inode	*ip,
1098a904b1caSNamjae Jeon 	xfs_off_t		offset,
1099a904b1caSNamjae Jeon 	xfs_off_t		len)
1100a904b1caSNamjae Jeon {
11014ed36c6bSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
11024ed36c6bSChristoph Hellwig 	struct xfs_trans	*tp;
1103a904b1caSNamjae Jeon 	int			error;
11044ed36c6bSChristoph Hellwig 	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
11054ed36c6bSChristoph Hellwig 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1106ecfea3f0SChristoph Hellwig 	bool			done = false;
1107a904b1caSNamjae Jeon 
1108a904b1caSNamjae Jeon 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
11099ad1a23aSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
11109ad1a23aSChristoph Hellwig 
1111a904b1caSNamjae Jeon 	trace_xfs_collapse_file_space(ip);
1112a904b1caSNamjae Jeon 
1113a904b1caSNamjae Jeon 	error = xfs_free_file_space(ip, offset, len);
1114a904b1caSNamjae Jeon 	if (error)
1115a904b1caSNamjae Jeon 		return error;
1116a904b1caSNamjae Jeon 
11174ed36c6bSChristoph Hellwig 	error = xfs_prepare_shift(ip, offset);
11184ed36c6bSChristoph Hellwig 	if (error)
11194ed36c6bSChristoph Hellwig 		return error;
11204ed36c6bSChristoph Hellwig 
1121211683b2SBrian Foster 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
11224ed36c6bSChristoph Hellwig 	if (error)
1123211683b2SBrian Foster 		return error;
11244ed36c6bSChristoph Hellwig 
11254ed36c6bSChristoph Hellwig 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1126211683b2SBrian Foster 	xfs_trans_ijoin(tp, ip, 0);
11274ed36c6bSChristoph Hellwig 
1128211683b2SBrian Foster 	while (!done) {
1129ecfea3f0SChristoph Hellwig 		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
1130333f950cSBrian Foster 				&done);
11314ed36c6bSChristoph Hellwig 		if (error)
1132c8eac49eSBrian Foster 			goto out_trans_cancel;
1133211683b2SBrian Foster 		if (done)
1134211683b2SBrian Foster 			break;
11354ed36c6bSChristoph Hellwig 
1136211683b2SBrian Foster 		/* finish any deferred frees and roll the transaction */
1137211683b2SBrian Foster 		error = xfs_defer_finish(&tp);
1138211683b2SBrian Foster 		if (error)
1139211683b2SBrian Foster 			goto out_trans_cancel;
11404ed36c6bSChristoph Hellwig 	}
11414ed36c6bSChristoph Hellwig 
1142211683b2SBrian Foster 	error = xfs_trans_commit(tp);
1143211683b2SBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
11444ed36c6bSChristoph Hellwig 	return error;
11454ed36c6bSChristoph Hellwig 
11464ed36c6bSChristoph Hellwig out_trans_cancel:
11474ed36c6bSChristoph Hellwig 	xfs_trans_cancel(tp);
1148211683b2SBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
11494ed36c6bSChristoph Hellwig 	return error;
1150a904b1caSNamjae Jeon }
1151a904b1caSNamjae Jeon 
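/*
 * Illustrative userspace sketch (not part of this file): collapse range is
 * normally reached via fallocate(2) with FALLOC_FL_COLLAPSE_RANGE, which ends
 * up in xfs_collapse_file_space() above. Offset and length must be multiples
 * of the filesystem block size, and the file shrinks by 'len' bytes. The path
 * name and sizes below are made up for the example.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *	#include <stdio.h>
 *
 *	int fd = open("/mnt/xfs/data.bin", O_RDWR);
 *	// Remove 64KiB starting at offset 1MiB; everything after it moves left.
 *	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 1024 * 1024, 64 * 1024) < 0)
 *		perror("fallocate");
 */
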
1152a904b1caSNamjae Jeon /*
1153a904b1caSNamjae Jeon  * xfs_insert_file_space()
 1154a904b1caSNamjae Jeon  *	This routine creates hole space by shifting extents for the given file.
 1155a904b1caSNamjae Jeon  *	The first thing we do is sync dirty data and invalidate the page cache
 1156a904b1caSNamjae Jeon  *	over the region the insert range is working on. An extent is then split
 1157a904b1caSNamjae Jeon  *	in two at the given offset by calling xfs_bmap_split_extent, and all
 1158a904b1caSNamjae Jeon  *	extent records lying between [offset, last allocated extent] are
 1159a904b1caSNamjae Jeon  *	shifted to the right to make room for the hole.
1160a904b1caSNamjae Jeon  * RETURNS:
1161a904b1caSNamjae Jeon  *	0 on success
1162a904b1caSNamjae Jeon  *	errno on error
1163a904b1caSNamjae Jeon  */
1164a904b1caSNamjae Jeon int
1165a904b1caSNamjae Jeon xfs_insert_file_space(
1166a904b1caSNamjae Jeon 	struct xfs_inode	*ip,
1167a904b1caSNamjae Jeon 	loff_t			offset,
1168a904b1caSNamjae Jeon 	loff_t			len)
1169a904b1caSNamjae Jeon {
11704ed36c6bSChristoph Hellwig 	struct xfs_mount	*mp = ip->i_mount;
11714ed36c6bSChristoph Hellwig 	struct xfs_trans	*tp;
11724ed36c6bSChristoph Hellwig 	int			error;
11734ed36c6bSChristoph Hellwig 	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
11744ed36c6bSChristoph Hellwig 	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
11754ed36c6bSChristoph Hellwig 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1176ecfea3f0SChristoph Hellwig 	bool			done = false;
11774ed36c6bSChristoph Hellwig 
1178a904b1caSNamjae Jeon 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
11799ad1a23aSChristoph Hellwig 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
11809ad1a23aSChristoph Hellwig 
1181a904b1caSNamjae Jeon 	trace_xfs_insert_file_space(ip);
1182a904b1caSNamjae Jeon 
1183f62cb48eSDarrick J. Wong 	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
1184f62cb48eSDarrick J. Wong 	if (error)
1185f62cb48eSDarrick J. Wong 		return error;
1186f62cb48eSDarrick J. Wong 
11874ed36c6bSChristoph Hellwig 	error = xfs_prepare_shift(ip, offset);
11884ed36c6bSChristoph Hellwig 	if (error)
11894ed36c6bSChristoph Hellwig 		return error;
11904ed36c6bSChristoph Hellwig 
1191b73df17eSBrian Foster 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
1192b73df17eSBrian Foster 			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
1193b73df17eSBrian Foster 	if (error)
1194b73df17eSBrian Foster 		return error;
1195b73df17eSBrian Foster 
1196b73df17eSBrian Foster 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1197dd87f87dSBrian Foster 	xfs_trans_ijoin(tp, ip, 0);
1198b73df17eSBrian Foster 
119985ef08b5SChandan Babu R 	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
120085ef08b5SChandan Babu R 			XFS_IEXT_PUNCH_HOLE_CNT);
120185ef08b5SChandan Babu R 	if (error)
120285ef08b5SChandan Babu R 		goto out_trans_cancel;
120385ef08b5SChandan Babu R 
1204dd87f87dSBrian Foster 	/*
1205dd87f87dSBrian Foster 	 * The extent shifting code works on extent granularity. So, if stop_fsb
	 1206dd87f87dSBrian Foster 	 * is not the starting block of an extent, we need to split the extent at
1207dd87f87dSBrian Foster 	 * stop_fsb.
1208dd87f87dSBrian Foster 	 */
1209b73df17eSBrian Foster 	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
1210b73df17eSBrian Foster 	if (error)
1211b73df17eSBrian Foster 		goto out_trans_cancel;
1212b73df17eSBrian Foster 
1213dd87f87dSBrian Foster 	do {
12149c516e0eSBrian Foster 		error = xfs_defer_finish(&tp);
12154ed36c6bSChristoph Hellwig 		if (error)
1216dd87f87dSBrian Foster 			goto out_trans_cancel;
12174ed36c6bSChristoph Hellwig 
1218ecfea3f0SChristoph Hellwig 		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1219333f950cSBrian Foster 				&done, stop_fsb);
12204ed36c6bSChristoph Hellwig 		if (error)
1221c8eac49eSBrian Foster 			goto out_trans_cancel;
1222dd87f87dSBrian Foster 	} while (!done);
12234ed36c6bSChristoph Hellwig 
12244ed36c6bSChristoph Hellwig 	error = xfs_trans_commit(tp);
1225dd87f87dSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
12264ed36c6bSChristoph Hellwig 	return error;
12274ed36c6bSChristoph Hellwig 
1228c8eac49eSBrian Foster out_trans_cancel:
12294ed36c6bSChristoph Hellwig 	xfs_trans_cancel(tp);
1230dd87f87dSBrian Foster 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
12314ed36c6bSChristoph Hellwig 	return error;
1232a904b1caSNamjae Jeon }
1233a904b1caSNamjae Jeon 
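/*
 * Illustrative userspace sketch (not part of this file): insert range is
 * normally reached via fallocate(2) with FALLOC_FL_INSERT_RANGE, which ends
 * up in xfs_insert_file_space() above. Offset and length must be block
 * aligned and the offset must lie within the file; the file grows by 'len'
 * bytes and a hole appears at [offset, offset + len). The path name and
 * sizes below are made up for the example.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *	#include <stdio.h>
 *
 *	int fd = open("/mnt/xfs/data.bin", O_RDWR);
 *	// Open a 64KiB hole at offset 1MiB; existing data shifts right.
 *	if (fallocate(fd, FALLOC_FL_INSERT_RANGE, 1024 * 1024, 64 * 1024) < 0)
 *		perror("fallocate");
 */
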
1234a904b1caSNamjae Jeon /*
1235a133d952SDave Chinner  * We need to check that the format of the data fork in the temporary inode is
1236a133d952SDave Chinner  * valid for the target inode before doing the swap. This is not a problem with
1237a133d952SDave Chinner  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1238a133d952SDave Chinner  * data fork depending on the space the attribute fork is taking so we can get
1239a133d952SDave Chinner  * invalid formats on the target inode.
1240a133d952SDave Chinner  *
1241a133d952SDave Chinner  * E.g. target has space for 7 extents in extent format, temp inode only has
1242a133d952SDave Chinner  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1243a133d952SDave Chinner  * btree, but when swapped it needs to be in extent format. Hence we can't just
1244a133d952SDave Chinner  * blindly swap data forks on attr2 filesystems.
1245a133d952SDave Chinner  *
1246a133d952SDave Chinner  * Note that we check the swap in both directions so that we don't end up with
1247a133d952SDave Chinner  * a corrupt temporary inode, either.
1248a133d952SDave Chinner  *
1249a133d952SDave Chinner  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1250a133d952SDave Chinner  * inode will prevent this situation from occurring, so all we do here is
 1251a133d952SDave Chinner  * reject and log the attempt. Basically we are putting the responsibility on
1252a133d952SDave Chinner  * userspace to get this right.
1253a133d952SDave Chinner  */
1254a133d952SDave Chinner static int
1255a133d952SDave Chinner xfs_swap_extents_check_format(
1256e06259aaSDarrick J. Wong 	struct xfs_inode	*ip,	/* target inode */
1257e06259aaSDarrick J. Wong 	struct xfs_inode	*tip)	/* tmp inode */
1258a133d952SDave Chinner {
1259f7e67b20SChristoph Hellwig 	struct xfs_ifork	*ifp = &ip->i_df;
1260f7e67b20SChristoph Hellwig 	struct xfs_ifork	*tifp = &tip->i_df;
1261a133d952SDave Chinner 
1262765d3c39SDarrick J. Wong 	/* User/group/project quota ids must match if quotas are enforced. */
1263765d3c39SDarrick J. Wong 	if (XFS_IS_QUOTA_ON(ip->i_mount) &&
1264765d3c39SDarrick J. Wong 	    (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
1265765d3c39SDarrick J. Wong 	     !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
1266ceaf603cSChristoph Hellwig 	     ip->i_projid != tip->i_projid))
1267765d3c39SDarrick J. Wong 		return -EINVAL;
1268765d3c39SDarrick J. Wong 
1269a133d952SDave Chinner 	/* Should never get a local format */
1270f7e67b20SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
1271f7e67b20SChristoph Hellwig 	    tifp->if_format == XFS_DINODE_FMT_LOCAL)
12722451337dSDave Chinner 		return -EINVAL;
1273a133d952SDave Chinner 
1274a133d952SDave Chinner 	/*
	 1275a133d952SDave Chinner 	 * If the target inode has fewer extents than the temporary inode then
1276a133d952SDave Chinner 	 * why did userspace call us?
1277a133d952SDave Chinner 	 */
1278f7e67b20SChristoph Hellwig 	if (ifp->if_nextents < tifp->if_nextents)
12792451337dSDave Chinner 		return -EINVAL;
1280a133d952SDave Chinner 
1281a133d952SDave Chinner 	/*
12821f08af52SDarrick J. Wong 	 * If we have to use the (expensive) rmap swap method, we can
12831f08af52SDarrick J. Wong 	 * handle any number of extents and any format.
12841f08af52SDarrick J. Wong 	 */
128538c26bfdSDave Chinner 	if (xfs_has_rmapbt(ip->i_mount))
12861f08af52SDarrick J. Wong 		return 0;
12871f08af52SDarrick J. Wong 
12881f08af52SDarrick J. Wong 	/*
	 1289a133d952SDave Chinner 	 * If the target inode is in extent form and the temp inode is in btree
	 1290a133d952SDave Chinner 	 * form then we will end up with the target inode in the wrong format
	 1291a133d952SDave Chinner 	 * as we already know there are fewer extents in the temp inode.
1292a133d952SDave Chinner 	 */
1293f7e67b20SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1294f7e67b20SChristoph Hellwig 	    tifp->if_format == XFS_DINODE_FMT_BTREE)
12952451337dSDave Chinner 		return -EINVAL;
1296a133d952SDave Chinner 
1297a133d952SDave Chinner 	/* Check temp in extent form to max in target */
1298f7e67b20SChristoph Hellwig 	if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1299f7e67b20SChristoph Hellwig 	    tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
13002451337dSDave Chinner 		return -EINVAL;
1301a133d952SDave Chinner 
1302a133d952SDave Chinner 	/* Check target in extent form to max in temp */
1303f7e67b20SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1304f7e67b20SChristoph Hellwig 	    ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
13052451337dSDave Chinner 		return -EINVAL;
1306a133d952SDave Chinner 
1307a133d952SDave Chinner 	/*
1308a133d952SDave Chinner 	 * If we are in a btree format, check that the temp root block will fit
1309a133d952SDave Chinner 	 * in the target and that it has enough extents to be in btree format
1310a133d952SDave Chinner 	 * in the target.
1311a133d952SDave Chinner 	 *
1312a133d952SDave Chinner 	 * Note that we have to be careful to allow btree->extent conversions
1313a133d952SDave Chinner 	 * (a common defrag case) which will occur when the temp inode is in
1314a133d952SDave Chinner 	 * extent format...
1315a133d952SDave Chinner 	 */
1316f7e67b20SChristoph Hellwig 	if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
13170cbe48ccSArnd Bergmann 		if (XFS_IFORK_Q(ip) &&
1318f7e67b20SChristoph Hellwig 		    XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip))
13192451337dSDave Chinner 			return -EINVAL;
1320f7e67b20SChristoph Hellwig 		if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
13212451337dSDave Chinner 			return -EINVAL;
1322a133d952SDave Chinner 	}
1323a133d952SDave Chinner 
1324a133d952SDave Chinner 	/* Reciprocal target->temp btree format checks */
1325f7e67b20SChristoph Hellwig 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
13260cbe48ccSArnd Bergmann 		if (XFS_IFORK_Q(tip) &&
1327a133d952SDave Chinner 		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
13282451337dSDave Chinner 			return -EINVAL;
1329f7e67b20SChristoph Hellwig 		if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
13302451337dSDave Chinner 			return -EINVAL;
1331a133d952SDave Chinner 	}
1332a133d952SDave Chinner 
1333a133d952SDave Chinner 	return 0;
1334a133d952SDave Chinner }
1335a133d952SDave Chinner 
13367abbb8f9SDave Chinner static int
13374ef897a2SDave Chinner xfs_swap_extent_flush(
13384ef897a2SDave Chinner 	struct xfs_inode	*ip)
13394ef897a2SDave Chinner {
13404ef897a2SDave Chinner 	int	error;
13414ef897a2SDave Chinner 
13424ef897a2SDave Chinner 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
13434ef897a2SDave Chinner 	if (error)
13444ef897a2SDave Chinner 		return error;
13454ef897a2SDave Chinner 	truncate_pagecache_range(VFS_I(ip), 0, -1);
13464ef897a2SDave Chinner 
13474ef897a2SDave Chinner 	/* Verify O_DIRECT for ftmp */
13484ef897a2SDave Chinner 	if (VFS_I(ip)->i_mapping->nrpages)
13494ef897a2SDave Chinner 		return -EINVAL;
13504ef897a2SDave Chinner 	return 0;
13514ef897a2SDave Chinner }
13524ef897a2SDave Chinner 
13531f08af52SDarrick J. Wong /*
13541f08af52SDarrick J. Wong  * Move extents from one file to another, when rmap is enabled.
13551f08af52SDarrick J. Wong  */
13561f08af52SDarrick J. Wong STATIC int
13571f08af52SDarrick J. Wong xfs_swap_extent_rmap(
13581f08af52SDarrick J. Wong 	struct xfs_trans		**tpp,
13591f08af52SDarrick J. Wong 	struct xfs_inode		*ip,
13601f08af52SDarrick J. Wong 	struct xfs_inode		*tip)
13611f08af52SDarrick J. Wong {
13627a7943c7SBrian Foster 	struct xfs_trans		*tp = *tpp;
13631f08af52SDarrick J. Wong 	struct xfs_bmbt_irec		irec;
13641f08af52SDarrick J. Wong 	struct xfs_bmbt_irec		uirec;
13651f08af52SDarrick J. Wong 	struct xfs_bmbt_irec		tirec;
13661f08af52SDarrick J. Wong 	xfs_fileoff_t			offset_fsb;
13671f08af52SDarrick J. Wong 	xfs_fileoff_t			end_fsb;
13681f08af52SDarrick J. Wong 	xfs_filblks_t			count_fsb;
13691f08af52SDarrick J. Wong 	int				error;
13701f08af52SDarrick J. Wong 	xfs_filblks_t			ilen;
13711f08af52SDarrick J. Wong 	xfs_filblks_t			rlen;
13721f08af52SDarrick J. Wong 	int				nimaps;
1373c8ce540dSDarrick J. Wong 	uint64_t			tip_flags2;
13741f08af52SDarrick J. Wong 
13751f08af52SDarrick J. Wong 	/*
13761f08af52SDarrick J. Wong 	 * If the source file has shared blocks, we must flag the donor
13771f08af52SDarrick J. Wong 	 * file as having shared blocks so that we get the shared-block
13781f08af52SDarrick J. Wong 	 * rmap functions when we go to fix up the rmaps.  The flags
	 13791f08af52SDarrick J. Wong 	 * will be switched for real later.
13801f08af52SDarrick J. Wong 	 */
13813e09ab8fSChristoph Hellwig 	tip_flags2 = tip->i_diflags2;
13823e09ab8fSChristoph Hellwig 	if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
13833e09ab8fSChristoph Hellwig 		tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;
13841f08af52SDarrick J. Wong 
13851f08af52SDarrick J. Wong 	offset_fsb = 0;
13861f08af52SDarrick J. Wong 	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
13871f08af52SDarrick J. Wong 	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
13881f08af52SDarrick J. Wong 
13891f08af52SDarrick J. Wong 	while (count_fsb) {
13901f08af52SDarrick J. Wong 		/* Read extent from the donor file */
13911f08af52SDarrick J. Wong 		nimaps = 1;
13921f08af52SDarrick J. Wong 		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
13931f08af52SDarrick J. Wong 				&nimaps, 0);
13941f08af52SDarrick J. Wong 		if (error)
13951f08af52SDarrick J. Wong 			goto out;
13961f08af52SDarrick J. Wong 		ASSERT(nimaps == 1);
13971f08af52SDarrick J. Wong 		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
13981f08af52SDarrick J. Wong 
13991f08af52SDarrick J. Wong 		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
14001f08af52SDarrick J. Wong 		ilen = tirec.br_blockcount;
14011f08af52SDarrick J. Wong 
14021f08af52SDarrick J. Wong 		/* Unmap the old blocks in the source file. */
14031f08af52SDarrick J. Wong 		while (tirec.br_blockcount) {
1404c8eac49eSBrian Foster 			ASSERT(tp->t_firstblock == NULLFSBLOCK);
14051f08af52SDarrick J. Wong 			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
14061f08af52SDarrick J. Wong 
14071f08af52SDarrick J. Wong 			/* Read extent from the source file */
14081f08af52SDarrick J. Wong 			nimaps = 1;
14091f08af52SDarrick J. Wong 			error = xfs_bmapi_read(ip, tirec.br_startoff,
14101f08af52SDarrick J. Wong 					tirec.br_blockcount, &irec,
14111f08af52SDarrick J. Wong 					&nimaps, 0);
14121f08af52SDarrick J. Wong 			if (error)
1413d5a2e289SBrian Foster 				goto out;
14141f08af52SDarrick J. Wong 			ASSERT(nimaps == 1);
14151f08af52SDarrick J. Wong 			ASSERT(tirec.br_startoff == irec.br_startoff);
14161f08af52SDarrick J. Wong 			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
14171f08af52SDarrick J. Wong 
14181f08af52SDarrick J. Wong 			/* Trim the extent. */
14191f08af52SDarrick J. Wong 			uirec = tirec;
14201f08af52SDarrick J. Wong 			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
14211f08af52SDarrick J. Wong 					tirec.br_blockcount,
14221f08af52SDarrick J. Wong 					irec.br_blockcount);
14231f08af52SDarrick J. Wong 			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
14241f08af52SDarrick J. Wong 
1425bcc561f2SChandan Babu R 			if (xfs_bmap_is_real_extent(&uirec)) {
1426bcc561f2SChandan Babu R 				error = xfs_iext_count_may_overflow(ip,
1427bcc561f2SChandan Babu R 						XFS_DATA_FORK,
1428bcc561f2SChandan Babu R 						XFS_IEXT_SWAP_RMAP_CNT);
1429bcc561f2SChandan Babu R 				if (error)
1430bcc561f2SChandan Babu R 					goto out;
1431bcc561f2SChandan Babu R 			}
1432bcc561f2SChandan Babu R 
1433bcc561f2SChandan Babu R 			if (xfs_bmap_is_real_extent(&irec)) {
1434bcc561f2SChandan Babu R 				error = xfs_iext_count_may_overflow(tip,
1435bcc561f2SChandan Babu R 						XFS_DATA_FORK,
1436bcc561f2SChandan Babu R 						XFS_IEXT_SWAP_RMAP_CNT);
1437bcc561f2SChandan Babu R 				if (error)
1438bcc561f2SChandan Babu R 					goto out;
1439bcc561f2SChandan Babu R 			}
1440bcc561f2SChandan Babu R 
14411f08af52SDarrick J. Wong 			/* Remove the mapping from the donor file. */
14423e08f42aSDarrick J. Wong 			xfs_bmap_unmap_extent(tp, tip, &uirec);
14431f08af52SDarrick J. Wong 
14441f08af52SDarrick J. Wong 			/* Remove the mapping from the source file. */
14453e08f42aSDarrick J. Wong 			xfs_bmap_unmap_extent(tp, ip, &irec);
14461f08af52SDarrick J. Wong 
14471f08af52SDarrick J. Wong 			/* Map the donor file's blocks into the source file. */
14483e08f42aSDarrick J. Wong 			xfs_bmap_map_extent(tp, ip, &uirec);
14491f08af52SDarrick J. Wong 
14501f08af52SDarrick J. Wong 			/* Map the source file's blocks into the donor file. */
14513e08f42aSDarrick J. Wong 			xfs_bmap_map_extent(tp, tip, &irec);
14521f08af52SDarrick J. Wong 
14539e28a242SBrian Foster 			error = xfs_defer_finish(tpp);
14547a7943c7SBrian Foster 			tp = *tpp;
14551f08af52SDarrick J. Wong 			if (error)
14569b1f4e98SBrian Foster 				goto out;
14571f08af52SDarrick J. Wong 
14581f08af52SDarrick J. Wong 			tirec.br_startoff += rlen;
14591f08af52SDarrick J. Wong 			if (tirec.br_startblock != HOLESTARTBLOCK &&
14601f08af52SDarrick J. Wong 			    tirec.br_startblock != DELAYSTARTBLOCK)
14611f08af52SDarrick J. Wong 				tirec.br_startblock += rlen;
14621f08af52SDarrick J. Wong 			tirec.br_blockcount -= rlen;
14631f08af52SDarrick J. Wong 		}
14641f08af52SDarrick J. Wong 
14651f08af52SDarrick J. Wong 		/* Roll on... */
14661f08af52SDarrick J. Wong 		count_fsb -= ilen;
14671f08af52SDarrick J. Wong 		offset_fsb += ilen;
14681f08af52SDarrick J. Wong 	}
14691f08af52SDarrick J. Wong 
14703e09ab8fSChristoph Hellwig 	tip->i_diflags2 = tip_flags2;
14711f08af52SDarrick J. Wong 	return 0;
14721f08af52SDarrick J. Wong 
14731f08af52SDarrick J. Wong out:
14741f08af52SDarrick J. Wong 	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
14753e09ab8fSChristoph Hellwig 	tip->i_diflags2 = tip_flags2;
14761f08af52SDarrick J. Wong 	return error;
14771f08af52SDarrick J. Wong }
14781f08af52SDarrick J. Wong 
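/*
 * Worked example of the trim-and-advance loop in xfs_swap_extent_rmap()
 * above (illustrative only, made-up extent sizes): suppose the donor extent
 * tirec covers 10 blocks, but the source file maps that range with two
 * extents of 4 and 6 blocks.
 *
 *	pass 1: irec is 4 blocks, rlen = min(10, 4) = 4; those 4 blocks are
 *	        unmapped from both files and cross-mapped, then
 *	        tirec.br_startoff/br_startblock advance by 4 and
 *	        tirec.br_blockcount drops to 6.
 *	pass 2: irec is 6 blocks, rlen = min(6, 6) = 6; the remainder is
 *	        remapped and tirec.br_blockcount reaches 0, ending the
 *	        inner loop.
 */
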
147939aff5fdSDarrick J. Wong /* Swap the extents of two files by swapping data forks. */
148039aff5fdSDarrick J. Wong STATIC int
148139aff5fdSDarrick J. Wong xfs_swap_extent_forks(
148239aff5fdSDarrick J. Wong 	struct xfs_trans	*tp,
148339aff5fdSDarrick J. Wong 	struct xfs_inode	*ip,
148439aff5fdSDarrick J. Wong 	struct xfs_inode	*tip,
148539aff5fdSDarrick J. Wong 	int			*src_log_flags,
148639aff5fdSDarrick J. Wong 	int			*target_log_flags)
148739aff5fdSDarrick J. Wong {
1488e7f5d5caSDarrick J. Wong 	xfs_filblks_t		aforkblks = 0;
1489e7f5d5caSDarrick J. Wong 	xfs_filblks_t		taforkblks = 0;
1490e7f5d5caSDarrick J. Wong 	xfs_extnum_t		junk;
1491c8ce540dSDarrick J. Wong 	uint64_t		tmp;
149239aff5fdSDarrick J. Wong 	int			error;
149339aff5fdSDarrick J. Wong 
149439aff5fdSDarrick J. Wong 	/*
149539aff5fdSDarrick J. Wong 	 * Count the number of extended attribute blocks
149639aff5fdSDarrick J. Wong 	 */
1497daf83964SChristoph Hellwig 	if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 &&
1498f7e67b20SChristoph Hellwig 	    ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
1499e7f5d5caSDarrick J. Wong 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
150039aff5fdSDarrick J. Wong 				&aforkblks);
150139aff5fdSDarrick J. Wong 		if (error)
150239aff5fdSDarrick J. Wong 			return error;
150339aff5fdSDarrick J. Wong 	}
1504daf83964SChristoph Hellwig 	if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 &&
1505f7e67b20SChristoph Hellwig 	    tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
1506e7f5d5caSDarrick J. Wong 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
150739aff5fdSDarrick J. Wong 				&taforkblks);
150839aff5fdSDarrick J. Wong 		if (error)
150939aff5fdSDarrick J. Wong 			return error;
151039aff5fdSDarrick J. Wong 	}
151139aff5fdSDarrick J. Wong 
151239aff5fdSDarrick J. Wong 	/*
15136fb10d6dSBrian Foster 	 * Btree format (v3) inodes have the inode number stamped in the bmbt
15146fb10d6dSBrian Foster 	 * block headers. We can't start changing the bmbt blocks until the
15156fb10d6dSBrian Foster 	 * inode owner change is logged so recovery does the right thing in the
15166fb10d6dSBrian Foster 	 * event of a crash. Set the owner change log flags now and leave the
15176fb10d6dSBrian Foster 	 * bmbt scan as the last step.
151839aff5fdSDarrick J. Wong 	 */
151938c26bfdSDave Chinner 	if (xfs_has_v3inodes(ip->i_mount)) {
1520f7e67b20SChristoph Hellwig 		if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
152139aff5fdSDarrick J. Wong 			(*target_log_flags) |= XFS_ILOG_DOWNER;
1522f7e67b20SChristoph Hellwig 		if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
152339aff5fdSDarrick J. Wong 			(*src_log_flags) |= XFS_ILOG_DOWNER;
15246471e9c5SChristoph Hellwig 	}
152539aff5fdSDarrick J. Wong 
152639aff5fdSDarrick J. Wong 	/*
152739aff5fdSDarrick J. Wong 	 * Swap the data forks of the inodes
152839aff5fdSDarrick J. Wong 	 */
1529897992b7SGustavo A. R. Silva 	swap(ip->i_df, tip->i_df);
153039aff5fdSDarrick J. Wong 
153139aff5fdSDarrick J. Wong 	/*
153239aff5fdSDarrick J. Wong 	 * Fix the on-disk inode values
153339aff5fdSDarrick J. Wong 	 */
15346e73a545SChristoph Hellwig 	tmp = (uint64_t)ip->i_nblocks;
15356e73a545SChristoph Hellwig 	ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
15366e73a545SChristoph Hellwig 	tip->i_nblocks = tmp + taforkblks - aforkblks;
153739aff5fdSDarrick J. Wong 
153839aff5fdSDarrick J. Wong 	/*
153939aff5fdSDarrick J. Wong 	 * The extents in the source inode could still contain speculative
154039aff5fdSDarrick J. Wong 	 * preallocation beyond EOF (e.g. the file is open but not modified
154139aff5fdSDarrick J. Wong 	 * while defrag is in progress). In that case, we need to copy over the
154239aff5fdSDarrick J. Wong 	 * number of delalloc blocks the data fork in the source inode is
154339aff5fdSDarrick J. Wong 	 * tracking beyond EOF so that when the fork is truncated away when the
154439aff5fdSDarrick J. Wong 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
154539aff5fdSDarrick J. Wong 	 * counter on that inode.
154639aff5fdSDarrick J. Wong 	 */
154739aff5fdSDarrick J. Wong 	ASSERT(tip->i_delayed_blks == 0);
154839aff5fdSDarrick J. Wong 	tip->i_delayed_blks = ip->i_delayed_blks;
154939aff5fdSDarrick J. Wong 	ip->i_delayed_blks = 0;
155039aff5fdSDarrick J. Wong 
1551f7e67b20SChristoph Hellwig 	switch (ip->i_df.if_format) {
155239aff5fdSDarrick J. Wong 	case XFS_DINODE_FMT_EXTENTS:
155339aff5fdSDarrick J. Wong 		(*src_log_flags) |= XFS_ILOG_DEXT;
155439aff5fdSDarrick J. Wong 		break;
155539aff5fdSDarrick J. Wong 	case XFS_DINODE_FMT_BTREE:
155638c26bfdSDave Chinner 		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
155739aff5fdSDarrick J. Wong 		       (*src_log_flags & XFS_ILOG_DOWNER));
155839aff5fdSDarrick J. Wong 		(*src_log_flags) |= XFS_ILOG_DBROOT;
155939aff5fdSDarrick J. Wong 		break;
156039aff5fdSDarrick J. Wong 	}
156139aff5fdSDarrick J. Wong 
1562f7e67b20SChristoph Hellwig 	switch (tip->i_df.if_format) {
156339aff5fdSDarrick J. Wong 	case XFS_DINODE_FMT_EXTENTS:
156439aff5fdSDarrick J. Wong 		(*target_log_flags) |= XFS_ILOG_DEXT;
156539aff5fdSDarrick J. Wong 		break;
156639aff5fdSDarrick J. Wong 	case XFS_DINODE_FMT_BTREE:
156739aff5fdSDarrick J. Wong 		(*target_log_flags) |= XFS_ILOG_DBROOT;
156838c26bfdSDave Chinner 		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
156939aff5fdSDarrick J. Wong 		       (*target_log_flags & XFS_ILOG_DOWNER));
157039aff5fdSDarrick J. Wong 		break;
157139aff5fdSDarrick J. Wong 	}
157239aff5fdSDarrick J. Wong 
157339aff5fdSDarrick J. Wong 	return 0;
157439aff5fdSDarrick J. Wong }
157539aff5fdSDarrick J. Wong 
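/*
 * Worked example of the i_nblocks fixup in xfs_swap_extent_forks() above
 * (illustrative only, made-up block counts):
 *
 *	ip:  100 data fork blocks + 5 attr fork blocks -> i_nblocks = 105
 *	tip:  40 data fork blocks + 2 attr fork blocks -> i_nblocks =  42
 *
 *	aforkblks = 5, taforkblks = 2
 *	ip->i_nblocks  =  42 - 2 + 5 =  45  (tip's data blocks + ip's attr blocks)
 *	tip->i_nblocks = 105 + 2 - 5 = 102  (ip's data blocks + tip's attr blocks)
 *
 * Only the data forks swap; each inode keeps its own attr fork block count.
 */
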
15762dd3d709SBrian Foster /*
15772dd3d709SBrian Foster  * Fix up the owners of the bmbt blocks to refer to the current inode. The
15782dd3d709SBrian Foster  * change owner scan attempts to order all modified buffers in the current
15792dd3d709SBrian Foster  * transaction. In the event of ordered buffer failure, the offending buffer is
15802dd3d709SBrian Foster  * physically logged as a fallback and the scan returns -EAGAIN. We must roll
15812dd3d709SBrian Foster  * the transaction in this case to replenish the fallback log reservation and
15822dd3d709SBrian Foster  * restart the scan. This process repeats until the scan completes.
15832dd3d709SBrian Foster  */
15842dd3d709SBrian Foster static int
15852dd3d709SBrian Foster xfs_swap_change_owner(
15862dd3d709SBrian Foster 	struct xfs_trans	**tpp,
15872dd3d709SBrian Foster 	struct xfs_inode	*ip,
15882dd3d709SBrian Foster 	struct xfs_inode	*tmpip)
15892dd3d709SBrian Foster {
15902dd3d709SBrian Foster 	int			error;
15912dd3d709SBrian Foster 	struct xfs_trans	*tp = *tpp;
15922dd3d709SBrian Foster 
15932dd3d709SBrian Foster 	do {
15942dd3d709SBrian Foster 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
15952dd3d709SBrian Foster 					      NULL);
15962dd3d709SBrian Foster 		/* success or fatal error */
15972dd3d709SBrian Foster 		if (error != -EAGAIN)
15982dd3d709SBrian Foster 			break;
15992dd3d709SBrian Foster 
16002dd3d709SBrian Foster 		error = xfs_trans_roll(tpp);
16012dd3d709SBrian Foster 		if (error)
16022dd3d709SBrian Foster 			break;
16032dd3d709SBrian Foster 		tp = *tpp;
16042dd3d709SBrian Foster 
16052dd3d709SBrian Foster 		/*
16062dd3d709SBrian Foster 		 * Redirty both inodes so they can relog and keep the log tail
16072dd3d709SBrian Foster 		 * moving forward.
16082dd3d709SBrian Foster 		 */
16092dd3d709SBrian Foster 		xfs_trans_ijoin(tp, ip, 0);
16102dd3d709SBrian Foster 		xfs_trans_ijoin(tp, tmpip, 0);
16112dd3d709SBrian Foster 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
16122dd3d709SBrian Foster 		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
16132dd3d709SBrian Foster 	} while (true);
16142dd3d709SBrian Foster 
16152dd3d709SBrian Foster 	return error;
16162dd3d709SBrian Foster }
16172dd3d709SBrian Foster 
16184ef897a2SDave Chinner int
1619a133d952SDave Chinner xfs_swap_extents(
1620e06259aaSDarrick J. Wong 	struct xfs_inode	*ip,	/* target inode */
1621e06259aaSDarrick J. Wong 	struct xfs_inode	*tip,	/* tmp inode */
1622e06259aaSDarrick J. Wong 	struct xfs_swapext	*sxp)
1623a133d952SDave Chinner {
1624e06259aaSDarrick J. Wong 	struct xfs_mount	*mp = ip->i_mount;
1625e06259aaSDarrick J. Wong 	struct xfs_trans	*tp;
1626e06259aaSDarrick J. Wong 	struct xfs_bstat	*sbp = &sxp->sx_stat;
1627a133d952SDave Chinner 	int			src_log_flags, target_log_flags;
1628a133d952SDave Chinner 	int			error = 0;
162981217683SDave Chinner 	int			lock_flags;
1630c8ce540dSDarrick J. Wong 	uint64_t		f;
16312dd3d709SBrian Foster 	int			resblks = 0;
1632f74681baSBrian Foster 	unsigned int		flags = 0;
1633a133d952SDave Chinner 
1634a133d952SDave Chinner 	/*
1635723cac48SDave Chinner 	 * Lock the inodes against other IO, page faults and truncate to
	 1636723cac48SDave Chinner 	 * begin with.  Then we can safely ensure the inodes are flushed and
	 1637723cac48SDave Chinner 	 * have no page cache. Once we have done this we can take the ilocks and
1638723cac48SDave Chinner 	 * do the rest of the checks.
1639a133d952SDave Chinner 	 */
164065523218SChristoph Hellwig 	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
164165523218SChristoph Hellwig 	lock_flags = XFS_MMAPLOCK_EXCL;
16427c2d238aSDarrick J. Wong 	xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
1643a133d952SDave Chinner 
1644a133d952SDave Chinner 	/* Verify that both files have the same format */
1645c19b3b05SDave Chinner 	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
16462451337dSDave Chinner 		error = -EINVAL;
1647a133d952SDave Chinner 		goto out_unlock;
1648a133d952SDave Chinner 	}
1649a133d952SDave Chinner 
1650a133d952SDave Chinner 	/* Verify both files are either real-time or non-realtime */
1651a133d952SDave Chinner 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
16522451337dSDave Chinner 		error = -EINVAL;
1653a133d952SDave Chinner 		goto out_unlock;
1654a133d952SDave Chinner 	}
1655a133d952SDave Chinner 
16562713fefaSDarrick J. Wong 	error = xfs_qm_dqattach(ip);
16572713fefaSDarrick J. Wong 	if (error)
16582713fefaSDarrick J. Wong 		goto out_unlock;
16592713fefaSDarrick J. Wong 
16602713fefaSDarrick J. Wong 	error = xfs_qm_dqattach(tip);
16612713fefaSDarrick J. Wong 	if (error)
16622713fefaSDarrick J. Wong 		goto out_unlock;
16632713fefaSDarrick J. Wong 
16644ef897a2SDave Chinner 	error = xfs_swap_extent_flush(ip);
1665a133d952SDave Chinner 	if (error)
1666a133d952SDave Chinner 		goto out_unlock;
16674ef897a2SDave Chinner 	error = xfs_swap_extent_flush(tip);
16684ef897a2SDave Chinner 	if (error)
16694ef897a2SDave Chinner 		goto out_unlock;
1670a133d952SDave Chinner 
167196987eeaSChristoph Hellwig 	if (xfs_inode_has_cow_data(tip)) {
167296987eeaSChristoph Hellwig 		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
167396987eeaSChristoph Hellwig 		if (error)
16748bc3b5e4SDarrick J. Wong 			goto out_unlock;
167596987eeaSChristoph Hellwig 	}
167696987eeaSChristoph Hellwig 
16771f08af52SDarrick J. Wong 	/*
16781f08af52SDarrick J. Wong 	 * Extent "swapping" with rmap requires a permanent reservation and
16791f08af52SDarrick J. Wong 	 * a block reservation because it's really just a remap operation
16801f08af52SDarrick J. Wong 	 * performed with log redo items!
16811f08af52SDarrick J. Wong 	 */
168238c26bfdSDave Chinner 	if (xfs_has_rmapbt(mp)) {
1683b3fed434SBrian Foster 		int		w = XFS_DATA_FORK;
1684daf83964SChristoph Hellwig 		uint32_t	ipnext = ip->i_df.if_nextents;
1685daf83964SChristoph Hellwig 		uint32_t	tipnext	= tip->i_df.if_nextents;
1686b3fed434SBrian Foster 
16871f08af52SDarrick J. Wong 		/*
1688b3fed434SBrian Foster 		 * Conceptually this shouldn't affect the shape of either bmbt,
1689b3fed434SBrian Foster 		 * but since we atomically move extents one by one, we reserve
1690b3fed434SBrian Foster 		 * enough space to rebuild both trees.
16911f08af52SDarrick J. Wong 		 */
1692b3fed434SBrian Foster 		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1693b3fed434SBrian Foster 		resblks +=  XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1694b3fed434SBrian Foster 
1695b3fed434SBrian Foster 		/*
1696f74681baSBrian Foster 		 * If either inode straddles a bmapbt block allocation boundary,
1697f74681baSBrian Foster 		 * the rmapbt algorithm triggers repeated allocs and frees as
1698f74681baSBrian Foster 		 * extents are remapped. This can exhaust the block reservation
1699f74681baSBrian Foster 		 * prematurely and cause shutdown. Return freed blocks to the
1700f74681baSBrian Foster 		 * transaction reservation to counter this behavior.
1701b3fed434SBrian Foster 		 */
1702f74681baSBrian Foster 		flags |= XFS_TRANS_RES_FDBLKS;
17032dd3d709SBrian Foster 	}
1704f74681baSBrian Foster 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
1705f74681baSBrian Foster 				&tp);
1706253f4911SChristoph Hellwig 	if (error)
1707a133d952SDave Chinner 		goto out_unlock;
1708723cac48SDave Chinner 
1709723cac48SDave Chinner 	/*
	 1710723cac48SDave Chinner 	 * Lock and join the inodes to the transaction so that transaction commit
1711723cac48SDave Chinner 	 * or cancel will unlock the inodes from this point onwards.
1712723cac48SDave Chinner 	 */
17137c2d238aSDarrick J. Wong 	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
17144ef897a2SDave Chinner 	lock_flags |= XFS_ILOCK_EXCL;
171539aff5fdSDarrick J. Wong 	xfs_trans_ijoin(tp, ip, 0);
171639aff5fdSDarrick J. Wong 	xfs_trans_ijoin(tp, tip, 0);
1717723cac48SDave Chinner 
1718a133d952SDave Chinner 
1719a133d952SDave Chinner 	/* Verify all data are being swapped */
1720a133d952SDave Chinner 	if (sxp->sx_offset != 0 ||
172113d2c10bSChristoph Hellwig 	    sxp->sx_length != ip->i_disk_size ||
172213d2c10bSChristoph Hellwig 	    sxp->sx_length != tip->i_disk_size) {
17232451337dSDave Chinner 		error = -EFAULT;
17244ef897a2SDave Chinner 		goto out_trans_cancel;
1725a133d952SDave Chinner 	}
1726a133d952SDave Chinner 
1727a133d952SDave Chinner 	trace_xfs_swap_extent_before(ip, 0);
1728a133d952SDave Chinner 	trace_xfs_swap_extent_before(tip, 1);
1729a133d952SDave Chinner 
1730a133d952SDave Chinner 	/* check inode formats now that data is flushed */
1731a133d952SDave Chinner 	error = xfs_swap_extents_check_format(ip, tip);
1732a133d952SDave Chinner 	if (error) {
1733a133d952SDave Chinner 		xfs_notice(mp,
1734a133d952SDave Chinner 		    "%s: inode 0x%llx format is incompatible for exchanging.",
1735a133d952SDave Chinner 				__func__, ip->i_ino);
17364ef897a2SDave Chinner 		goto out_trans_cancel;
1737a133d952SDave Chinner 	}
1738a133d952SDave Chinner 
1739a133d952SDave Chinner 	/*
	 1740a133d952SDave Chinner 	 * Compare the current change & modify times with those
	 1741a133d952SDave Chinner 	 * passed in.  If they differ, we abort this swap.
	 1742a133d952SDave Chinner 	 * This is the mechanism used to assure the calling
	 1743a133d952SDave Chinner 	 * process that the file was not changed out from
1744a133d952SDave Chinner 	 * under it.
1745a133d952SDave Chinner 	 */
1746a133d952SDave Chinner 	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1747a133d952SDave Chinner 	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1748a133d952SDave Chinner 	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1749a133d952SDave Chinner 	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
17502451337dSDave Chinner 		error = -EBUSY;
175181217683SDave Chinner 		goto out_trans_cancel;
1752a133d952SDave Chinner 	}
1753a133d952SDave Chinner 
175421b5c978SDave Chinner 	/*
175521b5c978SDave Chinner 	 * Note the trickiness in setting the log flags - we set the owner log
175621b5c978SDave Chinner 	 * flag on the opposite inode (i.e. the inode we are setting the new
175721b5c978SDave Chinner 	 * owner to be) because once we swap the forks and log that, log
175821b5c978SDave Chinner 	 * recovery is going to see the fork as owned by the swapped inode,
175921b5c978SDave Chinner 	 * not the pre-swapped inodes.
176021b5c978SDave Chinner 	 */
176121b5c978SDave Chinner 	src_log_flags = XFS_ILOG_CORE;
176221b5c978SDave Chinner 	target_log_flags = XFS_ILOG_CORE;
176339aff5fdSDarrick J. Wong 
176438c26bfdSDave Chinner 	if (xfs_has_rmapbt(mp))
17651f08af52SDarrick J. Wong 		error = xfs_swap_extent_rmap(&tp, ip, tip);
17661f08af52SDarrick J. Wong 	else
176739aff5fdSDarrick J. Wong 		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
176839aff5fdSDarrick J. Wong 				&target_log_flags);
176921b5c978SDave Chinner 	if (error)
177021b5c978SDave Chinner 		goto out_trans_cancel;
1771a133d952SDave Chinner 
1772f0bc4d13SDarrick J. Wong 	/* Do we have to swap reflink flags? */
17733e09ab8fSChristoph Hellwig 	if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
17743e09ab8fSChristoph Hellwig 	    (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
17753e09ab8fSChristoph Hellwig 		f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
17763e09ab8fSChristoph Hellwig 		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
17773e09ab8fSChristoph Hellwig 		ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
17783e09ab8fSChristoph Hellwig 		tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
17793e09ab8fSChristoph Hellwig 		tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
178052bfcdd7SDarrick J. Wong 	}
178152bfcdd7SDarrick J. Wong 
178252bfcdd7SDarrick J. Wong 	/* Swap the cow forks. */
178338c26bfdSDave Chinner 	if (xfs_has_reflink(mp)) {
1784f7e67b20SChristoph Hellwig 		ASSERT(!ip->i_cowfp ||
1785f7e67b20SChristoph Hellwig 		       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1786f7e67b20SChristoph Hellwig 		ASSERT(!tip->i_cowfp ||
1787f7e67b20SChristoph Hellwig 		       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
178852bfcdd7SDarrick J. Wong 
1789897992b7SGustavo A. R. Silva 		swap(ip->i_cowfp, tip->i_cowfp);
179052bfcdd7SDarrick J. Wong 
17915bcffe30SChristoph Hellwig 		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
179283104d44SDarrick J. Wong 			xfs_inode_set_cowblocks_tag(ip);
179352bfcdd7SDarrick J. Wong 		else
179452bfcdd7SDarrick J. Wong 			xfs_inode_clear_cowblocks_tag(ip);
17955bcffe30SChristoph Hellwig 		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
179683104d44SDarrick J. Wong 			xfs_inode_set_cowblocks_tag(tip);
179752bfcdd7SDarrick J. Wong 		else
179852bfcdd7SDarrick J. Wong 			xfs_inode_clear_cowblocks_tag(tip);
1799f0bc4d13SDarrick J. Wong 	}
1800f0bc4d13SDarrick J. Wong 
1801a133d952SDave Chinner 	xfs_trans_log_inode(tp, ip,  src_log_flags);
1802a133d952SDave Chinner 	xfs_trans_log_inode(tp, tip, target_log_flags);
1803a133d952SDave Chinner 
1804a133d952SDave Chinner 	/*
18056fb10d6dSBrian Foster 	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
18066fb10d6dSBrian Foster 	 * have inode number owner values in the bmbt blocks that still refer to
18076fb10d6dSBrian Foster 	 * the old inode. Scan each bmbt to fix up the owner values with the
18086fb10d6dSBrian Foster 	 * inode number of the current inode.
18096fb10d6dSBrian Foster 	 */
18106fb10d6dSBrian Foster 	if (src_log_flags & XFS_ILOG_DOWNER) {
18112dd3d709SBrian Foster 		error = xfs_swap_change_owner(&tp, ip, tip);
18126fb10d6dSBrian Foster 		if (error)
18136fb10d6dSBrian Foster 			goto out_trans_cancel;
18146fb10d6dSBrian Foster 	}
18156fb10d6dSBrian Foster 	if (target_log_flags & XFS_ILOG_DOWNER) {
18162dd3d709SBrian Foster 		error = xfs_swap_change_owner(&tp, tip, ip);
18176fb10d6dSBrian Foster 		if (error)
18186fb10d6dSBrian Foster 			goto out_trans_cancel;
18196fb10d6dSBrian Foster 	}
18206fb10d6dSBrian Foster 
18216fb10d6dSBrian Foster 	/*
1822a133d952SDave Chinner 	 * If this is a synchronous mount, make sure that the
1823a133d952SDave Chinner 	 * transaction goes to disk before returning to the user.
1824a133d952SDave Chinner 	 */
18250560f31aSDave Chinner 	if (xfs_has_wsync(mp))
1826a133d952SDave Chinner 		xfs_trans_set_sync(tp);
1827a133d952SDave Chinner 
182870393313SChristoph Hellwig 	error = xfs_trans_commit(tp);
1829a133d952SDave Chinner 
1830a133d952SDave Chinner 	trace_xfs_swap_extent_after(ip, 0);
1831a133d952SDave Chinner 	trace_xfs_swap_extent_after(tip, 1);
183239aff5fdSDarrick J. Wong 
183365523218SChristoph Hellwig out_unlock:
183439aff5fdSDarrick J. Wong 	xfs_iunlock(ip, lock_flags);
183539aff5fdSDarrick J. Wong 	xfs_iunlock(tip, lock_flags);
183665523218SChristoph Hellwig 	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1837a133d952SDave Chinner 	return error;
1838a133d952SDave Chinner 
183939aff5fdSDarrick J. Wong out_trans_cancel:
184039aff5fdSDarrick J. Wong 	xfs_trans_cancel(tp);
184165523218SChristoph Hellwig 	goto out_unlock;
1842a133d952SDave Chinner }
1843
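/*
 * Illustrative userspace sketch (not part of this file): the usual caller of
 * xfs_swap_extents() is the XFS_IOC_SWAPEXT ioctl issued by xfs_fsr during
 * defragmentation. This is a minimal sketch assuming the struct xfs_swapext
 * layout from the XFS userspace headers; the file descriptors, size and
 * bulkstat variable are made up for the example.
 *
 *	struct xfs_swapext sx = { 0 };
 *
 *	sx.sx_version  = XFS_SX_VERSION;
 *	sx.sx_fdtarget = target_fd;	// the fragmented file
 *	sx.sx_fdtmp    = tmp_fd;	// the defragmented copy
 *	sx.sx_offset   = 0;		// must be 0 (whole file)
 *	sx.sx_length   = target_size;	// must equal both files' sizes
 *	sx.sx_stat     = target_bstat;	// from bulkstat; ctime/mtime must
 *					// still match or we return -EBUSY
 *
 *	if (ioctl(target_fd, XFS_IOC_SWAPEXT, &sx) < 0)
 *		perror("XFS_IOC_SWAPEXT");
 */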