xref: /linux/fs/xfs/xfs_bmap_util.c (revision a8fe58cec351c25e09c393bf46117c0c47b5a17c)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * Copyright (c) 2012 Red Hat, Inc.
4  * All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it would be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write the Free Software Foundation,
17  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18  */
19 #include "xfs.h"
20 #include "xfs_fs.h"
21 #include "xfs_shared.h"
22 #include "xfs_format.h"
23 #include "xfs_log_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_bit.h"
26 #include "xfs_mount.h"
27 #include "xfs_da_format.h"
28 #include "xfs_inode.h"
29 #include "xfs_btree.h"
30 #include "xfs_trans.h"
31 #include "xfs_extfree_item.h"
32 #include "xfs_alloc.h"
33 #include "xfs_bmap.h"
34 #include "xfs_bmap_util.h"
35 #include "xfs_bmap_btree.h"
36 #include "xfs_rtalloc.h"
37 #include "xfs_error.h"
38 #include "xfs_quota.h"
39 #include "xfs_trans_space.h"
40 #include "xfs_trace.h"
41 #include "xfs_icache.h"
42 #include "xfs_log.h"
43 
44 /* Kernel only BMAP related definitions and functions */
45 
46 /*
47  * Convert the given file system block to a disk block.  We have to treat it
48  * differently based on whether the file is a real time file or not, because the
49  * bmap code does.
50  */
51 xfs_daddr_t
52 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
53 {
54 	return (XFS_IS_REALTIME_INODE(ip) ?
55 		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
56 		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
57 }
58 
59 /*
60  * Routine to zero an extent on disk allocated to the specific inode.
61  *
62  * The VFS functions take a linearised filesystem block offset, so we have to
63  * convert the sparse xfs fsb to the right format first.
64  * VFS types are real funky, too.
65  */
66 int
67 xfs_zero_extent(
68 	struct xfs_inode *ip,
69 	xfs_fsblock_t	start_fsb,
70 	xfs_off_t	count_fsb)
71 {
72 	struct xfs_mount *mp = ip->i_mount;
73 	xfs_daddr_t	sector = xfs_fsb_to_db(ip, start_fsb);
74 	sector_t	block = XFS_BB_TO_FSBT(mp, sector);
75 	ssize_t		size = XFS_FSB_TO_B(mp, count_fsb);
76 
77 	if (IS_DAX(VFS_I(ip)))
78 		return dax_clear_blocks(VFS_I(ip), block, size);
79 
80 	/*
81 	 * let the block layer decide on the fastest method of
82 	 * implementing the zeroing.
83 	 */
84 	return sb_issue_zeroout(mp->m_super, block, count_fsb, GFP_NOFS);
85 
86 }
87 
88 /*
89  * Routine to be called at transaction's end by the xfs_bmapi()/xfs_bunmapi()
90  * caller.  Frees all the extents that need freeing, which must be done
91  * last due to locking considerations.  We never free any extents in
92  * the first transaction.
93  *
94  * If an inode *ip is provided, rejoin it to the transaction if
95  * the transaction was committed.
96  */
97 int						/* error */
98 xfs_bmap_finish(
99 	struct xfs_trans		**tp,	/* transaction pointer addr */
100 	struct xfs_bmap_free		*flist,	/* i/o: list extents to free */
101 	struct xfs_inode		*ip)
102 {
103 	struct xfs_efd_log_item		*efd;	/* extent free data */
104 	struct xfs_efi_log_item		*efi;	/* extent free intention */
105 	int				error;	/* error return value */
106 	int				committed;/* xact committed or not */
107 	struct xfs_bmap_free_item	*free;	/* free extent item */
108 	struct xfs_bmap_free_item	*next;	/* next item on free list */
109 
110 	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
111 	if (flist->xbf_count == 0)
112 		return 0;
113 
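
	/* Log an EFI covering every extent on the free list before we roll. */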
114 	efi = xfs_trans_get_efi(*tp, flist->xbf_count);
115 	for (free = flist->xbf_first; free; free = free->xbfi_next)
116 		xfs_trans_log_efi_extent(*tp, efi, free->xbfi_startblock,
117 			free->xbfi_blockcount);
118 
119 	error = __xfs_trans_roll(tp, ip, &committed);
120 	if (error) {
121 		/*
122 		 * If the transaction was committed, drop the EFD reference
123 		 * since we're bailing out of here. The other reference is
124 		 * dropped when the EFI hits the AIL.
125 		 *
126 		 * If the transaction was not committed, the EFI is freed by the
127 		 * EFI item unlock handler on abort. Also, we have a new
128 		 * transaction so we should return committed=1 even though we're
129 		 * returning an error.
130 		 */
131 		if (committed) {
132 			xfs_efi_release(efi);
133 			xfs_force_shutdown((*tp)->t_mountp,
134 				(error == -EFSCORRUPTED) ?
135 					SHUTDOWN_CORRUPT_INCORE :
136 					SHUTDOWN_META_IO_ERROR);
137 		}
138 		return error;
139 	}
140 
141 	/*
142 	 * Get an EFD and free each extent in the list, logging to the EFD in
143 	 * the process. The remaining bmap free list is cleaned up by the caller
144 	 * on error.
145 	 */
146 	efd = xfs_trans_get_efd(*tp, efi, flist->xbf_count);
147 	for (free = flist->xbf_first; free != NULL; free = next) {
148 		next = free->xbfi_next;
149 
150 		error = xfs_trans_free_extent(*tp, efd, free->xbfi_startblock,
151 					      free->xbfi_blockcount);
152 		if (error)
153 			return error;
154 
155 		xfs_bmap_del_free(flist, NULL, free);
156 	}
157 
158 	return 0;
159 }
160 
161 int
162 xfs_bmap_rtalloc(
163 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
164 {
165 	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
166 	int		error;		/* error return value */
167 	xfs_mount_t	*mp;		/* mount point structure */
168 	xfs_extlen_t	prod = 0;	/* product factor for allocators */
169 	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
170 	xfs_extlen_t	align;		/* minimum allocation alignment */
171 	xfs_rtblock_t	rtb;
172 
173 	mp = ap->ip->i_mount;
174 	align = xfs_get_extsz_hint(ap->ip);
175 	prod = align / mp->m_sb.sb_rextsize;
176 	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
177 					align, 1, ap->eof, 0,
178 					ap->conv, &ap->offset, &ap->length);
179 	if (error)
180 		return error;
181 	ASSERT(ap->length);
182 	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
183 
184 	/*
185 	 * If the offset & length are not perfectly aligned
186 	 * then kill prod, it will just get us in trouble.
187 	 */
188 	if (do_mod(ap->offset, align) || ap->length % align)
189 		prod = 1;
190 	/*
191 	 * Set ralen to be the actual requested length in rtextents.
192 	 */
193 	ralen = ap->length / mp->m_sb.sb_rextsize;
194 	/*
195 	 * If the old value was close enough to MAXEXTLEN that
196 	 * we rounded up to it, cut it back so it's valid again.
197 	 * Note that if it's a really large request (bigger than
198 	 * MAXEXTLEN), we don't hear about that number, and can't
199 	 * adjust the starting point to match it.
200 	 */
201 	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
202 		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
203 
204 	/*
205 	 * Lock out other modifications to the RT bitmap inode.
206 	 */
207 	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
208 	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
209 
210 	/*
211 	 * If it's an allocation to an empty file at offset 0,
212 	 * pick an extent that will space things out in the rt area.
213 	 */
214 	if (ap->eof && ap->offset == 0) {
215 		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
216 
217 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
218 		if (error)
219 			return error;
220 		ap->blkno = rtx * mp->m_sb.sb_rextsize;
221 	} else {
222 		ap->blkno = 0;
223 	}
224 
225 	xfs_bmap_adjacent(ap);
226 
227 	/*
228 	 * Realtime allocation, done through xfs_rtallocate_extent.
229 	 */
230 	atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
231 	do_div(ap->blkno, mp->m_sb.sb_rextsize);
232 	rtb = ap->blkno;
233 	ap->length = ralen;
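	/*
	 * Try the allocation with the computed prod factor first; if it fails
	 * and prod was greater than one, retry below with prod set to one.
	 */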
234 	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
235 				&ralen, atype, ap->wasdel, prod, &rtb)))
236 		return error;
237 	if (rtb == NULLFSBLOCK && prod > 1 &&
238 	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
239 					   ap->length, &ralen, atype,
240 					   ap->wasdel, 1, &rtb)))
241 		return error;
242 	ap->blkno = rtb;
243 	if (ap->blkno != NULLFSBLOCK) {
244 		ap->blkno *= mp->m_sb.sb_rextsize;
245 		ralen *= mp->m_sb.sb_rextsize;
246 		ap->length = ralen;
247 		ap->ip->i_d.di_nblocks += ralen;
248 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
249 		if (ap->wasdel)
250 			ap->ip->i_delayed_blks -= ralen;
251 		/*
252 		 * Adjust the disk quota also. This was reserved
253 		 * earlier.
254 		 */
255 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
256 			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
257 					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
258 
259 		/* Zero the extent if we were asked to do so */
260 		if (ap->userdata & XFS_ALLOC_USERDATA_ZERO) {
261 			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
262 			if (error)
263 				return error;
264 		}
265 	} else {
266 		ap->length = 0;
267 	}
268 	return 0;
269 }
270 
271 /*
272  * Check if the endoff is outside the last extent. If so the caller will grow
273  * the allocation to a stripe unit boundary.  All offsets are considered outside
274  * the end of file for an empty fork, so 1 is returned in *eof in that case.
275  */
276 int
277 xfs_bmap_eof(
278 	struct xfs_inode	*ip,
279 	xfs_fileoff_t		endoff,
280 	int			whichfork,
281 	int			*eof)
282 {
283 	struct xfs_bmbt_irec	rec;
284 	int			error;
285 
286 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
287 	if (error || *eof)
288 		return error;
289 
290 	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
291 	return 0;
292 }
293 
294 /*
295  * Extent tree block counting routines.
296  */
297 
298 /*
299  * Count leaf blocks given a range of extent records.
300  */
301 STATIC void
302 xfs_bmap_count_leaves(
303 	xfs_ifork_t		*ifp,
304 	xfs_extnum_t		idx,
305 	int			numrecs,
306 	int			*count)
307 {
308 	int		b;
309 
310 	for (b = 0; b < numrecs; b++) {
311 		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
312 		*count += xfs_bmbt_get_blockcount(frp);
313 	}
314 }
315 
316 /*
317  * Count leaf blocks given a range of extent records originally
318  * in btree format.
319  */
320 STATIC void
321 xfs_bmap_disk_count_leaves(
322 	struct xfs_mount	*mp,
323 	struct xfs_btree_block	*block,
324 	int			numrecs,
325 	int			*count)
326 {
327 	int		b;
328 	xfs_bmbt_rec_t	*frp;
329 
330 	for (b = 1; b <= numrecs; b++) {
331 		frp = XFS_BMBT_REC_ADDR(mp, block, b);
332 		*count += xfs_bmbt_disk_get_blockcount(frp);
333 	}
334 }
335 
336 /*
337  * Recursively walks each level of a btree
338  * to count total fsblocks in use.
339  */
340 STATIC int                                     /* error */
341 xfs_bmap_count_tree(
342 	xfs_mount_t     *mp,            /* file system mount point */
343 	xfs_trans_t     *tp,            /* transaction pointer */
344 	xfs_ifork_t	*ifp,		/* inode fork pointer */
345 	xfs_fsblock_t   blockno,	/* file system block number */
346 	int             levelin,	/* level in btree */
347 	int		*count)		/* Count of blocks */
348 {
349 	int			error;
350 	xfs_buf_t		*bp, *nbp;
351 	int			level = levelin;
352 	__be64			*pp;
353 	xfs_fsblock_t           bno = blockno;
354 	xfs_fsblock_t		nextbno;
355 	struct xfs_btree_block	*block, *nextblock;
356 	int			numrecs;
357 
358 	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
359 						&xfs_bmbt_buf_ops);
360 	if (error)
361 		return error;
362 	*count += 1;
363 	block = XFS_BUF_TO_BLOCK(bp);
364 
365 	if (--level) {
366 		/* Not at node above leaves, count this level of nodes */
367 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
368 		while (nextbno != NULLFSBLOCK) {
369 			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
370 						XFS_BMAP_BTREE_REF,
371 						&xfs_bmbt_buf_ops);
372 			if (error)
373 				return error;
374 			*count += 1;
375 			nextblock = XFS_BUF_TO_BLOCK(nbp);
376 			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
377 			xfs_trans_brelse(tp, nbp);
378 		}
379 
380 		/* Dive to the next level */
381 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
382 		bno = be64_to_cpu(*pp);
383 		if (unlikely((error =
384 		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
385 			xfs_trans_brelse(tp, bp);
386 			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
387 					 XFS_ERRLEVEL_LOW, mp);
388 			return -EFSCORRUPTED;
389 		}
390 		xfs_trans_brelse(tp, bp);
391 	} else {
392 		/* count all level 1 nodes and their leaves */
393 		for (;;) {
394 			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
395 			numrecs = be16_to_cpu(block->bb_numrecs);
396 			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
397 			xfs_trans_brelse(tp, bp);
398 			if (nextbno == NULLFSBLOCK)
399 				break;
400 			bno = nextbno;
401 			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
402 						XFS_BMAP_BTREE_REF,
403 						&xfs_bmbt_buf_ops);
404 			if (error)
405 				return error;
406 			*count += 1;
407 			block = XFS_BUF_TO_BLOCK(bp);
408 		}
409 	}
410 	return 0;
411 }
412 
413 /*
414  * Count fsblocks of the given fork.
415  */
416 int						/* error */
417 xfs_bmap_count_blocks(
418 	xfs_trans_t		*tp,		/* transaction pointer */
419 	xfs_inode_t		*ip,		/* incore inode */
420 	int			whichfork,	/* data or attr fork */
421 	int			*count)		/* out: count of blocks */
422 {
423 	struct xfs_btree_block	*block;	/* current btree block */
424 	xfs_fsblock_t		bno;	/* block # of "block" */
425 	xfs_ifork_t		*ifp;	/* fork structure */
426 	int			level;	/* btree level, for checking */
427 	xfs_mount_t		*mp;	/* file system mount structure */
428 	__be64			*pp;	/* pointer to block address */
429 
430 	bno = NULLFSBLOCK;
431 	mp = ip->i_mount;
432 	ifp = XFS_IFORK_PTR(ip, whichfork);
433 	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
434 		xfs_bmap_count_leaves(ifp, 0,
435 			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
436 			count);
437 		return 0;
438 	}
439 
440 	/*
441 	 * Root level must use XFS_BMAP_BROOT_PTR_ADDR macro to get ptr out.
442 	 */
443 	block = ifp->if_broot;
444 	level = be16_to_cpu(block->bb_level);
445 	ASSERT(level > 0);
446 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
447 	bno = be64_to_cpu(*pp);
448 	ASSERT(bno != NULLFSBLOCK);
449 	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
450 	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
451 
452 	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
453 		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
454 				 mp);
455 		return -EFSCORRUPTED;
456 	}
457 
458 	return 0;
459 }
460 
461 /*
462  * Returns 1 for success, 0 if we failed to map the extent.
463  */
464 STATIC int
465 xfs_getbmapx_fix_eof_hole(
466 	xfs_inode_t		*ip,		/* xfs incore inode pointer */
467 	struct getbmapx		*out,		/* output structure */
468 	int			prealloced,	/* this is a file with
469 						 * preallocated data space */
470 	__int64_t		end,		/* last block requested */
471 	xfs_fsblock_t		startblock)
472 {
473 	__int64_t		fixlen;
474 	xfs_mount_t		*mp;		/* file system mount point */
475 	xfs_ifork_t		*ifp;		/* inode fork pointer */
476 	xfs_extnum_t		lastx;		/* last extent pointer */
477 	xfs_fileoff_t		fileblock;
478 
479 	if (startblock == HOLESTARTBLOCK) {
480 		mp = ip->i_mount;
481 		out->bmv_block = -1;
482 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
483 		fixlen -= out->bmv_offset;
484 		if (prealloced && out->bmv_offset + out->bmv_length == end) {
485 			/* Came to hole at EOF. Trim it. */
486 			if (fixlen <= 0)
487 				return 0;
488 			out->bmv_length = fixlen;
489 		}
490 	} else {
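		/*
		 * Report the mapped block (or a delalloc marker) and flag the
		 * record if this is the last extent in the data fork.
		 */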
491 		if (startblock == DELAYSTARTBLOCK)
492 			out->bmv_block = -2;
493 		else
494 			out->bmv_block = xfs_fsb_to_db(ip, startblock);
495 		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
496 		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
497 		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
498 		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
499 			out->bmv_oflags |= BMV_OF_LAST;
500 	}
501 
502 	return 1;
503 }
504 
505 /*
506  * Get inode's extents as described in bmv, and format for output.
507  * Calls formatter to fill the user's buffer until all extents
508  * are mapped, until the passed-in bmv->bmv_count slots have
509  * been filled, or until the formatter short-circuits the loop,
510  * if it is tracking filled-in extents on its own.
511  */
512 int						/* error code */
513 xfs_getbmap(
514 	xfs_inode_t		*ip,
515 	struct getbmapx		*bmv,		/* user bmap structure */
516 	xfs_bmap_format_t	formatter,	/* format to user */
517 	void			*arg)		/* formatter arg */
518 {
519 	__int64_t		bmvend;		/* last block requested */
520 	int			error = 0;	/* return value */
521 	__int64_t		fixlen;		/* length for -1 case */
522 	int			i;		/* extent number */
523 	int			lock;		/* lock state */
524 	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
525 	xfs_mount_t		*mp;		/* file system mount point */
526 	int			nex;		/* # of user extents can do */
527 	int			nexleft;	/* # of user extents left */
528 	int			subnex;		/* # of bmapi's can do */
529 	int			nmap;		/* number of map entries */
530 	struct getbmapx		*out;		/* output structure */
531 	int			whichfork;	/* data or attr fork */
532 	int			prealloced;	/* this is a file with
533 						 * preallocated data space */
534 	int			iflags;		/* interface flags */
535 	int			bmapi_flags;	/* flags for xfs_bmapi */
536 	int			cur_ext = 0;
537 
538 	mp = ip->i_mount;
539 	iflags = bmv->bmv_iflags;
540 	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
541 
542 	if (whichfork == XFS_ATTR_FORK) {
543 		if (XFS_IFORK_Q(ip)) {
544 			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
545 			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
546 			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
547 				return -EINVAL;
548 		} else if (unlikely(
549 			   ip->i_d.di_aformat != 0 &&
550 			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
551 			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
552 					 ip->i_mount);
553 			return -EFSCORRUPTED;
554 		}
555 
556 		prealloced = 0;
557 		fixlen = 1LL << 32;
558 	} else {
559 		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
560 		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
561 		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
562 			return -EINVAL;
563 
564 		if (xfs_get_extsz_hint(ip) ||
565 		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
566 			prealloced = 1;
567 			fixlen = mp->m_super->s_maxbytes;
568 		} else {
569 			prealloced = 0;
570 			fixlen = XFS_ISIZE(ip);
571 		}
572 	}
573 
574 	if (bmv->bmv_length == -1) {
575 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
576 		bmv->bmv_length =
577 			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
578 	} else if (bmv->bmv_length == 0) {
579 		bmv->bmv_entries = 0;
580 		return 0;
581 	} else if (bmv->bmv_length < 0) {
582 		return -EINVAL;
583 	}
584 
585 	nex = bmv->bmv_count - 1;
586 	if (nex <= 0)
587 		return -EINVAL;
588 	bmvend = bmv->bmv_offset + bmv->bmv_length;
589 
590 
591 	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
592 		return -ENOMEM;
593 	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
594 	if (!out)
595 		return -ENOMEM;
596 
597 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
598 	if (whichfork == XFS_DATA_FORK) {
599 		if (!(iflags & BMV_IF_DELALLOC) &&
600 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
601 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
602 			if (error)
603 				goto out_unlock_iolock;
604 
605 			/*
606 			 * Even after flushing the inode, there can still be
607 			 * delalloc blocks on the inode beyond EOF due to
608 			 * speculative preallocation.  These are not removed
609 			 * until the release function is called or the inode
610 			 * is inactivated.  Hence we cannot assert here that
611 			 * ip->i_delayed_blks == 0.
612 			 */
613 		}
614 
615 		lock = xfs_ilock_data_map_shared(ip);
616 	} else {
617 		lock = xfs_ilock_attr_map_shared(ip);
618 	}
619 
620 	/*
621 	 * Don't let nex be bigger than the number of extents
622 	 * we can have assuming alternating holes and real extents.
623 	 */
624 	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
625 		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
626 
627 	bmapi_flags = xfs_bmapi_aflag(whichfork);
628 	if (!(iflags & BMV_IF_PREALLOC))
629 		bmapi_flags |= XFS_BMAPI_IGSTATE;
630 
631 	/*
632 	 * Allocate enough space to handle "subnex" maps at a time.
633 	 */
634 	error = -ENOMEM;
635 	subnex = 16;
636 	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
637 	if (!map)
638 		goto out_unlock_ilock;
639 
640 	bmv->bmv_entries = 0;
641 
642 	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
643 	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
644 		error = 0;
645 		goto out_free_map;
646 	}
647 
648 	nexleft = nex;
649 
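	/*
	 * Read up to subnex mappings at a time and convert each one into a
	 * getbmapx record until the range or the output array is exhausted.
	 */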
650 	do {
651 		nmap = (nexleft > subnex) ? subnex : nexleft;
652 		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
653 				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
654 				       map, &nmap, bmapi_flags);
655 		if (error)
656 			goto out_free_map;
657 		ASSERT(nmap <= subnex);
658 
659 		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
660 			out[cur_ext].bmv_oflags = 0;
661 			if (map[i].br_state == XFS_EXT_UNWRITTEN)
662 				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
663 			else if (map[i].br_startblock == DELAYSTARTBLOCK)
664 				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
665 			out[cur_ext].bmv_offset =
666 				XFS_FSB_TO_BB(mp, map[i].br_startoff);
667 			out[cur_ext].bmv_length =
668 				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
669 			out[cur_ext].bmv_unused1 = 0;
670 			out[cur_ext].bmv_unused2 = 0;
671 
672 			/*
673 			 * delayed allocation extents that start beyond EOF can
674 			 * occur due to speculative EOF allocation when the
675 			 * delalloc extent is larger than the largest freespace
676 			 * extent at conversion time. These extents cannot be
677 			 * converted by data writeback, so can exist here even
678 			 * if we are not supposed to be finding delalloc
679 			 * extents.
680 			 */
681 			if (map[i].br_startblock == DELAYSTARTBLOCK &&
682 			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
683 				ASSERT((iflags & BMV_IF_DELALLOC) != 0);
684 
685 			if (map[i].br_startblock == HOLESTARTBLOCK &&
686 			    whichfork == XFS_ATTR_FORK) {
687 				/* came to the end of attribute fork */
688 				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
689 				goto out_free_map;
690 			}
691 
692 			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
693 					prealloced, bmvend,
694 					map[i].br_startblock))
695 				goto out_free_map;
696 
697 			bmv->bmv_offset =
698 				out[cur_ext].bmv_offset +
699 				out[cur_ext].bmv_length;
700 			bmv->bmv_length =
701 				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
702 
703 			/*
704 			 * In case we don't want to return the hole,
705 			 * don't increase cur_ext so that we can reuse
706 			 * it in the next loop.
707 			 */
708 			if ((iflags & BMV_IF_NO_HOLES) &&
709 			    map[i].br_startblock == HOLESTARTBLOCK) {
710 				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
711 				continue;
712 			}
713 
714 			nexleft--;
715 			bmv->bmv_entries++;
716 			cur_ext++;
717 		}
718 	} while (nmap && nexleft && bmv->bmv_length);
719 
720  out_free_map:
721 	kmem_free(map);
722  out_unlock_ilock:
723 	xfs_iunlock(ip, lock);
724  out_unlock_iolock:
725 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
726 
727 	for (i = 0; i < cur_ext; i++) {
728 		int full = 0;	/* user array is full */
729 
730 		/* format results & advance arg */
731 		error = formatter(&arg, &out[i], &full);
732 		if (error || full)
733 			break;
734 	}
735 
736 	kmem_free(out);
737 	return error;
738 }
739 
740 /*
741  * Dead simple method of punching delayed allocation blocks from a range in
742  * the inode. Walks a block at a time so will be slow, but is only executed in
743  * rare error cases so the overhead is not critical. This will always punch out
744  * both the start and end blocks, even if the ranges only partially overlap
745  * them, so it is up to the caller to ensure that partial blocks are not
746  * passed in.
747  */
748 int
749 xfs_bmap_punch_delalloc_range(
750 	struct xfs_inode	*ip,
751 	xfs_fileoff_t		start_fsb,
752 	xfs_fileoff_t		length)
753 {
754 	xfs_fileoff_t		remaining = length;
755 	int			error = 0;
756 
757 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
758 
759 	do {
760 		int		done;
761 		xfs_bmbt_irec_t	imap;
762 		int		nimaps = 1;
763 		xfs_fsblock_t	firstblock;
764 		xfs_bmap_free_t flist;
765 
766 		/*
767 		 * Map the range first and check that it is a delalloc extent
768 		 * before trying to unmap the range. Otherwise we will be
769 		 * trying to remove a real extent (which requires a
770 		 * transaction) or a hole, which is probably a bad idea...
771 		 */
772 		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
773 				       XFS_BMAPI_ENTIRE);
774 
775 		if (error) {
776 			/* something screwed, just bail */
777 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
778 				xfs_alert(ip->i_mount,
779 			"Failed delalloc mapping lookup ino %lld fsb %lld.",
780 						ip->i_ino, start_fsb);
781 			}
782 			break;
783 		}
784 		if (!nimaps) {
785 			/* nothing there */
786 			goto next_block;
787 		}
788 		if (imap.br_startblock != DELAYSTARTBLOCK) {
789 			/* been converted, ignore */
790 			goto next_block;
791 		}
792 		WARN_ON(imap.br_blockcount == 0);
793 
794 		/*
795 		 * Note: while we initialise the firstblock/flist pair, they
796 		 * should never be used because blocks should never be
797 		 * allocated or freed for a delalloc extent and hence we don't
798 		 * need to cancel or finish them after the xfs_bunmapi() call.
799 		 */
800 		xfs_bmap_init(&flist, &firstblock);
801 		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
802 					&flist, &done);
803 		if (error)
804 			break;
805 
806 		ASSERT(!flist.xbf_count && !flist.xbf_first);
807 next_block:
808 		start_fsb++;
809 		remaining--;
810 	} while (remaining > 0);
811 
812 	return error;
813 }
814 
815 /*
816  * Test whether it is appropriate to check an inode for and free post EOF
817  * blocks. The 'force' parameter determines whether we should also consider
818  * regular files that are marked preallocated or append-only.
819  */
820 bool
821 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
822 {
823 	/* prealloc/delalloc exists only on regular files */
824 	if (!S_ISREG(ip->i_d.di_mode))
825 		return false;
826 
827 	/*
828 	 * Zero sized files with no cached pages and delalloc blocks will not
829 	 * have speculative prealloc/delalloc blocks to remove.
830 	 */
831 	if (VFS_I(ip)->i_size == 0 &&
832 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
833 	    ip->i_delayed_blks == 0)
834 		return false;
835 
836 	/* If we haven't read in the extent list, then don't do it now. */
837 	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
838 		return false;
839 
840 	/*
841 	 * Do not free real preallocated or append-only files unless the file
842 	 * has delalloc blocks and we are forced to remove them.
843 	 */
844 	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
845 		if (!force || ip->i_delayed_blks == 0)
846 			return false;
847 
848 	return true;
849 }
850 
851 /*
852  * This is called by xfs_inactive to free any blocks beyond eof
853  * when the link count isn't zero and by xfs_dm_punch_hole() when
854  * punching a hole to EOF.
855  */
856 int
857 xfs_free_eofblocks(
858 	xfs_mount_t	*mp,
859 	xfs_inode_t	*ip,
860 	bool		need_iolock)
861 {
862 	xfs_trans_t	*tp;
863 	int		error;
864 	xfs_fileoff_t	end_fsb;
865 	xfs_fileoff_t	last_fsb;
866 	xfs_filblks_t	map_len;
867 	int		nimaps;
868 	xfs_bmbt_irec_t	imap;
869 
870 	/*
871 	 * Figure out if there are any blocks beyond the end
872 	 * of the file.  If not, then there is nothing to do.
873 	 */
874 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
875 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
876 	if (last_fsb <= end_fsb)
877 		return 0;
878 	map_len = last_fsb - end_fsb;
879 
880 	nimaps = 1;
881 	xfs_ilock(ip, XFS_ILOCK_SHARED);
882 	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
883 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
884 
885 	if (!error && (nimaps != 0) &&
886 	    (imap.br_startblock != HOLESTARTBLOCK ||
887 	     ip->i_delayed_blks)) {
888 		/*
889 		 * Attach the dquots to the inode up front.
890 		 */
891 		error = xfs_qm_dqattach(ip, 0);
892 		if (error)
893 			return error;
894 
895 		/*
896 		 * There are blocks after the end of file.
897 		 * Free them up now by truncating the file to
898 		 * its current size.
899 		 */
900 		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
901 
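		/*
		 * If the caller doesn't already hold the iolock, take it
		 * without blocking and back off with -EAGAIN if we can't.
		 */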
902 		if (need_iolock) {
903 			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
904 				xfs_trans_cancel(tp);
905 				return -EAGAIN;
906 			}
907 		}
908 
909 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
910 		if (error) {
911 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
912 			xfs_trans_cancel(tp);
913 			if (need_iolock)
914 				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
915 			return error;
916 		}
917 
918 		xfs_ilock(ip, XFS_ILOCK_EXCL);
919 		xfs_trans_ijoin(tp, ip, 0);
920 
921 		/*
922 		 * Do not update the on-disk file size.  If we update the
923 		 * on-disk file size and then the system crashes before the
924 		 * contents of the file are flushed to disk then the files
925 		 * may be full of holes (ie NULL files bug).
926 		 */
927 		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
928 					      XFS_ISIZE(ip));
929 		if (error) {
930 			/*
931 			 * If we get an error at this point we simply don't
932 			 * bother truncating the file.
933 			 */
934 			xfs_trans_cancel(tp);
935 		} else {
936 			error = xfs_trans_commit(tp);
937 			if (!error)
938 				xfs_inode_clear_eofblocks_tag(ip);
939 		}
940 
941 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
942 		if (need_iolock)
943 			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
944 	}
945 	return error;
946 }
947 
948 int
949 xfs_alloc_file_space(
950 	struct xfs_inode	*ip,
951 	xfs_off_t		offset,
952 	xfs_off_t		len,
953 	int			alloc_type)
954 {
955 	xfs_mount_t		*mp = ip->i_mount;
956 	xfs_off_t		count;
957 	xfs_filblks_t		allocated_fsb;
958 	xfs_filblks_t		allocatesize_fsb;
959 	xfs_extlen_t		extsz, temp;
960 	xfs_fileoff_t		startoffset_fsb;
961 	xfs_fsblock_t		firstfsb;
962 	int			nimaps;
963 	int			quota_flag;
964 	int			rt;
965 	xfs_trans_t		*tp;
966 	xfs_bmbt_irec_t		imaps[1], *imapp;
967 	xfs_bmap_free_t		free_list;
968 	uint			qblocks, resblks, resrtextents;
969 	int			error;
970 
971 	trace_xfs_alloc_file_space(ip);
972 
973 	if (XFS_FORCED_SHUTDOWN(mp))
974 		return -EIO;
975 
976 	error = xfs_qm_dqattach(ip, 0);
977 	if (error)
978 		return error;
979 
980 	if (len <= 0)
981 		return -EINVAL;
982 
983 	rt = XFS_IS_REALTIME_INODE(ip);
984 	extsz = xfs_get_extsz_hint(ip);
985 
986 	count = len;
987 	imapp = &imaps[0];
988 	nimaps = 1;
989 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
990 	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
991 
992 	/*
993 	 * Allocate file space until done or until there is an error
994 	 */
995 	while (allocatesize_fsb && !error) {
996 		xfs_fileoff_t	s, e;
997 
998 		/*
999 		 * Determine space reservations for data/realtime.
1000 		 */
1001 		if (unlikely(extsz)) {
1002 			s = startoffset_fsb;
1003 			do_div(s, extsz);
1004 			s *= extsz;
1005 			e = startoffset_fsb + allocatesize_fsb;
1006 			if ((temp = do_mod(startoffset_fsb, extsz)))
1007 				e += temp;
1008 			if ((temp = do_mod(e, extsz)))
1009 				e += extsz - temp;
1010 		} else {
1011 			s = 0;
1012 			e = allocatesize_fsb;
1013 		}
1014 
1015 		/*
1016 		 * The transaction reservation is limited to a 32-bit block
1017 		 * count, hence we need to limit the number of blocks we are
1018 		 * trying to reserve to avoid an overflow. We can't allocate
1019 		 * more than @nimaps extents, and an extent is limited on disk
1020 		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
1021 		 */
1022 		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
1023 		if (unlikely(rt)) {
1024 			resrtextents = qblocks = resblks;
1025 			resrtextents /= mp->m_sb.sb_rextsize;
1026 			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1027 			quota_flag = XFS_QMOPT_RES_RTBLKS;
1028 		} else {
1029 			resrtextents = 0;
1030 			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
1031 			quota_flag = XFS_QMOPT_RES_REGBLKS;
1032 		}
1033 
1034 		/*
1035 		 * Allocate and setup the transaction.
1036 		 */
1037 		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1038 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
1039 					  resblks, resrtextents);
1040 		/*
1041 		 * Check for running out of space
1042 		 */
1043 		if (error) {
1044 			/*
1045 			 * Free the transaction structure.
1046 			 */
1047 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1048 			xfs_trans_cancel(tp);
1049 			break;
1050 		}
1051 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1052 		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
1053 						      0, quota_flag);
1054 		if (error)
1055 			goto error1;
1056 
1057 		xfs_trans_ijoin(tp, ip, 0);
1058 
1059 		xfs_bmap_init(&free_list, &firstfsb);
1060 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
1061 					allocatesize_fsb, alloc_type, &firstfsb,
1062 					resblks, imapp, &nimaps, &free_list);
1063 		if (error)
1064 			goto error0;
1065 
1066 		/*
1067 		 * Complete the transaction
1068 		 */
1069 		error = xfs_bmap_finish(&tp, &free_list, NULL);
1070 		if (error)
1071 			goto error0;
1072 
1073 		error = xfs_trans_commit(tp);
1074 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1075 		if (error)
1076 			break;
1077 
1078 		allocated_fsb = imapp->br_blockcount;
1079 
1080 		if (nimaps == 0) {
1081 			error = -ENOSPC;
1082 			break;
1083 		}
1084 
1085 		startoffset_fsb += allocated_fsb;
1086 		allocatesize_fsb -= allocated_fsb;
1087 	}
1088 
1089 	return error;
1090 
1091 error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1092 	xfs_bmap_cancel(&free_list);
1093 	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
1094 
1095 error1:	/* Just cancel transaction */
1096 	xfs_trans_cancel(tp);
1097 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1098 	return error;
1099 }
1100 
1101 /*
1102  * Zero file bytes between startoff and endoff inclusive.
1103  * The iolock is held exclusive and no blocks are buffered.
1104  *
1105  * This function is used by xfs_free_file_space() to zero
1106  * partial blocks when the range to free is not block aligned.
1107  * When unreserving space with boundaries that are not block
1108  * aligned we round up the start and round down the end
1109  * boundaries and then use this function to zero the parts of
1110  * the blocks that got dropped during the rounding.
1111  */
1112 STATIC int
1113 xfs_zero_remaining_bytes(
1114 	xfs_inode_t		*ip,
1115 	xfs_off_t		startoff,
1116 	xfs_off_t		endoff)
1117 {
1118 	xfs_bmbt_irec_t		imap;
1119 	xfs_fileoff_t		offset_fsb;
1120 	xfs_off_t		lastoffset;
1121 	xfs_off_t		offset;
1122 	xfs_buf_t		*bp;
1123 	xfs_mount_t		*mp = ip->i_mount;
1124 	int			nimap;
1125 	int			error = 0;
1126 
1127 	/*
1128 	 * Avoid doing I/O beyond eof - it's not necessary
1129 	 * since nothing can read beyond eof.  The space will
1130 	 * be zeroed when the file is extended anyway.
1131 	 */
1132 	if (startoff >= XFS_ISIZE(ip))
1133 		return 0;
1134 
1135 	if (endoff > XFS_ISIZE(ip))
1136 		endoff = XFS_ISIZE(ip);
1137 
1138 	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
1139 		uint lock_mode;
1140 
1141 		offset_fsb = XFS_B_TO_FSBT(mp, offset);
1142 		nimap = 1;
1143 
1144 		lock_mode = xfs_ilock_data_map_shared(ip);
1145 		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
1146 		xfs_iunlock(ip, lock_mode);
1147 
1148 		if (error || nimap < 1)
1149 			break;
1150 		ASSERT(imap.br_blockcount >= 1);
1151 		ASSERT(imap.br_startoff == offset_fsb);
1152 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1153 
1154 		if (imap.br_startblock == HOLESTARTBLOCK ||
1155 		    imap.br_state == XFS_EXT_UNWRITTEN) {
1156 			/* skip the entire extent */
1157 			lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff +
1158 						      imap.br_blockcount) - 1;
1159 			continue;
1160 		}
1161 
1162 		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
1163 		if (lastoffset > endoff)
1164 			lastoffset = endoff;
1165 
1166 		/* DAX can just zero the backing device directly */
1167 		if (IS_DAX(VFS_I(ip))) {
1168 			error = dax_zero_page_range(VFS_I(ip), offset,
1169 						    lastoffset - offset + 1,
1170 						    xfs_get_blocks_direct);
1171 			if (error)
1172 				return error;
1173 			continue;
1174 		}
1175 
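		/*
		 * Read the block uncached, zero the byte range within it in
		 * memory, and write the buffer straight back out.
		 */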
1176 		error = xfs_buf_read_uncached(XFS_IS_REALTIME_INODE(ip) ?
1177 				mp->m_rtdev_targp : mp->m_ddev_targp,
1178 				xfs_fsb_to_db(ip, imap.br_startblock),
1179 				BTOBB(mp->m_sb.sb_blocksize),
1180 				0, &bp, NULL);
1181 		if (error)
1182 			return error;
1183 
1184 		memset(bp->b_addr +
1185 				(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
1186 		       0, lastoffset - offset + 1);
1187 
1188 		error = xfs_bwrite(bp);
1189 		xfs_buf_relse(bp);
1190 		if (error)
1191 			return error;
1192 	}
1193 	return error;
1194 }
1195 
1196 int
1197 xfs_free_file_space(
1198 	struct xfs_inode	*ip,
1199 	xfs_off_t		offset,
1200 	xfs_off_t		len)
1201 {
1202 	int			done;
1203 	xfs_fileoff_t		endoffset_fsb;
1204 	int			error;
1205 	xfs_fsblock_t		firstfsb;
1206 	xfs_bmap_free_t		free_list;
1207 	xfs_bmbt_irec_t		imap;
1208 	xfs_off_t		ioffset;
1209 	xfs_off_t		iendoffset;
1210 	xfs_extlen_t		mod = 0;
1211 	xfs_mount_t		*mp;
1212 	int			nimap;
1213 	uint			resblks;
1214 	xfs_off_t		rounding;
1215 	int			rt;
1216 	xfs_fileoff_t		startoffset_fsb;
1217 	xfs_trans_t		*tp;
1218 
1219 	mp = ip->i_mount;
1220 
1221 	trace_xfs_free_file_space(ip);
1222 
1223 	error = xfs_qm_dqattach(ip, 0);
1224 	if (error)
1225 		return error;
1226 
1227 	error = 0;
1228 	if (len <= 0)	/* if nothing being freed */
1229 		return error;
1230 	rt = XFS_IS_REALTIME_INODE(ip);
1231 	startoffset_fsb	= XFS_B_TO_FSB(mp, offset);
1232 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1233 
1234 	/* wait for the completion of any pending DIOs */
1235 	inode_dio_wait(VFS_I(ip));
1236 
1237 	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
1238 	ioffset = round_down(offset, rounding);
1239 	iendoffset = round_up(offset + len, rounding) - 1;
1240 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
1241 					     iendoffset);
1242 	if (error)
1243 		goto out;
1244 	truncate_pagecache_range(VFS_I(ip), ioffset, iendoffset);
1245 
1246 	/*
1247 	 * Need to zero the stuff we're not freeing, on disk.
1248 	 * If it's a realtime file & can't use unwritten extents then we
1249 	 * actually need to zero the extent edges.  Otherwise xfs_bunmapi
1250 	 * will take care of it for us.
1251 	 */
1252 	if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1253 		nimap = 1;
1254 		error = xfs_bmapi_read(ip, startoffset_fsb, 1,
1255 					&imap, &nimap, 0);
1256 		if (error)
1257 			goto out;
1258 		ASSERT(nimap == 0 || nimap == 1);
1259 		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1260 			xfs_daddr_t	block;
1261 
1262 			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1263 			block = imap.br_startblock;
1264 			mod = do_div(block, mp->m_sb.sb_rextsize);
1265 			if (mod)
1266 				startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1267 		}
1268 		nimap = 1;
1269 		error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
1270 					&imap, &nimap, 0);
1271 		if (error)
1272 			goto out;
1273 		ASSERT(nimap == 0 || nimap == 1);
1274 		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1275 			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1276 			mod++;
1277 			if (mod && (mod != mp->m_sb.sb_rextsize))
1278 				endoffset_fsb -= mod;
1279 		}
1280 	}
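	/*
	 * Zero any partial blocks at the edges of the range; whole blocks,
	 * if any, are unmapped by the xfs_bunmapi() loop below.
	 */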
1281 	if ((done = (endoffset_fsb <= startoffset_fsb)))
1282 		/*
1283 		 * One contiguous piece to clear
1284 		 */
1285 		error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
1286 	else {
1287 		/*
1288 		 * Some full blocks, possibly two pieces to clear
1289 		 */
1290 		if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
1291 			error = xfs_zero_remaining_bytes(ip, offset,
1292 				XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
1293 		if (!error &&
1294 		    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
1295 			error = xfs_zero_remaining_bytes(ip,
1296 				XFS_FSB_TO_B(mp, endoffset_fsb),
1297 				offset + len - 1);
1298 	}
1299 
1300 	/*
1301 	 * free file space until done or until there is an error
1302 	 */
1303 	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1304 	while (!error && !done) {
1305 
1306 		/*
1307 		 * allocate and setup the transaction. Allow this
1308 		 * transaction to dip into the reserve blocks to ensure
1309 		 * the freeing of the space succeeds at ENOSPC.
1310 		 */
1311 		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1312 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);
1313 
1314 		/*
1315 		 * check for running out of space
1316 		 */
1317 		if (error) {
1318 			/*
1319 			 * Free the transaction structure.
1320 			 */
1321 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1322 			xfs_trans_cancel(tp);
1323 			break;
1324 		}
1325 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1326 		error = xfs_trans_reserve_quota(tp, mp,
1327 				ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
1328 				resblks, 0, XFS_QMOPT_RES_REGBLKS);
1329 		if (error)
1330 			goto error1;
1331 
1332 		xfs_trans_ijoin(tp, ip, 0);
1333 
1334 		/*
1335 		 * issue the bunmapi() call to free the blocks
1336 		 */
1337 		xfs_bmap_init(&free_list, &firstfsb);
1338 		error = xfs_bunmapi(tp, ip, startoffset_fsb,
1339 				  endoffset_fsb - startoffset_fsb,
1340 				  0, 2, &firstfsb, &free_list, &done);
1341 		if (error)
1342 			goto error0;
1343 
1344 		/*
1345 		 * complete the transaction
1346 		 */
1347 		error = xfs_bmap_finish(&tp, &free_list, NULL);
1348 		if (error)
1349 			goto error0;
1350 
1351 		error = xfs_trans_commit(tp);
1352 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1353 	}
1354 
1355  out:
1356 	return error;
1357 
1358  error0:
1359 	xfs_bmap_cancel(&free_list);
1360  error1:
1361 	xfs_trans_cancel(tp);
1362 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1363 	goto out;
1364 }
1365 
1366 /*
1367  * Preallocate and zero a range of a file. This mechanism has the allocation
1368  * semantics of fallocate and in addition converts data in the range to zeroes.
1369  */
1370 int
1371 xfs_zero_file_space(
1372 	struct xfs_inode	*ip,
1373 	xfs_off_t		offset,
1374 	xfs_off_t		len)
1375 {
1376 	struct xfs_mount	*mp = ip->i_mount;
1377 	uint			blksize;
1378 	int			error;
1379 
1380 	trace_xfs_zero_file_space(ip);
1381 
1382 	blksize = 1 << mp->m_sb.sb_blocklog;
1383 
1384 	/*
1385 	 * Punch a hole and prealloc the range. We use hole punch rather than
1386 	 * unwritten extent conversion for two reasons:
1387 	 *
1388 	 * 1.) Hole punch handles partial block zeroing for us.
1389 	 *
1390 	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1391 	 * by virtue of the hole punch.
1392 	 */
1393 	error = xfs_free_file_space(ip, offset, len);
1394 	if (error)
1395 		goto out;
1396 
1397 	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1398 				     round_up(offset + len, blksize) -
1399 				     round_down(offset, blksize),
1400 				     XFS_BMAPI_PREALLOC);
1401 out:
1402 	return error;
1403 
1404 }
1405 
1406 /*
1407  * @next_fsb will keep track of the extent currently undergoing shift.
1408  * @stop_fsb will keep track of the extent at which we have to stop.
1409  * If we are shifting left, we will start with block (offset + len) and
1410  * shift each extent till last extent.
1411  * If we are shifting right, we will start with last extent inside file space
1412  * and continue until we reach the block corresponding to offset.
1413  */
1414 static int
1415 xfs_shift_file_space(
1416 	struct xfs_inode        *ip,
1417 	xfs_off_t               offset,
1418 	xfs_off_t               len,
1419 	enum shift_direction	direction)
1420 {
1421 	int			done = 0;
1422 	struct xfs_mount	*mp = ip->i_mount;
1423 	struct xfs_trans	*tp;
1424 	int			error;
1425 	struct xfs_bmap_free	free_list;
1426 	xfs_fsblock_t		first_block;
1427 	xfs_fileoff_t		stop_fsb;
1428 	xfs_fileoff_t		next_fsb;
1429 	xfs_fileoff_t		shift_fsb;
1430 
1431 	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
1432 
1433 	if (direction == SHIFT_LEFT) {
1434 		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1435 		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
1436 	} else {
1437 		/*
1438 		 * If right shift, delegate the work of initializing next_fsb
1439 		 * to xfs_bmap_shift_extents() as it is called with the ilock held.
1440 		 */
1441 		next_fsb = NULLFSBLOCK;
1442 		stop_fsb = XFS_B_TO_FSB(mp, offset);
1443 	}
1444 
1445 	shift_fsb = XFS_B_TO_FSB(mp, len);
1446 
1447 	/*
1448 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1449 	 * into the accessible region of the file.
1450 	 */
1451 	if (xfs_can_free_eofblocks(ip, true)) {
1452 		error = xfs_free_eofblocks(mp, ip, false);
1453 		if (error)
1454 			return error;
1455 	}
1456 
1457 	/*
1458 	 * Write back and invalidate the page cache for the remainder of the
1459 	 * file as we're about to shift every extent from offset to EOF.
1460 	 */
1461 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1462 					     offset, -1);
1463 	if (error)
1464 		return error;
1465 	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
1466 					offset >> PAGE_CACHE_SHIFT, -1);
1467 	if (error)
1468 		return error;
1469 
1470 	/*
1471 	 * The extent shifting code works on extent granularity, so if
1472 	 * stop_fsb is not the starting block of an extent, we need to split
1473 	 * the extent at stop_fsb.
1474 	 */
1475 	if (direction == SHIFT_RIGHT) {
1476 		error = xfs_bmap_split_extent(ip, stop_fsb);
1477 		if (error)
1478 			return error;
1479 	}
1480 
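	/*
	 * Shift the extents one transaction at a time, moving at most
	 * XFS_BMAP_MAX_SHIFT_EXTENTS extents per iteration, until done.
	 */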
1481 	while (!error && !done) {
1482 		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1483 		/*
1484 		 * We need to reserve a permanent block count for the
1485 		 * transaction: after shifting an extent into a hole we may
1486 		 * find that adjacent extents can be merged, which can free a
1487 		 * block during the record update.
1488 		 */
1489 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
1490 				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
1491 		if (error) {
1492 			xfs_trans_cancel(tp);
1493 			break;
1494 		}
1495 
1496 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1497 		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
1498 				ip->i_gdquot, ip->i_pdquot,
1499 				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
1500 				XFS_QMOPT_RES_REGBLKS);
1501 		if (error)
1502 			goto out_trans_cancel;
1503 
1504 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1505 
1506 		xfs_bmap_init(&free_list, &first_block);
1507 
1508 		/*
1509 		 * We are using the write transaction, which allows a maximum
1510 		 * of two bmbt updates.
1511 		 */
1512 		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
1513 				&done, stop_fsb, &first_block, &free_list,
1514 				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
1515 		if (error)
1516 			goto out_bmap_cancel;
1517 
1518 		error = xfs_bmap_finish(&tp, &free_list, NULL);
1519 		if (error)
1520 			goto out_bmap_cancel;
1521 
1522 		error = xfs_trans_commit(tp);
1523 	}
1524 
1525 	return error;
1526 
1527 out_bmap_cancel:
1528 	xfs_bmap_cancel(&free_list);
1529 out_trans_cancel:
1530 	xfs_trans_cancel(tp);
1531 	return error;
1532 }
1533 
1534 /*
1535  * xfs_collapse_file_space()
1536  *	This routine frees disk space and shifts extents for the given file.
1537  *	First it frees the data blocks in the specified range by calling
1538  *	xfs_free_file_space(), which also syncs dirty data and invalidates
1539  *	the page cache over the region the collapse range is working on.
1540  *	Then it shifts the extent records to the left to cover the hole.
1541  * RETURNS:
1542  *	0 on success
1543  *	errno on error
1544  *
1545  */
1546 int
1547 xfs_collapse_file_space(
1548 	struct xfs_inode	*ip,
1549 	xfs_off_t		offset,
1550 	xfs_off_t		len)
1551 {
1552 	int error;
1553 
1554 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1555 	trace_xfs_collapse_file_space(ip);
1556 
1557 	error = xfs_free_file_space(ip, offset, len);
1558 	if (error)
1559 		return error;
1560 
1561 	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
1562 }
1563 
1564 /*
1565  * xfs_insert_file_space()
1566  *	This routine creates hole space by shifting extents for the given file.
1567  *	First it syncs dirty data and invalidates the page cache over the
1568  *	region on which the insert range is working. Then it splits an extent
1569  *	into two at the given offset by calling xfs_bmap_split_extent, and
1570  *	shifts all extent records lying between [offset, last allocated extent]
1571  *	to the right to make room for the hole.
1572  * RETURNS:
1573  *	0 on success
1574  *	errno on error
1575  */
1576 int
1577 xfs_insert_file_space(
1578 	struct xfs_inode	*ip,
1579 	loff_t			offset,
1580 	loff_t			len)
1581 {
1582 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1583 	trace_xfs_insert_file_space(ip);
1584 
1585 	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
1586 }
1587 
1588 /*
1589  * We need to check that the format of the data fork in the temporary inode is
1590  * valid for the target inode before doing the swap. This is not a problem with
1591  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1592  * data fork depending on the space the attribute fork is taking so we can get
1593  * invalid formats on the target inode.
1594  *
1595  * E.g. target has space for 7 extents in extent format, temp inode only has
1596  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1597  * btree, but when swapped it needs to be in extent format. Hence we can't just
1598  * blindly swap data forks on attr2 filesystems.
1599  *
1600  * Note that we check the swap in both directions so that we don't end up with
1601  * a corrupt temporary inode, either.
1602  *
1603  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1604  * inode will prevent this situation from occurring, so all we do here is
1605  * reject and log the attempt. Basically we are putting the responsibility on
1606  * userspace to get this right.
1607  */
1608 static int
1609 xfs_swap_extents_check_format(
1610 	xfs_inode_t	*ip,	/* target inode */
1611 	xfs_inode_t	*tip)	/* tmp inode */
1612 {
1613 
1614 	/* Should never get a local format */
1615 	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1616 	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1617 		return -EINVAL;
1618 
1619 	/*
1620 	 * If the target inode has fewer extents than the temporary inode, then
1621 	 * why did userspace call us?
1622 	 */
1623 	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1624 		return -EINVAL;
1625 
1626 	/*
1627 	 * If the target inode is in extent form and the temp inode is in btree
1628 	 * form then we will end up with the target inode in the wrong format,
1629 	 * as we already know there are fewer extents in the temp inode.
1630 	 */
1631 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1632 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1633 		return -EINVAL;
1634 
1635 	/* Check temp in extent form to max in target */
1636 	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1637 	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1638 			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1639 		return -EINVAL;
1640 
1641 	/* Check target in extent form to max in temp */
1642 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1643 	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1644 			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1645 		return -EINVAL;
1646 
1647 	/*
1648 	 * If we are in a btree format, check that the temp root block will fit
1649 	 * in the target and that it has enough extents to be in btree format
1650 	 * in the target.
1651 	 *
1652 	 * Note that we have to be careful to allow btree->extent conversions
1653 	 * (a common defrag case) which will occur when the temp inode is in
1654 	 * extent format...
1655 	 */
1656 	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1657 		if (XFS_IFORK_BOFF(ip) &&
1658 		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1659 			return -EINVAL;
1660 		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1661 		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1662 			return -EINVAL;
1663 	}
1664 
1665 	/* Reciprocal target->temp btree format checks */
1666 	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1667 		if (XFS_IFORK_BOFF(tip) &&
1668 		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1669 			return -EINVAL;
1670 		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1671 		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1672 			return -EINVAL;
1673 	}
1674 
1675 	return 0;
1676 }
1677 
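/*
 * Flush dirty data and invalidate the page cache for an inode prior to the
 * extent swap; any pages still cached afterwards mean the swap cannot proceed.
 */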
1678 static int
1679 xfs_swap_extent_flush(
1680 	struct xfs_inode	*ip)
1681 {
1682 	int	error;
1683 
1684 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1685 	if (error)
1686 		return error;
1687 	truncate_pagecache_range(VFS_I(ip), 0, -1);
1688 
1689 	/* Verify O_DIRECT for ftmp */
1690 	if (VFS_I(ip)->i_mapping->nrpages)
1691 		return -EINVAL;
1692 	return 0;
1693 }
1694 
1695 int
1696 xfs_swap_extents(
1697 	xfs_inode_t	*ip,	/* target inode */
1698 	xfs_inode_t	*tip,	/* tmp inode */
1699 	xfs_swapext_t	*sxp)
1700 {
1701 	xfs_mount_t	*mp = ip->i_mount;
1702 	xfs_trans_t	*tp;
1703 	xfs_bstat_t	*sbp = &sxp->sx_stat;
1704 	xfs_ifork_t	*tempifp, *ifp, *tifp;
1705 	int		src_log_flags, target_log_flags;
1706 	int		error = 0;
1707 	int		aforkblks = 0;
1708 	int		taforkblks = 0;
1709 	__uint64_t	tmp;
1710 	int		lock_flags;
1711 
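
	/* Scratch fork used below for the struct-copy swap of the data forks. */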
1712 	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
1713 	if (!tempifp) {
1714 		error = -ENOMEM;
1715 		goto out;
1716 	}
1717 
1718 	/*
1719 	 * Lock the inodes against other IO, page faults and truncate to
1720 	 * begin with.  Then we can ensure the inodes are flushed and have no
1721 	 * page cache safely. Once we have done this we can take the ilocks and
1722 	 * do the rest of the checks.
1723 	 */
1724 	lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
1725 	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
1726 	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
1727 
1728 	/* Verify that both files have the same format */
1729 	if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
1730 		error = -EINVAL;
1731 		goto out_unlock;
1732 	}
1733 
1734 	/* Verify both files are either real-time or non-realtime */
1735 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1736 		error = -EINVAL;
1737 		goto out_unlock;
1738 	}
1739 
1740 	error = xfs_swap_extent_flush(ip);
1741 	if (error)
1742 		goto out_unlock;
1743 	error = xfs_swap_extent_flush(tip);
1744 	if (error)
1745 		goto out_unlock;
1746 
1747 	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
1748 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
1749 	if (error) {
1750 		xfs_trans_cancel(tp);
1751 		goto out_unlock;
1752 	}
1753 
1754 	/*
1755 	 * Lock and join the inodes to the transaction so that transaction commit
1756 	 * or cancel will unlock the inodes from this point onwards.
1757 	 */
1758 	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1759 	lock_flags |= XFS_ILOCK_EXCL;
1760 	xfs_trans_ijoin(tp, ip, lock_flags);
1761 	xfs_trans_ijoin(tp, tip, lock_flags);
1762 
1763 
1764 	/* Verify all data are being swapped */
1765 	if (sxp->sx_offset != 0 ||
1766 	    sxp->sx_length != ip->i_d.di_size ||
1767 	    sxp->sx_length != tip->i_d.di_size) {
1768 		error = -EFAULT;
1769 		goto out_trans_cancel;
1770 	}
1771 
1772 	trace_xfs_swap_extent_before(ip, 0);
1773 	trace_xfs_swap_extent_before(tip, 1);
1774 
1775 	/* check inode formats now that data is flushed */
1776 	error = xfs_swap_extents_check_format(ip, tip);
1777 	if (error) {
1778 		xfs_notice(mp,
1779 		    "%s: inode 0x%llx format is incompatible for exchanging.",
1780 				__func__, ip->i_ino);
1781 		goto out_trans_cancel;
1782 	}
1783 
1784 	/*
1785 	 * Compare the current change & modify times with that
1786 	 * passed in.  If they differ, we abort this swap.
1787 	 * This is the mechanism used to ensure the calling
1788 	 * process that the file was not changed out from
1789 	 * under it.
1790 	 */
1791 	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1792 	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1793 	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1794 	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1795 		error = -EBUSY;
1796 		goto out_trans_cancel;
1797 	}
1798 	/*
1799 	 * Count the number of extended attribute blocks
1800 	 */
1801 	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1802 	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1803 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
1804 		if (error)
1805 			goto out_trans_cancel;
1806 	}
1807 	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1808 	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1809 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
1810 			&taforkblks);
1811 		if (error)
1812 			goto out_trans_cancel;
1813 	}
1814 
1815 	/*
1816 	 * Before we've swapped the forks, let's set the owners of the forks
1817 	 * appropriately. We have to do this as we are demand paging the btree
1818 	 * buffers, and so the validation done on read will expect the owner
1819 	 * field to be correctly set. Once we change the owners, we can swap the
1820 	 * inode forks.
1821 	 *
1822 	 * Note the trickiness in setting the log flags - we set the owner log
1823 	 * flag on the opposite inode (i.e. the inode we are setting the new
1824 	 * owner to be) because once we swap the forks and log that, log
1825 	 * recovery is going to see the fork as owned by the swapped inode,
1826 	 * not the pre-swapped inodes.
1827 	 */
1828 	src_log_flags = XFS_ILOG_CORE;
1829 	target_log_flags = XFS_ILOG_CORE;
1830 	if (ip->i_d.di_version == 3 &&
1831 	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1832 		target_log_flags |= XFS_ILOG_DOWNER;
1833 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
1834 					      tip->i_ino, NULL);
1835 		if (error)
1836 			goto out_trans_cancel;
1837 	}
1838 
1839 	if (tip->i_d.di_version == 3 &&
1840 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1841 		src_log_flags |= XFS_ILOG_DOWNER;
1842 		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
1843 					      ip->i_ino, NULL);
1844 		if (error)
1845 			goto out_trans_cancel;
1846 	}
1847 
1848 	/*
1849 	 * Swap the data forks of the inodes
1850 	 */
1851 	ifp = &ip->i_df;
1852 	tifp = &tip->i_df;
1853 	*tempifp = *ifp;	/* struct copy */
1854 	*ifp = *tifp;		/* struct copy */
1855 	*tifp = *tempifp;	/* struct copy */
1856 
1857 	/*
1858 	 * Fix the on-disk inode values
1859 	 */
1860 	tmp = (__uint64_t)ip->i_d.di_nblocks;
1861 	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1862 	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1863 
1864 	tmp = (__uint64_t) ip->i_d.di_nextents;
1865 	ip->i_d.di_nextents = tip->i_d.di_nextents;
1866 	tip->i_d.di_nextents = tmp;
1867 
1868 	tmp = (__uint64_t) ip->i_d.di_format;
1869 	ip->i_d.di_format = tip->i_d.di_format;
1870 	tip->i_d.di_format = tmp;
1871 
1872 	/*
1873 	 * The extents in the source inode could still contain speculative
1874 	 * preallocation beyond EOF (e.g. the file is open but not modified
1875 	 * while defrag is in progress). In that case, we need to copy over the
1876 	 * number of delalloc blocks the data fork in the source inode is
1877 	 * tracking beyond EOF so that when the fork is truncated away when the
1878 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1879 	 * counter on that inode.
1880 	 */
1881 	ASSERT(tip->i_delayed_blks == 0);
1882 	tip->i_delayed_blks = ip->i_delayed_blks;
1883 	ip->i_delayed_blks = 0;
1884 
1885 	switch (ip->i_d.di_format) {
1886 	case XFS_DINODE_FMT_EXTENTS:
1887 		/* If the extents fit in the inode, fix the
1888 		 * pointer.  Otherwise it's already NULL or
1889 		 * pointing to the extent.
1890 		 */
1891 		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1892 			ifp->if_u1.if_extents =
1893 				ifp->if_u2.if_inline_ext;
1894 		}
1895 		src_log_flags |= XFS_ILOG_DEXT;
1896 		break;
1897 	case XFS_DINODE_FMT_BTREE:
1898 		ASSERT(ip->i_d.di_version < 3 ||
1899 		       (src_log_flags & XFS_ILOG_DOWNER));
1900 		src_log_flags |= XFS_ILOG_DBROOT;
1901 		break;
1902 	}
1903 
1904 	switch (tip->i_d.di_format) {
1905 	case XFS_DINODE_FMT_EXTENTS:
1906 		/* If the extents fit in the inode, fix the
1907 		 * pointer.  Otherwise it's already NULL or
1908 		 * pointing to the extent.
1909 		 */
1910 		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1911 			tifp->if_u1.if_extents =
1912 				tifp->if_u2.if_inline_ext;
1913 		}
1914 		target_log_flags |= XFS_ILOG_DEXT;
1915 		break;
1916 	case XFS_DINODE_FMT_BTREE:
1917 		target_log_flags |= XFS_ILOG_DBROOT;
1918 		ASSERT(tip->i_d.di_version < 3 ||
1919 		       (target_log_flags & XFS_ILOG_DOWNER));
1920 		break;
1921 	}
1922 
1923 	xfs_trans_log_inode(tp, ip,  src_log_flags);
1924 	xfs_trans_log_inode(tp, tip, target_log_flags);
1925 
1926 	/*
1927 	 * If this is a synchronous mount, make sure that the
1928 	 * transaction goes to disk before returning to the user.
1929 	 */
1930 	if (mp->m_flags & XFS_MOUNT_WSYNC)
1931 		xfs_trans_set_sync(tp);
1932 
1933 	error = xfs_trans_commit(tp);
1934 
1935 	trace_xfs_swap_extent_after(ip, 0);
1936 	trace_xfs_swap_extent_after(tip, 1);
1937 out:
1938 	kmem_free(tempifp);
1939 	return error;
1940 
1941 out_unlock:
1942 	xfs_iunlock(ip, lock_flags);
1943 	xfs_iunlock(tip, lock_flags);
1944 	goto out;
1945 
1946 out_trans_cancel:
1947 	xfs_trans_cancel(tp);
1948 	goto out;
1949 }
1950