xref: /linux/fs/xfs/xfs_bmap_util.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * Copyright (c) 2012 Red Hat, Inc.
4  * All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it would be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write the Free Software Foundation,
17  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
18  */
19 #include "xfs.h"
20 #include "xfs_fs.h"
21 #include "xfs_shared.h"
22 #include "xfs_format.h"
23 #include "xfs_log_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_bit.h"
26 #include "xfs_mount.h"
27 #include "xfs_da_format.h"
28 #include "xfs_inode.h"
29 #include "xfs_btree.h"
30 #include "xfs_trans.h"
31 #include "xfs_extfree_item.h"
32 #include "xfs_alloc.h"
33 #include "xfs_bmap.h"
34 #include "xfs_bmap_util.h"
35 #include "xfs_bmap_btree.h"
36 #include "xfs_rtalloc.h"
37 #include "xfs_error.h"
38 #include "xfs_quota.h"
39 #include "xfs_trans_space.h"
40 #include "xfs_trace.h"
41 #include "xfs_icache.h"
42 #include "xfs_log.h"
43 
44 /* Kernel only BMAP related definitions and functions */
45 
46 /*
47  * Convert the given file system block to a disk block.  We have to treat it
48  * differently based on whether the file is a real time file or not, because the
49  * bmap code does.
50  */
51 xfs_daddr_t
52 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
53 {
54 	return (XFS_IS_REALTIME_INODE(ip) ?
55 		 (xfs_daddr_t)XFS_FSB_TO_BB(ip->i_mount, fsb) :
56 		 XFS_FSB_TO_DADDR(ip->i_mount, fsb));
57 }
58 
59 /*
60  * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
61  * caller.  Frees all the extents that need freeing, which must be done
62  * last due to locking considerations.  We never free any extents in
63  * the first transaction.
64  *
65  * The committed parameter is set to 1 if the given transaction was
66  * committed and a new one started, and 0 otherwise.
67  */
68 int						/* error */
69 xfs_bmap_finish(
70 	struct xfs_trans		**tp,	/* transaction pointer addr */
71 	struct xfs_bmap_free		*flist,	/* i/o: list extents to free */
72 	int				*committed)/* xact committed or not */
73 {
74 	struct xfs_efd_log_item		*efd;	/* extent free data */
75 	struct xfs_efi_log_item		*efi;	/* extent free intention */
76 	int				error;	/* error return value */
77 	struct xfs_bmap_free_item	*free;	/* free extent item */
78 	struct xfs_bmap_free_item	*next;	/* next item on free list */
79 
80 	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
81 	if (flist->xbf_count == 0) {
82 		*committed = 0;
83 		return 0;
84 	}
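	/*
	 * Log an EFI (extent free intent) covering every extent on the free
	 * list, then roll to the transaction that will do the actual frees.
	 */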
85 	efi = xfs_trans_get_efi(*tp, flist->xbf_count);
86 	for (free = flist->xbf_first; free; free = free->xbfi_next)
87 		xfs_trans_log_efi_extent(*tp, efi, free->xbfi_startblock,
88 			free->xbfi_blockcount);
89 
90 	error = __xfs_trans_roll(tp, NULL, committed);
91 	if (error) {
92 		/*
93 		 * If the transaction was committed, drop the EFD reference
94 		 * since we're bailing out of here. The other reference is
95 		 * dropped when the EFI hits the AIL.
96 		 *
97 		 * If the transaction was not committed, the EFI is freed by the
98 		 * EFI item unlock handler on abort. Also, we have a new
99 		 * transaction so we should return committed=1 even though we're
100 		 * returning an error.
101 		 */
102 		if (*committed) {
103 			xfs_efi_release(efi);
104 			xfs_force_shutdown((*tp)->t_mountp,
105 				(error == -EFSCORRUPTED) ?
106 					SHUTDOWN_CORRUPT_INCORE :
107 					SHUTDOWN_META_IO_ERROR);
108 		} else {
109 			*committed = 1;
110 		}
111 
112 		return error;
113 	}
114 
115 	/*
116 	 * Get an EFD and free each extent in the list, logging to the EFD in
117 	 * the process. The remaining bmap free list is cleaned up by the caller
118 	 * on error.
119 	 */
120 	efd = xfs_trans_get_efd(*tp, efi, flist->xbf_count);
121 	for (free = flist->xbf_first; free != NULL; free = next) {
122 		next = free->xbfi_next;
123 
124 		error = xfs_trans_free_extent(*tp, efd, free->xbfi_startblock,
125 					      free->xbfi_blockcount);
126 		if (error)
127 			return error;
128 
129 		xfs_bmap_del_free(flist, NULL, free);
130 	}
131 
132 	return 0;
133 }
134 
135 int
136 xfs_bmap_rtalloc(
137 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
138 {
139 	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
140 	int		error;		/* error return value */
141 	xfs_mount_t	*mp;		/* mount point structure */
142 	xfs_extlen_t	prod = 0;	/* product factor for allocators */
143 	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
144 	xfs_extlen_t	align;		/* minimum allocation alignment */
145 	xfs_rtblock_t	rtb;
146 
147 	mp = ap->ip->i_mount;
148 	align = xfs_get_extsz_hint(ap->ip);
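	/*
	 * For a realtime inode the extent size hint is normally a multiple of
	 * the rt extent size, so prod is that hint expressed in rt extents
	 * and is passed to the rt allocator as the allocation multiple.
	 */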
149 	prod = align / mp->m_sb.sb_rextsize;
150 	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
151 					align, 1, ap->eof, 0,
152 					ap->conv, &ap->offset, &ap->length);
153 	if (error)
154 		return error;
155 	ASSERT(ap->length);
156 	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
157 
158 	/*
159 	 * If the offset & length are not perfectly aligned
160 	 * then kill prod; it will just get us in trouble.
161 	 */
162 	if (do_mod(ap->offset, align) || ap->length % align)
163 		prod = 1;
164 	/*
165 	 * Set ralen to be the actual requested length in rtextents.
166 	 */
167 	ralen = ap->length / mp->m_sb.sb_rextsize;
168 	/*
169 	 * If the old value was close enough to MAXEXTLEN that
170 	 * we rounded up to it, cut it back so it's valid again.
171 	 * Note that if it's a really large request (bigger than
172 	 * MAXEXTLEN), we don't hear about that number, and can't
173 	 * adjust the starting point to match it.
174 	 */
175 	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
176 		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
177 
178 	/*
179 	 * Lock out other modifications to the RT bitmap inode.
180 	 */
181 	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
182 	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
183 
184 	/*
185 	 * If it's an allocation to an empty file at offset 0,
186 	 * pick an extent that will space things out in the rt area.
187 	 */
188 	if (ap->eof && ap->offset == 0) {
189 		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
190 
191 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
192 		if (error)
193 			return error;
194 		ap->blkno = rtx * mp->m_sb.sb_rextsize;
195 	} else {
196 		ap->blkno = 0;
197 	}
198 
199 	xfs_bmap_adjacent(ap);
200 
201 	/*
202 	 * Realtime allocation, done through xfs_rtallocate_extent.
203 	 */
204 	atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
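	/* ap->blkno is in filesystem blocks; convert it to rt extent units */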
205 	do_div(ap->blkno, mp->m_sb.sb_rextsize);
206 	rtb = ap->blkno;
207 	ap->length = ralen;
208 	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
209 				&ralen, atype, ap->wasdel, prod, &rtb)))
210 		return error;
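	/*
	 * If the aligned allocation didn't succeed, retry with an allocation
	 * multiple of one (prod == 1) before giving up.
	 */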
211 	if (rtb == NULLFSBLOCK && prod > 1 &&
212 	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
213 					   ap->length, &ralen, atype,
214 					   ap->wasdel, 1, &rtb)))
215 		return error;
216 	ap->blkno = rtb;
217 	if (ap->blkno != NULLFSBLOCK) {
218 		ap->blkno *= mp->m_sb.sb_rextsize;
219 		ralen *= mp->m_sb.sb_rextsize;
220 		ap->length = ralen;
221 		ap->ip->i_d.di_nblocks += ralen;
222 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
223 		if (ap->wasdel)
224 			ap->ip->i_delayed_blks -= ralen;
225 		/*
226 		 * Adjust the disk quota also. This was reserved
227 		 * earlier.
228 		 */
229 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
230 			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
231 					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
232 	} else {
233 		ap->length = 0;
234 	}
235 	return 0;
236 }
237 
238 /*
239  * Check if the endoff is outside the last extent. If so the caller will grow
240  * the allocation to a stripe unit boundary.  All offsets are considered outside
241  * the end of file for an empty fork, so 1 is returned in *eof in that case.
242  */
243 int
244 xfs_bmap_eof(
245 	struct xfs_inode	*ip,
246 	xfs_fileoff_t		endoff,
247 	int			whichfork,
248 	int			*eof)
249 {
250 	struct xfs_bmbt_irec	rec;
251 	int			error;
252 
253 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
254 	if (error || *eof)
255 		return error;
256 
257 	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
258 	return 0;
259 }
260 
261 /*
262  * Extent tree block counting routines.
263  */
264 
265 /*
266  * Count leaf blocks given a range of extent records.
267  */
268 STATIC void
269 xfs_bmap_count_leaves(
270 	xfs_ifork_t		*ifp,
271 	xfs_extnum_t		idx,
272 	int			numrecs,
273 	int			*count)
274 {
275 	int		b;
276 
277 	for (b = 0; b < numrecs; b++) {
278 		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
279 		*count += xfs_bmbt_get_blockcount(frp);
280 	}
281 }
282 
283 /*
284  * Count leaf blocks given a range of extent records originally
285  * in btree format.
286  */
287 STATIC void
288 xfs_bmap_disk_count_leaves(
289 	struct xfs_mount	*mp,
290 	struct xfs_btree_block	*block,
291 	int			numrecs,
292 	int			*count)
293 {
294 	int		b;
295 	xfs_bmbt_rec_t	*frp;
296 
297 	for (b = 1; b <= numrecs; b++) {
298 		frp = XFS_BMBT_REC_ADDR(mp, block, b);
299 		*count += xfs_bmbt_disk_get_blockcount(frp);
300 	}
301 }
302 
303 /*
304  * Recursively walks each level of a btree
305  * to count total fsblocks in use.
306  */
307 STATIC int                                     /* error */
308 xfs_bmap_count_tree(
309 	xfs_mount_t     *mp,            /* file system mount point */
310 	xfs_trans_t     *tp,            /* transaction pointer */
311 	xfs_ifork_t	*ifp,		/* inode fork pointer */
312 	xfs_fsblock_t   blockno,	/* file system block number */
313 	int             levelin,	/* level in btree */
314 	int		*count)		/* Count of blocks */
315 {
316 	int			error;
317 	xfs_buf_t		*bp, *nbp;
318 	int			level = levelin;
319 	__be64			*pp;
320 	xfs_fsblock_t           bno = blockno;
321 	xfs_fsblock_t		nextbno;
322 	struct xfs_btree_block	*block, *nextblock;
323 	int			numrecs;
324 
325 	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
326 						&xfs_bmbt_buf_ops);
327 	if (error)
328 		return error;
329 	*count += 1;
330 	block = XFS_BUF_TO_BLOCK(bp);
331 
332 	if (--level) {
333 		/* Not yet at the level above the leaves; count this level of nodes */
334 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
335 		while (nextbno != NULLFSBLOCK) {
336 			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
337 						XFS_BMAP_BTREE_REF,
338 						&xfs_bmbt_buf_ops);
339 			if (error)
340 				return error;
341 			*count += 1;
342 			nextblock = XFS_BUF_TO_BLOCK(nbp);
343 			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
344 			xfs_trans_brelse(tp, nbp);
345 		}
346 
347 		/* Dive to the next level */
348 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
349 		bno = be64_to_cpu(*pp);
350 		if (unlikely((error =
351 		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
352 			xfs_trans_brelse(tp, bp);
353 			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
354 					 XFS_ERRLEVEL_LOW, mp);
355 			return -EFSCORRUPTED;
356 		}
357 		xfs_trans_brelse(tp, bp);
358 	} else {
359 		/* count all level 1 nodes and their leaves */
360 		for (;;) {
361 			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
362 			numrecs = be16_to_cpu(block->bb_numrecs);
363 			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
364 			xfs_trans_brelse(tp, bp);
365 			if (nextbno == NULLFSBLOCK)
366 				break;
367 			bno = nextbno;
368 			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
369 						XFS_BMAP_BTREE_REF,
370 						&xfs_bmbt_buf_ops);
371 			if (error)
372 				return error;
373 			*count += 1;
374 			block = XFS_BUF_TO_BLOCK(bp);
375 		}
376 	}
377 	return 0;
378 }
379 
380 /*
381  * Count fsblocks of the given fork.
382  */
383 int						/* error */
384 xfs_bmap_count_blocks(
385 	xfs_trans_t		*tp,		/* transaction pointer */
386 	xfs_inode_t		*ip,		/* incore inode */
387 	int			whichfork,	/* data or attr fork */
388 	int			*count)		/* out: count of blocks */
389 {
390 	struct xfs_btree_block	*block;	/* current btree block */
391 	xfs_fsblock_t		bno;	/* block # of "block" */
392 	xfs_ifork_t		*ifp;	/* fork structure */
393 	int			level;	/* btree level, for checking */
394 	xfs_mount_t		*mp;	/* file system mount structure */
395 	__be64			*pp;	/* pointer to block address */
396 
397 	bno = NULLFSBLOCK;
398 	mp = ip->i_mount;
399 	ifp = XFS_IFORK_PTR(ip, whichfork);
400 	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
401 		xfs_bmap_count_leaves(ifp, 0,
402 			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
403 			count);
404 		return 0;
405 	}
406 
407 	/*
408 	 * Root level must use the XFS_BMAP_BROOT_PTR_ADDR macro to get the ptr out.
409 	 */
410 	block = ifp->if_broot;
411 	level = be16_to_cpu(block->bb_level);
412 	ASSERT(level > 0);
413 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
414 	bno = be64_to_cpu(*pp);
415 	ASSERT(bno != NULLFSBLOCK);
416 	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
417 	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
418 
419 	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
420 		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
421 				 mp);
422 		return -EFSCORRUPTED;
423 	}
424 
425 	return 0;
426 }
427 
428 /*
429  * returns 1 for success, 0 if we failed to map the extent.
430  */
431 STATIC int
432 xfs_getbmapx_fix_eof_hole(
433 	xfs_inode_t		*ip,		/* xfs incore inode pointer */
434 	struct getbmapx		*out,		/* output structure */
435 	int			prealloced,	/* this is a file with
436 						 * preallocated data space */
437 	__int64_t		end,		/* last block requested */
438 	xfs_fsblock_t		startblock)
439 {
440 	__int64_t		fixlen;
441 	xfs_mount_t		*mp;		/* file system mount point */
442 	xfs_ifork_t		*ifp;		/* inode fork pointer */
443 	xfs_extnum_t		lastx;		/* last extent pointer */
444 	xfs_fileoff_t		fileblock;
445 
446 	if (startblock == HOLESTARTBLOCK) {
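		/* a bmv_block of -1 reports this range as a hole to userspace */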
447 		mp = ip->i_mount;
448 		out->bmv_block = -1;
449 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
450 		fixlen -= out->bmv_offset;
451 		if (prealloced && out->bmv_offset + out->bmv_length == end) {
452 			/* Came to hole at EOF. Trim it. */
453 			if (fixlen <= 0)
454 				return 0;
455 			out->bmv_length = fixlen;
456 		}
457 	} else {
458 		if (startblock == DELAYSTARTBLOCK)
459 			out->bmv_block = -2;
460 		else
461 			out->bmv_block = xfs_fsb_to_db(ip, startblock);
462 		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
463 		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
464 		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
465 		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
466 			out->bmv_oflags |= BMV_OF_LAST;
467 	}
468 
469 	return 1;
470 }
471 
472 /*
473  * Get inode's extents as described in bmv, and format for output.
474  * Calls formatter to fill the user's buffer until all extents
475  * are mapped, until the passed-in bmv->bmv_count slots have
476  * been filled, or until the formatter short-circuits the loop,
477  * if it is tracking filled-in extents on its own.
478  */
479 int						/* error code */
480 xfs_getbmap(
481 	xfs_inode_t		*ip,
482 	struct getbmapx		*bmv,		/* user bmap structure */
483 	xfs_bmap_format_t	formatter,	/* format to user */
484 	void			*arg)		/* formatter arg */
485 {
486 	__int64_t		bmvend;		/* last block requested */
487 	int			error = 0;	/* return value */
488 	__int64_t		fixlen;		/* length for -1 case */
489 	int			i;		/* extent number */
490 	int			lock;		/* lock state */
491 	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
492 	xfs_mount_t		*mp;		/* file system mount point */
493 	int			nex;		/* # of user extents can do */
494 	int			nexleft;	/* # of user extents left */
495 	int			subnex;		/* # of bmapi's can do */
496 	int			nmap;		/* number of map entries */
497 	struct getbmapx		*out;		/* output structure */
498 	int			whichfork;	/* data or attr fork */
499 	int			prealloced;	/* this is a file with
500 						 * preallocated data space */
501 	int			iflags;		/* interface flags */
502 	int			bmapi_flags;	/* flags for xfs_bmapi */
503 	int			cur_ext = 0;
504 
505 	mp = ip->i_mount;
506 	iflags = bmv->bmv_iflags;
507 	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
508 
509 	if (whichfork == XFS_ATTR_FORK) {
510 		if (XFS_IFORK_Q(ip)) {
511 			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
512 			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
513 			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
514 				return -EINVAL;
515 		} else if (unlikely(
516 			   ip->i_d.di_aformat != 0 &&
517 			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
518 			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
519 					 ip->i_mount);
520 			return -EFSCORRUPTED;
521 		}
522 
523 		prealloced = 0;
524 		fixlen = 1LL << 32;
525 	} else {
526 		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
527 		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
528 		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
529 			return -EINVAL;
530 
531 		if (xfs_get_extsz_hint(ip) ||
532 		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
533 			prealloced = 1;
534 			fixlen = mp->m_super->s_maxbytes;
535 		} else {
536 			prealloced = 0;
537 			fixlen = XFS_ISIZE(ip);
538 		}
539 	}
540 
541 	if (bmv->bmv_length == -1) {
542 		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
543 		bmv->bmv_length =
544 			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
545 	} else if (bmv->bmv_length == 0) {
546 		bmv->bmv_entries = 0;
547 		return 0;
548 	} else if (bmv->bmv_length < 0) {
549 		return -EINVAL;
550 	}
551 
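	/* bmv_count includes the header slot, so at most bmv_count - 1 extents fit */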
552 	nex = bmv->bmv_count - 1;
553 	if (nex <= 0)
554 		return -EINVAL;
555 	bmvend = bmv->bmv_offset + bmv->bmv_length;
556 
557 
558 	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
559 		return -ENOMEM;
560 	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
561 	if (!out)
562 		return -ENOMEM;
563 
564 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
565 	if (whichfork == XFS_DATA_FORK) {
566 		if (!(iflags & BMV_IF_DELALLOC) &&
567 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
568 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
569 			if (error)
570 				goto out_unlock_iolock;
571 
572 			/*
573 			 * Even after flushing the inode, there can still be
574 			 * delalloc blocks on the inode beyond EOF due to
575 			 * speculative preallocation.  These are not removed
576 			 * until the release function is called or the inode
577 			 * is inactivated.  Hence we cannot assert here that
578 			 * ip->i_delayed_blks == 0.
579 			 */
580 		}
581 
582 		lock = xfs_ilock_data_map_shared(ip);
583 	} else {
584 		lock = xfs_ilock_attr_map_shared(ip);
585 	}
586 
587 	/*
588 	 * Don't let nex be bigger than the number of extents
589 	 * we can have assuming alternating holes and real extents.
590 	 */
591 	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
592 		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
593 
594 	bmapi_flags = xfs_bmapi_aflag(whichfork);
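	/*
	 * Without BMV_IF_PREALLOC the caller doesn't care about unwritten
	 * state, so let bmapi merge written and unwritten extents together.
	 */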
595 	if (!(iflags & BMV_IF_PREALLOC))
596 		bmapi_flags |= XFS_BMAPI_IGSTATE;
597 
598 	/*
599 	 * Allocate enough space to handle "subnex" maps at a time.
600 	 */
601 	error = -ENOMEM;
602 	subnex = 16;
603 	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
604 	if (!map)
605 		goto out_unlock_ilock;
606 
607 	bmv->bmv_entries = 0;
608 
609 	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
610 	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
611 		error = 0;
612 		goto out_free_map;
613 	}
614 
615 	nexleft = nex;
616 
617 	do {
618 		nmap = (nexleft > subnex) ? subnex : nexleft;
619 		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
620 				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
621 				       map, &nmap, bmapi_flags);
622 		if (error)
623 			goto out_free_map;
624 		ASSERT(nmap <= subnex);
625 
626 		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
627 			out[cur_ext].bmv_oflags = 0;
628 			if (map[i].br_state == XFS_EXT_UNWRITTEN)
629 				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
630 			else if (map[i].br_startblock == DELAYSTARTBLOCK)
631 				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
632 			out[cur_ext].bmv_offset =
633 				XFS_FSB_TO_BB(mp, map[i].br_startoff);
634 			out[cur_ext].bmv_length =
635 				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
636 			out[cur_ext].bmv_unused1 = 0;
637 			out[cur_ext].bmv_unused2 = 0;
638 
639 			/*
640 			 * delayed allocation extents that start beyond EOF can
641 			 * occur due to speculative EOF allocation when the
642 			 * delalloc extent is larger than the largest freespace
643 			 * extent at conversion time. These extents cannot be
644 			 * converted by data writeback, so can exist here even
645 			 * if we are not supposed to be finding delalloc
646 			 * extents.
647 			 */
648 			if (map[i].br_startblock == DELAYSTARTBLOCK &&
649 			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
650 				ASSERT((iflags & BMV_IF_DELALLOC) != 0);
651 
652 			if (map[i].br_startblock == HOLESTARTBLOCK &&
653 			    whichfork == XFS_ATTR_FORK) {
654 				/* came to the end of attribute fork */
655 				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
656 				goto out_free_map;
657 			}
658 
659 			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
660 					prealloced, bmvend,
661 					map[i].br_startblock))
662 				goto out_free_map;
663 
664 			bmv->bmv_offset =
665 				out[cur_ext].bmv_offset +
666 				out[cur_ext].bmv_length;
667 			bmv->bmv_length =
668 				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
669 
670 			/*
671 			 * In case we don't want to return the hole,
672 			 * don't increase cur_ext so that we can reuse
673 			 * it in the next loop.
674 			 */
675 			if ((iflags & BMV_IF_NO_HOLES) &&
676 			    map[i].br_startblock == HOLESTARTBLOCK) {
677 				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
678 				continue;
679 			}
680 
681 			nexleft--;
682 			bmv->bmv_entries++;
683 			cur_ext++;
684 		}
685 	} while (nmap && nexleft && bmv->bmv_length);
686 
687  out_free_map:
688 	kmem_free(map);
689  out_unlock_ilock:
690 	xfs_iunlock(ip, lock);
691  out_unlock_iolock:
692 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
693 
694 	for (i = 0; i < cur_ext; i++) {
695 		int full = 0;	/* user array is full */
696 
697 		/* format results & advance arg */
698 		error = formatter(&arg, &out[i], &full);
699 		if (error || full)
700 			break;
701 	}
702 
703 	kmem_free(out);
704 	return error;
705 }
706 
707 /*
708  * dead simple method of punching delayed allocation blocks from a range in
709  * the inode. Walks a block at a time so will be slow, but is only executed in
710  * rare error cases so the overhead is not critical. This will always punch out
711  * both the start and end blocks, even if the ranges only partially overlap
712  * them, so it is up to the caller to ensure that partial blocks are not
713  * passed in.
714  */
715 int
716 xfs_bmap_punch_delalloc_range(
717 	struct xfs_inode	*ip,
718 	xfs_fileoff_t		start_fsb,
719 	xfs_fileoff_t		length)
720 {
721 	xfs_fileoff_t		remaining = length;
722 	int			error = 0;
723 
724 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
725 
726 	do {
727 		int		done;
728 		xfs_bmbt_irec_t	imap;
729 		int		nimaps = 1;
730 		xfs_fsblock_t	firstblock;
731 		xfs_bmap_free_t flist;
732 
733 		/*
734 		 * Map the range first and check that it is a delalloc extent
735 		 * before trying to unmap the range. Otherwise we will be
736 		 * trying to remove a real extent (which requires a
737 		 * transaction) or a hole, which is probably a bad idea...
738 		 */
739 		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
740 				       XFS_BMAPI_ENTIRE);
741 
742 		if (error) {
743 			/* something screwed, just bail */
744 			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
745 				xfs_alert(ip->i_mount,
746 			"Failed delalloc mapping lookup ino %lld fsb %lld.",
747 						ip->i_ino, start_fsb);
748 			}
749 			break;
750 		}
751 		if (!nimaps) {
752 			/* nothing there */
753 			goto next_block;
754 		}
755 		if (imap.br_startblock != DELAYSTARTBLOCK) {
756 			/* been converted, ignore */
757 			goto next_block;
758 		}
759 		WARN_ON(imap.br_blockcount == 0);
760 
761 		/*
762 		 * Note: while we initialise the firstblock/flist pair, they
763 		 * should never be used because blocks should never be
764 		 * allocated or freed for a delalloc extent, and hence we don't
765 		 * need to cancel or finish them after the xfs_bunmapi() call.
766 		 */
767 		xfs_bmap_init(&flist, &firstblock);
768 		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
769 					&flist, &done);
770 		if (error)
771 			break;
772 
773 		ASSERT(!flist.xbf_count && !flist.xbf_first);
774 next_block:
775 		start_fsb++;
776 		remaining--;
777 	} while (remaining > 0);
778 
779 	return error;
780 }
781 
782 /*
783  * Test whether it is appropriate to check an inode for and free post EOF
784  * blocks. The 'force' parameter determines whether we should also consider
785  * regular files that are marked preallocated or append-only.
786  */
787 bool
788 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
789 {
790 	/* prealloc/delalloc exists only on regular files */
791 	if (!S_ISREG(ip->i_d.di_mode))
792 		return false;
793 
794 	/*
795 	 * Zero sized files with no cached pages and no delalloc blocks will not
796 	 * have speculative prealloc/delalloc blocks to remove.
797 	 */
798 	if (VFS_I(ip)->i_size == 0 &&
799 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
800 	    ip->i_delayed_blks == 0)
801 		return false;
802 
803 	/* If we haven't read in the extent list, then don't do it now. */
804 	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
805 		return false;
806 
807 	/*
808 	 * Do not free real preallocated or append-only files unless the file
809 	 * has delalloc blocks and we are forced to remove them.
810 	 */
811 	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
812 		if (!force || ip->i_delayed_blks == 0)
813 			return false;
814 
815 	return true;
816 }
817 
818 /*
819  * This is called by xfs_inactive to free any blocks beyond eof
820  * when the link count isn't zero and by xfs_dm_punch_hole() when
821  * punching a hole to EOF.
822  */
823 int
824 xfs_free_eofblocks(
825 	xfs_mount_t	*mp,
826 	xfs_inode_t	*ip,
827 	bool		need_iolock)
828 {
829 	xfs_trans_t	*tp;
830 	int		error;
831 	xfs_fileoff_t	end_fsb;
832 	xfs_fileoff_t	last_fsb;
833 	xfs_filblks_t	map_len;
834 	int		nimaps;
835 	xfs_bmbt_irec_t	imap;
836 
837 	/*
838 	 * Figure out if there are any blocks beyond the end
839 	 * of the file.  If not, then there is nothing to do.
840 	 */
841 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
842 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
843 	if (last_fsb <= end_fsb)
844 		return 0;
845 	map_len = last_fsb - end_fsb;
846 
847 	nimaps = 1;
848 	xfs_ilock(ip, XFS_ILOCK_SHARED);
849 	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
850 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
851 
852 	if (!error && (nimaps != 0) &&
853 	    (imap.br_startblock != HOLESTARTBLOCK ||
854 	     ip->i_delayed_blks)) {
855 		/*
856 		 * Attach the dquots to the inode up front.
857 		 */
858 		error = xfs_qm_dqattach(ip, 0);
859 		if (error)
860 			return error;
861 
862 		/*
863 		 * There are blocks after the end of file.
864 		 * Free them up now by truncating the file to
865 		 * its current size.
866 		 */
867 		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
868 
869 		if (need_iolock) {
870 			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
871 				xfs_trans_cancel(tp);
872 				return -EAGAIN;
873 			}
874 		}
875 
876 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
877 		if (error) {
878 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
879 			xfs_trans_cancel(tp);
880 			if (need_iolock)
881 				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
882 			return error;
883 		}
884 
885 		xfs_ilock(ip, XFS_ILOCK_EXCL);
886 		xfs_trans_ijoin(tp, ip, 0);
887 
888 		/*
889 		 * Do not update the on-disk file size.  If we update the
890 		 * on-disk file size and then the system crashes before the
891 		 * contents of the file are flushed to disk then the file
892 		 * may be full of holes (i.e. the NULL files bug).
893 		 */
894 		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
895 					      XFS_ISIZE(ip));
896 		if (error) {
897 			/*
898 			 * If we get an error at this point we simply don't
899 			 * bother truncating the file.
900 			 */
901 			xfs_trans_cancel(tp);
902 		} else {
903 			error = xfs_trans_commit(tp);
904 			if (!error)
905 				xfs_inode_clear_eofblocks_tag(ip);
906 		}
907 
908 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
909 		if (need_iolock)
910 			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
911 	}
912 	return error;
913 }
914 
915 int
916 xfs_alloc_file_space(
917 	struct xfs_inode	*ip,
918 	xfs_off_t		offset,
919 	xfs_off_t		len,
920 	int			alloc_type)
921 {
922 	xfs_mount_t		*mp = ip->i_mount;
923 	xfs_off_t		count;
924 	xfs_filblks_t		allocated_fsb;
925 	xfs_filblks_t		allocatesize_fsb;
926 	xfs_extlen_t		extsz, temp;
927 	xfs_fileoff_t		startoffset_fsb;
928 	xfs_fsblock_t		firstfsb;
929 	int			nimaps;
930 	int			quota_flag;
931 	int			rt;
932 	xfs_trans_t		*tp;
933 	xfs_bmbt_irec_t		imaps[1], *imapp;
934 	xfs_bmap_free_t		free_list;
935 	uint			qblocks, resblks, resrtextents;
936 	int			committed;
937 	int			error;
938 
939 	trace_xfs_alloc_file_space(ip);
940 
941 	if (XFS_FORCED_SHUTDOWN(mp))
942 		return -EIO;
943 
944 	error = xfs_qm_dqattach(ip, 0);
945 	if (error)
946 		return error;
947 
948 	if (len <= 0)
949 		return -EINVAL;
950 
951 	rt = XFS_IS_REALTIME_INODE(ip);
952 	extsz = xfs_get_extsz_hint(ip);
953 
954 	count = len;
955 	imapp = &imaps[0];
956 	nimaps = 1;
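	/* round the start offset down and the size up to whole fs blocks */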
957 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
958 	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
959 
960 	/*
961 	 * Allocate file space until done or until there is an error
962 	 */
963 	while (allocatesize_fsb && !error) {
964 		xfs_fileoff_t	s, e;
965 
966 		/*
967 		 * Determine space reservations for data/realtime.
968 		 */
969 		if (unlikely(extsz)) {
970 			s = startoffset_fsb;
971 			do_div(s, extsz);
972 			s *= extsz;
973 			e = startoffset_fsb + allocatesize_fsb;
974 			if ((temp = do_mod(startoffset_fsb, extsz)))
975 				e += temp;
976 			if ((temp = do_mod(e, extsz)))
977 				e += extsz - temp;
978 		} else {
979 			s = 0;
980 			e = allocatesize_fsb;
981 		}
982 
983 		/*
984 		 * The transaction reservation is limited to a 32-bit block
985 		 * count, hence we need to limit the number of blocks we are
986 		 * trying to reserve to avoid an overflow. We can't allocate
987 		 * more than @nimaps extents, and an extent is limited on disk
988 		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
989 		 */
990 		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
991 		if (unlikely(rt)) {
992 			resrtextents = qblocks = resblks;
993 			resrtextents /= mp->m_sb.sb_rextsize;
994 			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
995 			quota_flag = XFS_QMOPT_RES_RTBLKS;
996 		} else {
997 			resrtextents = 0;
998 			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
999 			quota_flag = XFS_QMOPT_RES_REGBLKS;
1000 		}
1001 
1002 		/*
1003 		 * Allocate and setup the transaction.
1004 		 */
1005 		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1006 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
1007 					  resblks, resrtextents);
1008 		/*
1009 		 * Check for running out of space
1010 		 */
1011 		if (error) {
1012 			/*
1013 			 * Free the transaction structure.
1014 			 */
1015 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1016 			xfs_trans_cancel(tp);
1017 			break;
1018 		}
1019 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1020 		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
1021 						      0, quota_flag);
1022 		if (error)
1023 			goto error1;
1024 
1025 		xfs_trans_ijoin(tp, ip, 0);
1026 
1027 		xfs_bmap_init(&free_list, &firstfsb);
1028 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
1029 					allocatesize_fsb, alloc_type, &firstfsb,
1030 					0, imapp, &nimaps, &free_list);
1031 		if (error) {
1032 			goto error0;
1033 		}
1034 
1035 		/*
1036 		 * Complete the transaction
1037 		 */
1038 		error = xfs_bmap_finish(&tp, &free_list, &committed);
1039 		if (error) {
1040 			goto error0;
1041 		}
1042 
1043 		error = xfs_trans_commit(tp);
1044 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1045 		if (error) {
1046 			break;
1047 		}
1048 
1049 		allocated_fsb = imapp->br_blockcount;
1050 
1051 		if (nimaps == 0) {
1052 			error = -ENOSPC;
1053 			break;
1054 		}
1055 
1056 		startoffset_fsb += allocated_fsb;
1057 		allocatesize_fsb -= allocated_fsb;
1058 	}
1059 
1060 	return error;
1061 
1062 error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1063 	xfs_bmap_cancel(&free_list);
1064 	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
1065 
1066 error1:	/* Just cancel transaction */
1067 	xfs_trans_cancel(tp);
1068 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1069 	return error;
1070 }
1071 
1072 /*
1073  * Zero file bytes between startoff and endoff inclusive.
1074  * The iolock is held exclusive and no blocks are buffered.
1075  *
1076  * This function is used by xfs_free_file_space() to zero
1077  * partial blocks when the range to free is not block aligned.
1078  * When unreserving space with boundaries that are not block
1079  * aligned we round up the start and round down the end
1080  * boundaries and then use this function to zero the parts of
1081  * the blocks that got dropped during the rounding.
1082  */
1083 STATIC int
1084 xfs_zero_remaining_bytes(
1085 	xfs_inode_t		*ip,
1086 	xfs_off_t		startoff,
1087 	xfs_off_t		endoff)
1088 {
1089 	xfs_bmbt_irec_t		imap;
1090 	xfs_fileoff_t		offset_fsb;
1091 	xfs_off_t		lastoffset;
1092 	xfs_off_t		offset;
1093 	xfs_buf_t		*bp;
1094 	xfs_mount_t		*mp = ip->i_mount;
1095 	int			nimap;
1096 	int			error = 0;
1097 
1098 	/*
1099 	 * Avoid doing I/O beyond eof - it's not necessary
1100 	 * since nothing can read beyond eof.  The space will
1101 	 * be zeroed when the file is extended anyway.
1102 	 */
1103 	if (startoff >= XFS_ISIZE(ip))
1104 		return 0;
1105 
1106 	if (endoff > XFS_ISIZE(ip))
1107 		endoff = XFS_ISIZE(ip);
1108 
1109 	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
1110 		uint lock_mode;
1111 
1112 		offset_fsb = XFS_B_TO_FSBT(mp, offset);
1113 		nimap = 1;
1114 
1115 		lock_mode = xfs_ilock_data_map_shared(ip);
1116 		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
1117 		xfs_iunlock(ip, lock_mode);
1118 
1119 		if (error || nimap < 1)
1120 			break;
1121 		ASSERT(imap.br_blockcount >= 1);
1122 		ASSERT(imap.br_startoff == offset_fsb);
1123 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1124 
1125 		if (imap.br_startblock == HOLESTARTBLOCK ||
1126 		    imap.br_state == XFS_EXT_UNWRITTEN) {
1127 			/* skip the entire extent */
1128 			lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff +
1129 						      imap.br_blockcount) - 1;
1130 			continue;
1131 		}
1132 
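		/* zero to the end of this block, or to endoff, whichever is closer */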
1133 		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
1134 		if (lastoffset > endoff)
1135 			lastoffset = endoff;
1136 
1137 		/* DAX can just zero the backing device directly */
1138 		if (IS_DAX(VFS_I(ip))) {
1139 			error = dax_zero_page_range(VFS_I(ip), offset,
1140 						    lastoffset - offset + 1,
1141 						    xfs_get_blocks_direct);
1142 			if (error)
1143 				return error;
1144 			continue;
1145 		}
1146 
1147 		error = xfs_buf_read_uncached(XFS_IS_REALTIME_INODE(ip) ?
1148 				mp->m_rtdev_targp : mp->m_ddev_targp,
1149 				xfs_fsb_to_db(ip, imap.br_startblock),
1150 				BTOBB(mp->m_sb.sb_blocksize),
1151 				0, &bp, NULL);
1152 		if (error)
1153 			return error;
1154 
1155 		memset(bp->b_addr +
1156 				(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
1157 		       0, lastoffset - offset + 1);
1158 
1159 		error = xfs_bwrite(bp);
1160 		xfs_buf_relse(bp);
1161 		if (error)
1162 			return error;
1163 	}
1164 	return error;
1165 }
1166 
1167 int
1168 xfs_free_file_space(
1169 	struct xfs_inode	*ip,
1170 	xfs_off_t		offset,
1171 	xfs_off_t		len)
1172 {
1173 	int			committed;
1174 	int			done;
1175 	xfs_fileoff_t		endoffset_fsb;
1176 	int			error;
1177 	xfs_fsblock_t		firstfsb;
1178 	xfs_bmap_free_t		free_list;
1179 	xfs_bmbt_irec_t		imap;
1180 	xfs_off_t		ioffset;
1181 	xfs_off_t		iendoffset;
1182 	xfs_extlen_t		mod=0;
1183 	xfs_mount_t		*mp;
1184 	int			nimap;
1185 	uint			resblks;
1186 	xfs_off_t		rounding;
1187 	int			rt;
1188 	xfs_fileoff_t		startoffset_fsb;
1189 	xfs_trans_t		*tp;
1190 
1191 	mp = ip->i_mount;
1192 
1193 	trace_xfs_free_file_space(ip);
1194 
1195 	error = xfs_qm_dqattach(ip, 0);
1196 	if (error)
1197 		return error;
1198 
1199 	error = 0;
1200 	if (len <= 0)	/* if nothing being freed */
1201 		return error;
1202 	rt = XFS_IS_REALTIME_INODE(ip);
1203 	startoffset_fsb	= XFS_B_TO_FSB(mp, offset);
1204 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
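	/*
	 * Only whole blocks inside the range are unmapped below; the partial
	 * blocks at either edge are zeroed by xfs_zero_remaining_bytes().
	 */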
1205 
1206 	/* wait for the completion of any pending DIOs */
1207 	inode_dio_wait(VFS_I(ip));
1208 
1209 	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
1210 	ioffset = round_down(offset, rounding);
1211 	iendoffset = round_up(offset + len, rounding) - 1;
1212 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
1213 					     iendoffset);
1214 	if (error)
1215 		goto out;
1216 	truncate_pagecache_range(VFS_I(ip), ioffset, iendoffset);
1217 
1218 	/*
1219 	 * Need to zero the stuff we're not freeing, on disk.
1220 	 * If it's a realtime file & can't use unwritten extents then we
1221 	 * actually need to zero the extent edges.  Otherwise xfs_bunmapi
1222 	 * will take care of it for us.
1223 	 */
1224 	if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1225 		nimap = 1;
1226 		error = xfs_bmapi_read(ip, startoffset_fsb, 1,
1227 					&imap, &nimap, 0);
1228 		if (error)
1229 			goto out;
1230 		ASSERT(nimap == 0 || nimap == 1);
1231 		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1232 			xfs_daddr_t	block;
1233 
1234 			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1235 			block = imap.br_startblock;
1236 			mod = do_div(block, mp->m_sb.sb_rextsize);
1237 			if (mod)
1238 				startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1239 		}
1240 		nimap = 1;
1241 		error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
1242 					&imap, &nimap, 0);
1243 		if (error)
1244 			goto out;
1245 		ASSERT(nimap == 0 || nimap == 1);
1246 		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1247 			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1248 			mod++;
1249 			if (mod && (mod != mp->m_sb.sb_rextsize))
1250 				endoffset_fsb -= mod;
1251 		}
1252 	}
1253 	if ((done = (endoffset_fsb <= startoffset_fsb)))
1254 		/*
1255 		 * One contiguous piece to clear
1256 		 */
1257 		error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
1258 	else {
1259 		/*
1260 		 * Some full blocks, possibly two pieces to clear
1261 		 */
1262 		if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
1263 			error = xfs_zero_remaining_bytes(ip, offset,
1264 				XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
1265 		if (!error &&
1266 		    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
1267 			error = xfs_zero_remaining_bytes(ip,
1268 				XFS_FSB_TO_B(mp, endoffset_fsb),
1269 				offset + len - 1);
1270 	}
1271 
1272 	/*
1273 	 * free file space until done or until there is an error
1274 	 */
1275 	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1276 	while (!error && !done) {
1277 
1278 		/*
1279 		 * allocate and setup the transaction. Allow this
1280 		 * transaction to dip into the reserve blocks to ensure
1281 		 * the freeing of the space succeeds at ENOSPC.
1282 		 */
1283 		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1284 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);
1285 
1286 		/*
1287 		 * check for running out of space
1288 		 */
1289 		if (error) {
1290 			/*
1291 			 * Free the transaction structure.
1292 			 */
1293 			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1294 			xfs_trans_cancel(tp);
1295 			break;
1296 		}
1297 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1298 		error = xfs_trans_reserve_quota(tp, mp,
1299 				ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
1300 				resblks, 0, XFS_QMOPT_RES_REGBLKS);
1301 		if (error)
1302 			goto error1;
1303 
1304 		xfs_trans_ijoin(tp, ip, 0);
1305 
1306 		/*
1307 		 * issue the bunmapi() call to free the blocks
1308 		 */
1309 		xfs_bmap_init(&free_list, &firstfsb);
1310 		error = xfs_bunmapi(tp, ip, startoffset_fsb,
1311 				  endoffset_fsb - startoffset_fsb,
1312 				  0, 2, &firstfsb, &free_list, &done);
1313 		if (error) {
1314 			goto error0;
1315 		}
1316 
1317 		/*
1318 		 * complete the transaction
1319 		 */
1320 		error = xfs_bmap_finish(&tp, &free_list, &committed);
1321 		if (error) {
1322 			goto error0;
1323 		}
1324 
1325 		error = xfs_trans_commit(tp);
1326 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1327 	}
1328 
1329  out:
1330 	return error;
1331 
1332  error0:
1333 	xfs_bmap_cancel(&free_list);
1334  error1:
1335 	xfs_trans_cancel(tp);
1336 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1337 	goto out;
1338 }
1339 
1340 /*
1341  * Preallocate and zero a range of a file. This mechanism has the allocation
1342  * semantics of fallocate and in addition converts data in the range to zeroes.
1343  */
1344 int
1345 xfs_zero_file_space(
1346 	struct xfs_inode	*ip,
1347 	xfs_off_t		offset,
1348 	xfs_off_t		len)
1349 {
1350 	struct xfs_mount	*mp = ip->i_mount;
1351 	uint			blksize;
1352 	int			error;
1353 
1354 	trace_xfs_zero_file_space(ip);
1355 
1356 	blksize = 1 << mp->m_sb.sb_blocklog;
1357 
1358 	/*
1359 	 * Punch a hole and prealloc the range. We use hole punch rather than
1360 	 * unwritten extent conversion for two reasons:
1361 	 *
1362 	 * 1.) Hole punch handles partial block zeroing for us.
1363 	 *
1364 	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1365 	 * by virtue of the hole punch.
1366 	 */
1367 	error = xfs_free_file_space(ip, offset, len);
1368 	if (error)
1369 		goto out;
1370 
1371 	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1372 				     round_up(offset + len, blksize) -
1373 				     round_down(offset, blksize),
1374 				     XFS_BMAPI_PREALLOC);
1375 out:
1376 	return error;
1377 
1378 }
1379 
1380 /*
1381  * @next_fsb will keep track of the extent currently undergoing shift.
1382  * @stop_fsb will keep track of the extent at which we have to stop.
1383  * If we are shifting left, we will start with the block at (offset + len)
1384  * and shift each extent up to the last extent.
1385  * If we are shifting right, we will start with the last extent inside the
1386  * file space and continue until we reach the block corresponding to offset.
1387  */
1388 static int
1389 xfs_shift_file_space(
1390 	struct xfs_inode        *ip,
1391 	xfs_off_t               offset,
1392 	xfs_off_t               len,
1393 	enum shift_direction	direction)
1394 {
1395 	int			done = 0;
1396 	struct xfs_mount	*mp = ip->i_mount;
1397 	struct xfs_trans	*tp;
1398 	int			error;
1399 	struct xfs_bmap_free	free_list;
1400 	xfs_fsblock_t		first_block;
1401 	int			committed;
1402 	xfs_fileoff_t		stop_fsb;
1403 	xfs_fileoff_t		next_fsb;
1404 	xfs_fileoff_t		shift_fsb;
1405 
1406 	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
1407 
1408 	if (direction == SHIFT_LEFT) {
1409 		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1410 		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
1411 	} else {
1412 		/*
1413 		 * If right shift, delegate the work of initializing next_fsb
1414 		 * to xfs_bmap_shift_extents(), as it is called with the ilock held.
1415 		 */
1416 		next_fsb = NULLFSBLOCK;
1417 		stop_fsb = XFS_B_TO_FSB(mp, offset);
1418 	}
1419 
1420 	shift_fsb = XFS_B_TO_FSB(mp, len);
1421 
1422 	/*
1423 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1424 	 * into the accessible region of the file.
1425 	 */
1426 	if (xfs_can_free_eofblocks(ip, true)) {
1427 		error = xfs_free_eofblocks(mp, ip, false);
1428 		if (error)
1429 			return error;
1430 	}
1431 
1432 	/*
1433 	 * Writeback and invalidate cache for the remainder of the file as we're
1434 	 * about to shift every extent from offset to EOF.
1435 	 */
1436 	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1437 					     offset, -1);
1438 	if (error)
1439 		return error;
1440 	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
1441 					offset >> PAGE_CACHE_SHIFT, -1);
1442 	if (error)
1443 		return error;
1444 
1445 	/*
1446 	 * The extent shifting code works on extent granularity. So, if
1447 	 * stop_fsb is not the starting block of an extent, we need to split
1448 	 * the extent at stop_fsb.
1449 	 */
1450 	if (direction == SHIFT_RIGHT) {
1451 		error = xfs_bmap_split_extent(ip, stop_fsb);
1452 		if (error)
1453 			return error;
1454 	}
1455 
1456 	while (!error && !done) {
1457 		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1458 		/*
1459 		 * We need to reserve a permanent block count for the transaction.
1460 		 * This comes into play when, after shifting an extent into a
1461 		 * hole, we find that adjacent extents can be merged, which
1462 		 * may lead to freeing a block during the record update.
1463 		 */
1464 		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
1465 				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
1466 		if (error) {
1467 			xfs_trans_cancel(tp);
1468 			break;
1469 		}
1470 
1471 		xfs_ilock(ip, XFS_ILOCK_EXCL);
1472 		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
1473 				ip->i_gdquot, ip->i_pdquot,
1474 				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
1475 				XFS_QMOPT_RES_REGBLKS);
1476 		if (error)
1477 			goto out_trans_cancel;
1478 
1479 		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1480 
1481 		xfs_bmap_init(&free_list, &first_block);
1482 
1483 		/*
1484 		 * We are using the write transaction, in which at most two
1485 		 * bmbt updates are allowed.
1486 		 */
1487 		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
1488 				&done, stop_fsb, &first_block, &free_list,
1489 				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
1490 		if (error)
1491 			goto out_bmap_cancel;
1492 
1493 		error = xfs_bmap_finish(&tp, &free_list, &committed);
1494 		if (error)
1495 			goto out_bmap_cancel;
1496 
1497 		error = xfs_trans_commit(tp);
1498 	}
1499 
1500 	return error;
1501 
1502 out_bmap_cancel:
1503 	xfs_bmap_cancel(&free_list);
1504 out_trans_cancel:
1505 	xfs_trans_cancel(tp);
1506 	return error;
1507 }
1508 
1509 /*
1510  * xfs_collapse_file_space()
1511  *	This routine frees disk space and shifts extents for the given file.
1512  *	The first thing we do is free the data blocks in the specified range
1513  *	by calling xfs_free_file_space(), which also syncs dirty data and
1514  *	invalidates the page cache over the region on which the collapse
1515  *	range is working. Then we shift the extent records left to cover the hole.
1516  * RETURNS:
1517  *	0 on success
1518  *	errno on error
1519  *
1520  */
1521 int
1522 xfs_collapse_file_space(
1523 	struct xfs_inode	*ip,
1524 	xfs_off_t		offset,
1525 	xfs_off_t		len)
1526 {
1527 	int error;
1528 
1529 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1530 	trace_xfs_collapse_file_space(ip);
1531 
1532 	error = xfs_free_file_space(ip, offset, len);
1533 	if (error)
1534 		return error;
1535 
1536 	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
1537 }
1538 
1539 /*
1540  * xfs_insert_file_space()
1541  *	This routine creates hole space by shifting extents for the given file.
1542  *	The first thing we do is sync dirty data and invalidate the page cache
1543  *	over the region on which the insert range is working. Then we split the
1544  *	extent in two at the given offset by calling xfs_bmap_split_extent, and
1545  *	shift all extent records lying between [offset, last allocated extent]
1546  *	to the right to make room for the hole.
1547  * RETURNS:
1548  *	0 on success
1549  *	errno on error
1550  */
1551 int
1552 xfs_insert_file_space(
1553 	struct xfs_inode	*ip,
1554 	loff_t			offset,
1555 	loff_t			len)
1556 {
1557 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1558 	trace_xfs_insert_file_space(ip);
1559 
1560 	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
1561 }
1562 
1563 /*
1564  * We need to check that the format of the data fork in the temporary inode is
1565  * valid for the target inode before doing the swap. This is not a problem with
1566  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1567  * data fork depending on the space the attribute fork is taking so we can get
1568  * invalid formats on the target inode.
1569  *
1570  * E.g. target has space for 7 extents in extent format, temp inode only has
1571  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1572  * btree, but when swapped it needs to be in extent format. Hence we can't just
1573  * blindly swap data forks on attr2 filesystems.
1574  *
1575  * Note that we check the swap in both directions so that we don't end up with
1576  * a corrupt temporary inode, either.
1577  *
1578  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1579  * inode will prevent this situation from occurring, so all we do here is
1580  * reject and log the attempt. basically we are putting the responsibility on
1581  * userspace to get this right.
1582  */
1583 static int
1584 xfs_swap_extents_check_format(
1585 	xfs_inode_t	*ip,	/* target inode */
1586 	xfs_inode_t	*tip)	/* tmp inode */
1587 {
1588 
1589 	/* Should never get a local format */
1590 	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1591 	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1592 		return -EINVAL;
1593 
1594 	/*
1595 	 * if the target inode has fewer extents than the temporary inode then
1596 	 * why did userspace call us?
1597 	 */
1598 	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1599 		return -EINVAL;
1600 
1601 	/*
1602 	 * if the target inode is in extent form and the temp inode is in btree
1603 	 * form then we will end up with the target inode in the wrong format
1604 	 * as we already know there are fewer extents in the temp inode.
1605 	 */
1606 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1607 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1608 		return -EINVAL;
1609 
1610 	/* Check temp in extent form to max in target */
1611 	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1612 	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1613 			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1614 		return -EINVAL;
1615 
1616 	/* Check target in extent form to max in temp */
1617 	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1618 	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1619 			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1620 		return -EINVAL;
1621 
1622 	/*
1623 	 * If we are in a btree format, check that the temp root block will fit
1624 	 * in the target and that it has enough extents to be in btree format
1625 	 * in the target.
1626 	 *
1627 	 * Note that we have to be careful to allow btree->extent conversions
1628 	 * (a common defrag case) which will occur when the temp inode is in
1629 	 * extent format...
1630 	 */
1631 	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1632 		if (XFS_IFORK_BOFF(ip) &&
1633 		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1634 			return -EINVAL;
1635 		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1636 		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1637 			return -EINVAL;
1638 	}
1639 
1640 	/* Reciprocal target->temp btree format checks */
1641 	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1642 		if (XFS_IFORK_BOFF(tip) &&
1643 		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1644 			return -EINVAL;
1645 		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1646 		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1647 			return -EINVAL;
1648 	}
1649 
1650 	return 0;
1651 }
1652 
1653 static int
1654 xfs_swap_extent_flush(
1655 	struct xfs_inode	*ip)
1656 {
1657 	int	error;
1658 
1659 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1660 	if (error)
1661 		return error;
1662 	truncate_pagecache_range(VFS_I(ip), 0, -1);
1663 
1664 	/* Verify O_DIRECT for ftmp */
1665 	if (VFS_I(ip)->i_mapping->nrpages)
1666 		return -EINVAL;
1667 	return 0;
1668 }
1669 
1670 int
1671 xfs_swap_extents(
1672 	xfs_inode_t	*ip,	/* target inode */
1673 	xfs_inode_t	*tip,	/* tmp inode */
1674 	xfs_swapext_t	*sxp)
1675 {
1676 	xfs_mount_t	*mp = ip->i_mount;
1677 	xfs_trans_t	*tp;
1678 	xfs_bstat_t	*sbp = &sxp->sx_stat;
1679 	xfs_ifork_t	*tempifp, *ifp, *tifp;
1680 	int		src_log_flags, target_log_flags;
1681 	int		error = 0;
1682 	int		aforkblks = 0;
1683 	int		taforkblks = 0;
1684 	__uint64_t	tmp;
1685 	int		lock_flags;
1686 
1687 	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
1688 	if (!tempifp) {
1689 		error = -ENOMEM;
1690 		goto out;
1691 	}
1692 
1693 	/*
1694 	 * Lock the inodes against other IO, page faults and truncate to
1695 	 * begin with.  Then we can safely ensure the inodes are flushed and
1696 	 * have no page cache. Once we have done this we can take the ilocks and
1697 	 * do the rest of the checks.
1698 	 */
1699 	lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
1700 	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
1701 	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
1702 
1703 	/* Verify that both files have the same format */
1704 	if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
1705 		error = -EINVAL;
1706 		goto out_unlock;
1707 	}
1708 
1709 	/* Verify both files are either real-time or non-realtime */
1710 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1711 		error = -EINVAL;
1712 		goto out_unlock;
1713 	}
1714 
1715 	error = xfs_swap_extent_flush(ip);
1716 	if (error)
1717 		goto out_unlock;
1718 	error = xfs_swap_extent_flush(tip);
1719 	if (error)
1720 		goto out_unlock;
1721 
1722 	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
1723 	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
1724 	if (error) {
1725 		xfs_trans_cancel(tp);
1726 		goto out_unlock;
1727 	}
1728 
1729 	/*
1730 	 * Lock and join the inodes to the transaction so that transaction commit
1731 	 * or cancel will unlock the inodes from this point onwards.
1732 	 */
1733 	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1734 	lock_flags |= XFS_ILOCK_EXCL;
1735 	xfs_trans_ijoin(tp, ip, lock_flags);
1736 	xfs_trans_ijoin(tp, tip, lock_flags);
1737 
1738 
1739 	/* Verify all data are being swapped */
1740 	if (sxp->sx_offset != 0 ||
1741 	    sxp->sx_length != ip->i_d.di_size ||
1742 	    sxp->sx_length != tip->i_d.di_size) {
1743 		error = -EFAULT;
1744 		goto out_trans_cancel;
1745 	}
1746 
1747 	trace_xfs_swap_extent_before(ip, 0);
1748 	trace_xfs_swap_extent_before(tip, 1);
1749 
1750 	/* check inode formats now that data is flushed */
1751 	error = xfs_swap_extents_check_format(ip, tip);
1752 	if (error) {
1753 		xfs_notice(mp,
1754 		    "%s: inode 0x%llx format is incompatible for exchanging.",
1755 				__func__, ip->i_ino);
1756 		goto out_trans_cancel;
1757 	}
1758 
1759 	/*
1760 	 * Compare the current change & modify times with those
1761 	 * passed in.  If they differ, we abort this swap.
1762 	 * This is the mechanism used to assure the calling
1763 	 * process that the file was not changed out from
1764 	 * under it.
1765 	 */
1766 	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1767 	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1768 	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1769 	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1770 		error = -EBUSY;
1771 		goto out_trans_cancel;
1772 	}
1773 	/*
1774 	 * Count the number of extended attribute blocks
1775 	 */
1776 	if (((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1777 	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1778 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
1779 		if (error)
1780 			goto out_trans_cancel;
1781 	}
1782 	if (((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1783 	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1784 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
1785 			&taforkblks);
1786 		if (error)
1787 			goto out_trans_cancel;
1788 	}
1789 
1790 	/*
1791 	 * Before we've swapped the forks, let's set the owners of the forks
1792 	 * appropriately. We have to do this as we are demand paging the btree
1793 	 * buffers, and so the validation done on read will expect the owner
1794 	 * field to be correctly set. Once we change the owners, we can swap the
1795 	 * inode forks.
1796 	 *
1797 	 * Note the trickiness in setting the log flags - we set the owner log
1798 	 * flag on the opposite inode (i.e. the inode we are setting the new
1799 	 * owner to be) because once we swap the forks and log that, log
1800 	 * recovery is going to see the fork as owned by the swapped inode,
1801 	 * not the pre-swapped inodes.
1802 	 */
1803 	src_log_flags = XFS_ILOG_CORE;
1804 	target_log_flags = XFS_ILOG_CORE;
1805 	if (ip->i_d.di_version == 3 &&
1806 	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1807 		target_log_flags |= XFS_ILOG_DOWNER;
1808 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
1809 					      tip->i_ino, NULL);
1810 		if (error)
1811 			goto out_trans_cancel;
1812 	}
1813 
1814 	if (tip->i_d.di_version == 3 &&
1815 	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1816 		src_log_flags |= XFS_ILOG_DOWNER;
1817 		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
1818 					      ip->i_ino, NULL);
1819 		if (error)
1820 			goto out_trans_cancel;
1821 	}
1822 
1823 	/*
1824 	 * Swap the data forks of the inodes
1825 	 */
1826 	ifp = &ip->i_df;
1827 	tifp = &tip->i_df;
1828 	*tempifp = *ifp;	/* struct copy */
1829 	*ifp = *tifp;		/* struct copy */
1830 	*tifp = *tempifp;	/* struct copy */
1831 
1832 	/*
1833 	 * Fix the on-disk inode values
1834 	 */
1835 	tmp = (__uint64_t)ip->i_d.di_nblocks;
1836 	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1837 	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1838 
1839 	tmp = (__uint64_t) ip->i_d.di_nextents;
1840 	ip->i_d.di_nextents = tip->i_d.di_nextents;
1841 	tip->i_d.di_nextents = tmp;
1842 
1843 	tmp = (__uint64_t) ip->i_d.di_format;
1844 	ip->i_d.di_format = tip->i_d.di_format;
1845 	tip->i_d.di_format = tmp;
1846 
1847 	/*
1848 	 * The extents in the source inode could still contain speculative
1849 	 * preallocation beyond EOF (e.g. the file is open but not modified
1850 	 * while defrag is in progress). In that case, we need to copy over the
1851 	 * number of delalloc blocks the data fork in the source inode is
1852 	 * tracking beyond EOF so that when the fork is truncated away when the
1853 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1854 	 * counter on that inode.
1855 	 */
1856 	ASSERT(tip->i_delayed_blks == 0);
1857 	tip->i_delayed_blks = ip->i_delayed_blks;
1858 	ip->i_delayed_blks = 0;
1859 
1860 	switch (ip->i_d.di_format) {
1861 	case XFS_DINODE_FMT_EXTENTS:
1862 		/* If the extents fit in the inode, fix the
1863 		 * pointer.  Otherwise it's already NULL or
1864 		 * pointing to the extent.
1865 		 */
1866 		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1867 			ifp->if_u1.if_extents =
1868 				ifp->if_u2.if_inline_ext;
1869 		}
1870 		src_log_flags |= XFS_ILOG_DEXT;
1871 		break;
1872 	case XFS_DINODE_FMT_BTREE:
1873 		ASSERT(ip->i_d.di_version < 3 ||
1874 		       (src_log_flags & XFS_ILOG_DOWNER));
1875 		src_log_flags |= XFS_ILOG_DBROOT;
1876 		break;
1877 	}
1878 
1879 	switch (tip->i_d.di_format) {
1880 	case XFS_DINODE_FMT_EXTENTS:
1881 		/* If the extents fit in the inode, fix the
1882 		 * pointer.  Otherwise it's already NULL or
1883 		 * pointing to the extent.
1884 		 */
1885 		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1886 			tifp->if_u1.if_extents =
1887 				tifp->if_u2.if_inline_ext;
1888 		}
1889 		target_log_flags |= XFS_ILOG_DEXT;
1890 		break;
1891 	case XFS_DINODE_FMT_BTREE:
1892 		target_log_flags |= XFS_ILOG_DBROOT;
1893 		ASSERT(tip->i_d.di_version < 3 ||
1894 		       (target_log_flags & XFS_ILOG_DOWNER));
1895 		break;
1896 	}
1897 
1898 	xfs_trans_log_inode(tp, ip,  src_log_flags);
1899 	xfs_trans_log_inode(tp, tip, target_log_flags);
1900 
1901 	/*
1902 	 * If this is a synchronous mount, make sure that the
1903 	 * transaction goes to disk before returning to the user.
1904 	 */
1905 	if (mp->m_flags & XFS_MOUNT_WSYNC)
1906 		xfs_trans_set_sync(tp);
1907 
1908 	error = xfs_trans_commit(tp);
1909 
1910 	trace_xfs_swap_extent_after(ip, 0);
1911 	trace_xfs_swap_extent_after(tip, 1);
1912 out:
1913 	kmem_free(tempifp);
1914 	return error;
1915 
1916 out_unlock:
1917 	xfs_iunlock(ip, lock_flags);
1918 	xfs_iunlock(tip, lock_flags);
1919 	goto out;
1920 
1921 out_trans_cancel:
1922 	xfs_trans_cancel(tp);
1923 	goto out;
1924 }
1925