xref: /linux/fs/xfs/libxfs/xfs_bmap.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_dir2.h"
17 #include "xfs_inode.h"
18 #include "xfs_btree.h"
19 #include "xfs_trans.h"
20 #include "xfs_alloc.h"
21 #include "xfs_bmap.h"
22 #include "xfs_bmap_util.h"
23 #include "xfs_bmap_btree.h"
24 #include "xfs_rtbitmap.h"
25 #include "xfs_errortag.h"
26 #include "xfs_error.h"
27 #include "xfs_quota.h"
28 #include "xfs_trans_space.h"
29 #include "xfs_buf_item.h"
30 #include "xfs_trace.h"
31 #include "xfs_attr_leaf.h"
32 #include "xfs_filestream.h"
33 #include "xfs_rmap.h"
34 #include "xfs_ag.h"
35 #include "xfs_ag_resv.h"
36 #include "xfs_refcount.h"
37 #include "xfs_icache.h"
38 #include "xfs_iomap.h"
39 #include "xfs_health.h"
40 #include "xfs_bmap_item.h"
41 #include "xfs_symlink_remote.h"
42 #include "xfs_inode_util.h"
43 #include "xfs_rtgroup.h"
44 
45 struct kmem_cache		*xfs_bmap_intent_cache;
46 
47 /*
48  * Miscellaneous helper functions
49  */
50 
51 /*
52  * Compute and fill in the value of the maximum depth of a bmap btree
53  * in this filesystem.  Done once, during mount.
54  */
55 void
56 xfs_bmap_compute_maxlevels(
57 	xfs_mount_t	*mp,		/* file system mount structure */
58 	int		whichfork)	/* data or attr fork */
59 {
60 	uint64_t	maxblocks;	/* max blocks at this level */
61 	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
62 	int		level;		/* btree level */
63 	int		maxrootrecs;	/* max records in root block */
64 	int		minleafrecs;	/* min records in leaf block */
65 	int		minnoderecs;	/* min records in node block */
66 	int		sz;		/* root block size */
67 
68 	/*
69 	 * The maximum number of extents in a fork, hence the maximum number of
70 	 * leaf entries, is controlled by the size of the on-disk extent count.
71 	 *
72  * Note that we can no longer assume that if we are in ATTR1 the
73 	 * fork offset of all the inodes will be
74 	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
75 	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
76 	 * but probably at various positions. Therefore, for both ATTR1 and
77 	 * ATTR2 we have to assume the worst case scenario of a minimum size
78 	 * available.
79 	 */
80 	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
81 				whichfork);
82 	if (whichfork == XFS_DATA_FORK)
83 		sz = xfs_bmdr_space_calc(MINDBTPTRS);
84 	else
85 		sz = xfs_bmdr_space_calc(MINABTPTRS);
86 
87 	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
88 	minleafrecs = mp->m_bmap_dmnr[0];
89 	minnoderecs = mp->m_bmap_dmnr[1];
90 	maxblocks = howmany_64(maxleafents, minleafrecs);
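	/*
	 * Each pass through this loop accounts for one more btree level:
	 * divide the worst-case block count by the minimum fanout of an
	 * interior node, stopping once everything at the current level
	 * would fit in the inode root block.
	 */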
91 	for (level = 1; maxblocks > 1; level++) {
92 		if (maxblocks <= maxrootrecs)
93 			maxblocks = 1;
94 		else
95 			maxblocks = howmany_64(maxblocks, minnoderecs);
96 	}
97 	mp->m_bm_maxlevels[whichfork] = level;
98 	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
99 }
100 
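/*
 * Compute the default byte offset of the attribute fork within the inode
 * literal area.  Small (256 byte) inodes leave just enough room past the
 * offset for a minimal attr btree root; larger inodes instead reserve room
 * for 6 * MINABTPTRS btree pointers ahead of the attribute fork.
 */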
101 unsigned int
102 xfs_bmap_compute_attr_offset(
103 	struct xfs_mount	*mp)
104 {
105 	if (mp->m_sb.sb_inodesize == 256)
106 		return XFS_LITINO(mp) - xfs_bmdr_space_calc(MINABTPTRS);
107 	return xfs_bmdr_space_calc(6 * MINABTPTRS);
108 }
109 
110 STATIC int				/* error */
111 xfs_bmbt_lookup_eq(
112 	struct xfs_btree_cur	*cur,
113 	struct xfs_bmbt_irec	*irec,
114 	int			*stat)	/* success/failure */
115 {
116 	cur->bc_rec.b = *irec;
117 	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
118 }
119 
120 STATIC int				/* error */
121 xfs_bmbt_lookup_first(
122 	struct xfs_btree_cur	*cur,
123 	int			*stat)	/* success/failure */
124 {
125 	cur->bc_rec.b.br_startoff = 0;
126 	cur->bc_rec.b.br_startblock = 0;
127 	cur->bc_rec.b.br_blockcount = 0;
128 	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
129 }
130 
131 /*
132  * Check if the inode needs to be converted to btree format.
133  */
134 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
135 {
136 	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
137 
138 	return whichfork != XFS_COW_FORK &&
139 		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
140 		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
141 }
142 
143 /*
144  * Check if the inode should be converted to extent format.
145  */
146 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
147 {
148 	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
149 
150 	return whichfork != XFS_COW_FORK &&
151 		ifp->if_format == XFS_DINODE_FMT_BTREE &&
152 		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
153 }
154 
155 /*
156  * Update the record referred to by cur to the value given by irec
157  * This either works (return 0) or gets an EFSCORRUPTED error.
158  */
159 STATIC int
160 xfs_bmbt_update(
161 	struct xfs_btree_cur	*cur,
162 	struct xfs_bmbt_irec	*irec)
163 {
164 	union xfs_btree_rec	rec;
165 
166 	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
167 	return xfs_btree_update(cur, &rec);
168 }
169 
170 /*
171  * Compute the worst-case number of indirect blocks that will be used
172  * for ip's delayed extent of length "len".
173  */
174 STATIC xfs_filblks_t
175 xfs_bmap_worst_indlen(
176 	xfs_inode_t	*ip,		/* incore inode pointer */
177 	xfs_filblks_t	len)		/* delayed extent length */
178 {
179 	int		level;		/* btree level number */
180 	int		maxrecs;	/* maximum record count at this level */
181 	xfs_mount_t	*mp;		/* mount structure */
182 	xfs_filblks_t	rval;		/* return value */
183 
184 	mp = ip->i_mount;
185 	maxrecs = mp->m_bmap_dmxr[0];
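	/*
	 * At each level the number of bmbt blocks needed is the previous
	 * level's count divided (rounding up) by the per-block record
	 * capacity; once a level needs only one block, every remaining level
	 * up to the root needs exactly one block as well.
	 */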
186 	for (level = 0, rval = 0;
187 	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
188 	     level++) {
189 		len += maxrecs - 1;
190 		do_div(len, maxrecs);
191 		rval += len;
192 		if (len == 1)
193 			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
194 				level - 1;
195 		if (level == 0)
196 			maxrecs = mp->m_bmap_dmxr[1];
197 	}
198 	return rval;
199 }
200 
201 /*
202  * Calculate the default attribute fork offset for newly created inodes.
203  */
204 uint
205 xfs_default_attroffset(
206 	struct xfs_inode	*ip)
207 {
208 	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
209 		return roundup(sizeof(xfs_dev_t), 8);
210 	return M_IGEO(ip->i_mount)->attr_fork_offset;
211 }
212 
213 /*
214  * Helper routine to reset inode i_forkoff field when switching attribute fork
215  * from local to extent format - we reset it where possible to make space
216  * available for inline data fork extents.
217  */
218 STATIC void
219 xfs_bmap_forkoff_reset(
220 	xfs_inode_t	*ip,
221 	int		whichfork)
222 {
223 	if (whichfork == XFS_ATTR_FORK &&
224 	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
225 	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
226 		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;
227 
228 		if (dfl_forkoff > ip->i_forkoff)
229 			ip->i_forkoff = dfl_forkoff;
230 	}
231 }
232 
233 static int
234 xfs_bmap_read_buf(
235 	struct xfs_mount	*mp,		/* file system mount point */
236 	struct xfs_trans	*tp,		/* transaction pointer */
237 	xfs_fsblock_t		fsbno,		/* file system block number */
238 	struct xfs_buf		**bpp)		/* buffer for fsbno */
239 {
240 	struct xfs_buf		*bp;		/* return value */
241 	int			error;
242 
243 	if (!xfs_verify_fsbno(mp, fsbno))
244 		return -EFSCORRUPTED;
245 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
246 			XFS_FSB_TO_DADDR(mp, fsbno), mp->m_bsize, 0, &bp,
247 			&xfs_bmbt_buf_ops);
248 	if (!error) {
249 		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
250 		*bpp = bp;
251 	}
252 	return error;
253 }
254 
255 #ifdef DEBUG
256 STATIC struct xfs_buf *
257 xfs_bmap_get_bp(
258 	struct xfs_btree_cur	*cur,
259 	xfs_fsblock_t		bno)
260 {
261 	struct xfs_log_item	*lip;
262 	int			i;
263 
264 	if (!cur)
265 		return NULL;
266 
267 	for (i = 0; i < cur->bc_maxlevels; i++) {
268 		if (!cur->bc_levels[i].bp)
269 			break;
270 		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
271 			return cur->bc_levels[i].bp;
272 	}
273 
274 	/* Chase down all the log items to see if the bp is there */
275 	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
276 		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;
277 
278 		if (bip->bli_item.li_type == XFS_LI_BUF &&
279 		    xfs_buf_daddr(bip->bli_buf) == bno)
280 			return bip->bli_buf;
281 	}
282 
283 	return NULL;
284 }
285 
286 STATIC void
287 xfs_check_block(
288 	struct xfs_btree_block	*block,
289 	xfs_mount_t		*mp,
290 	int			root,
291 	short			sz)
292 {
293 	int			i, j, dmxr;
294 	__be64			*pp, *thispa;	/* pointer to block address */
295 	xfs_bmbt_key_t		*prevp, *keyp;
296 
297 	ASSERT(be16_to_cpu(block->bb_level) > 0);
298 
299 	prevp = NULL;
300 	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
301 		dmxr = mp->m_bmap_dmxr[0];
302 		keyp = xfs_bmbt_key_addr(mp, block, i);
303 
304 		if (prevp) {
305 			ASSERT(be64_to_cpu(prevp->br_startoff) <
306 			       be64_to_cpu(keyp->br_startoff));
307 		}
308 		prevp = keyp;
309 
310 		/*
311 		 * Compare the block numbers to see if there are dups.
312 		 */
313 		if (root)
314 			pp = xfs_bmap_broot_ptr_addr(mp, block, i, sz);
315 		else
316 			pp = xfs_bmbt_ptr_addr(mp, block, i, dmxr);
317 
318 		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
319 			if (root)
320 				thispa = xfs_bmap_broot_ptr_addr(mp, block, j, sz);
321 			else
322 				thispa = xfs_bmbt_ptr_addr(mp, block, j, dmxr);
323 			if (*thispa == *pp) {
324 				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
325 					__func__, j, i,
326 					(unsigned long long)be64_to_cpu(*thispa));
327 				xfs_err(mp, "%s: ptrs are equal in node\n",
328 					__func__);
329 				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
330 			}
331 		}
332 	}
333 }
334 
335 /*
336  * Check that the extents for the inode ip are in the right order in all
337  * btree leaves. This becomes prohibitively expensive for large extent count
338  * files, so don't bother with inodes that have more than 10,000 extents in
339  * them. The btree record ordering checks will still be done, so for such
340  * large bmapbt constructs those checks will catch most corruptions.
341  */
342 STATIC void
343 xfs_bmap_check_leaf_extents(
344 	struct xfs_btree_cur	*cur,	/* btree cursor or null */
345 	xfs_inode_t		*ip,		/* incore inode pointer */
346 	int			whichfork)	/* data or attr fork */
347 {
348 	struct xfs_mount	*mp = ip->i_mount;
349 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
350 	struct xfs_btree_block	*block;	/* current btree block */
351 	xfs_fsblock_t		bno;	/* block # of "block" */
352 	struct xfs_buf		*bp;	/* buffer for "block" */
353 	int			error;	/* error return value */
354 	xfs_extnum_t		i=0, j;	/* index into the extents list */
355 	int			level;	/* btree level, for checking */
356 	__be64			*pp;	/* pointer to block address */
357 	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
358 	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
359 	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
360 	int			bp_release = 0;
361 
362 	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
363 		return;
364 
365 	/* skip large extent count inodes */
366 	if (ip->i_df.if_nextents > 10000)
367 		return;
368 
369 	bno = NULLFSBLOCK;
370 	block = ifp->if_broot;
371 	/*
372 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
373 	 */
374 	level = be16_to_cpu(block->bb_level);
375 	ASSERT(level > 0);
376 	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
377 	pp = xfs_bmap_broot_ptr_addr(mp, block, 1, ifp->if_broot_bytes);
378 	bno = be64_to_cpu(*pp);
379 
380 	ASSERT(bno != NULLFSBLOCK);
381 	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
382 	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
383 
384 	/*
385 	 * Go down the tree until leaf level is reached, following the first
386 	 * pointer (leftmost) at each level.
387 	 */
388 	while (level-- > 0) {
389 		/* See if buf is in cur first */
390 		bp_release = 0;
391 		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
392 		if (!bp) {
393 			bp_release = 1;
394 			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
395 			if (xfs_metadata_is_sick(error))
396 				xfs_btree_mark_sick(cur);
397 			if (error)
398 				goto error_norelse;
399 		}
400 		block = XFS_BUF_TO_BLOCK(bp);
401 		if (level == 0)
402 			break;
403 
404 		/*
405 		 * Check this block for basic sanity (increasing keys and
406 		 * no duplicate blocks).
407 		 */
408 
409 		xfs_check_block(block, mp, 0, 0);
410 		pp = xfs_bmbt_ptr_addr(mp, block, 1, mp->m_bmap_dmxr[1]);
411 		bno = be64_to_cpu(*pp);
412 		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
413 			xfs_btree_mark_sick(cur);
414 			error = -EFSCORRUPTED;
415 			goto error0;
416 		}
417 		if (bp_release) {
418 			bp_release = 0;
419 			xfs_trans_brelse(NULL, bp);
420 		}
421 	}
422 
423 	/*
424 	 * Here with bp and block set to the leftmost leaf node in the tree.
425 	 */
426 	i = 0;
427 
428 	/*
429 	 * Loop over all leaf nodes checking that all extents are in the right order.
430 	 */
431 	for (;;) {
432 		xfs_fsblock_t	nextbno;
433 		xfs_extnum_t	num_recs;
434 
435 
436 		num_recs = xfs_btree_get_numrecs(block);
437 
438 		/*
439 		 * Read-ahead the next leaf block, if any.
440 		 */
441 
442 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
443 
444 		/*
445 		 * Check all the extents to make sure they are OK.
446 		 * If we had a previous block, the last entry should
447 		 * conform with the first entry in this one.
448 		 */
449 
450 		ep = xfs_bmbt_rec_addr(mp, block, 1);
451 		if (i) {
452 			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
453 			       xfs_bmbt_disk_get_blockcount(&last) <=
454 			       xfs_bmbt_disk_get_startoff(ep));
455 		}
456 		for (j = 1; j < num_recs; j++) {
457 			nextp = xfs_bmbt_rec_addr(mp, block, j + 1);
458 			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
459 			       xfs_bmbt_disk_get_blockcount(ep) <=
460 			       xfs_bmbt_disk_get_startoff(nextp));
461 			ep = nextp;
462 		}
463 
464 		last = *ep;
465 		i += num_recs;
466 		if (bp_release) {
467 			bp_release = 0;
468 			xfs_trans_brelse(NULL, bp);
469 		}
470 		bno = nextbno;
471 		/*
472 		 * If we've reached the end, stop.
473 		 */
474 		if (bno == NULLFSBLOCK)
475 			break;
476 
477 		bp_release = 0;
478 		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
479 		if (!bp) {
480 			bp_release = 1;
481 			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
482 			if (xfs_metadata_is_sick(error))
483 				xfs_btree_mark_sick(cur);
484 			if (error)
485 				goto error_norelse;
486 		}
487 		block = XFS_BUF_TO_BLOCK(bp);
488 	}
489 
490 	return;
491 
492 error0:
493 	xfs_warn(mp, "%s: at error0", __func__);
494 	if (bp_release)
495 		xfs_trans_brelse(NULL, bp);
496 error_norelse:
497 	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
498 		__func__, i);
499 	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
500 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
501 	return;
502 }
503 
504 /*
505  * Validate that the bmbt_irecs being returned from bmapi are valid
506  * given the caller's original parameters.  Specifically check the
507  * ranges of the returned irecs to ensure that they only extend beyond
508  * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
509  */
510 STATIC void
511 xfs_bmap_validate_ret(
512 	xfs_fileoff_t		bno,
513 	xfs_filblks_t		len,
514 	uint32_t		flags,
515 	xfs_bmbt_irec_t		*mval,
516 	int			nmap,
517 	int			ret_nmap)
518 {
519 	int			i;		/* index to map values */
520 
521 	ASSERT(ret_nmap <= nmap);
522 
523 	for (i = 0; i < ret_nmap; i++) {
524 		ASSERT(mval[i].br_blockcount > 0);
525 		if (!(flags & XFS_BMAPI_ENTIRE)) {
526 			ASSERT(mval[i].br_startoff >= bno);
527 			ASSERT(mval[i].br_blockcount <= len);
528 			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
529 			       bno + len);
530 		} else {
531 			ASSERT(mval[i].br_startoff < bno + len);
532 			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
533 			       bno);
534 		}
535 		ASSERT(i == 0 ||
536 		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
537 		       mval[i].br_startoff);
538 		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
539 		       mval[i].br_startblock != HOLESTARTBLOCK);
540 		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
541 		       mval[i].br_state == XFS_EXT_UNWRITTEN);
542 	}
543 }
544 
545 #else
546 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
547 #define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
548 #endif /* DEBUG */
549 
550 /*
551  * Inode fork format manipulation functions
552  */
553 
554 /*
555  * Convert the inode format to extent format if it currently is in btree format,
556  * but the extent list is small enough that it fits into the extent format.
557  *
558  * Since the extents are already in-core, all we have to do is give up the space
559  * for the btree root and pitch the leaf block.
560  */
561 STATIC int				/* error */
562 xfs_bmap_btree_to_extents(
563 	struct xfs_trans	*tp,	/* transaction pointer */
564 	struct xfs_inode	*ip,	/* incore inode pointer */
565 	struct xfs_btree_cur	*cur,	/* btree cursor */
566 	int			*logflagsp, /* inode logging flags */
567 	int			whichfork)  /* data or attr fork */
568 {
569 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
570 	struct xfs_mount	*mp = ip->i_mount;
571 	struct xfs_btree_block	*rblock = ifp->if_broot;
572 	struct xfs_btree_block	*cblock;/* child btree block */
573 	xfs_fsblock_t		cbno;	/* child block number */
574 	struct xfs_buf		*cbp;	/* child block's buffer */
575 	int			error;	/* error return value */
576 	__be64			*pp;	/* ptr to block address */
577 	struct xfs_owner_info	oinfo;
578 
579 	/* check if we actually need the extent format first: */
580 	if (!xfs_bmap_wants_extents(ip, whichfork))
581 		return 0;
582 
583 	ASSERT(cur);
584 	ASSERT(whichfork != XFS_COW_FORK);
585 	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
586 	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
587 	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
588 	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false) == 1);
589 
590 	pp = xfs_bmap_broot_ptr_addr(mp, rblock, 1, ifp->if_broot_bytes);
591 	cbno = be64_to_cpu(*pp);
592 #ifdef DEBUG
593 	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_verify_fsbno(mp, cbno))) {
594 		xfs_btree_mark_sick(cur);
595 		return -EFSCORRUPTED;
596 	}
597 #endif
598 	error = xfs_bmap_read_buf(mp, tp, cbno, &cbp);
599 	if (xfs_metadata_is_sick(error))
600 		xfs_btree_mark_sick(cur);
601 	if (error)
602 		return error;
603 	cblock = XFS_BUF_TO_BLOCK(cbp);
604 	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
605 		return error;
606 
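	/*
	 * Defer freeing of the former leaf block; the rmap owner records that
	 * it belonged to this inode's bmbt so the reverse mapping can be
	 * updated correctly when the deferred free is processed.
	 */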
607 	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
608 	error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
609 			XFS_AG_RESV_NONE, 0);
610 	if (error)
611 		return error;
612 
613 	ip->i_nblocks--;
614 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
615 	xfs_trans_binval(tp, cbp);
616 	if (cur->bc_levels[0].bp == cbp)
617 		cur->bc_levels[0].bp = NULL;
618 	xfs_iroot_realloc(ip, -1, whichfork);
619 	ASSERT(ifp->if_broot == NULL);
620 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
621 	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
622 	return 0;
623 }
624 
625 /*
626  * Convert an extents-format file into a btree-format file.
627  * The new file will have a root block (in the inode) and a single child block.
628  */
629 STATIC int					/* error */
630 xfs_bmap_extents_to_btree(
631 	struct xfs_trans	*tp,		/* transaction pointer */
632 	struct xfs_inode	*ip,		/* incore inode pointer */
633 	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
634 	int			wasdel,		/* converting a delayed alloc */
635 	int			*logflagsp,	/* inode logging flags */
636 	int			whichfork)	/* data or attr fork */
637 {
638 	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
639 	struct xfs_buf		*abp;		/* buffer for ablock */
640 	struct xfs_alloc_arg	args;		/* allocation arguments */
641 	struct xfs_bmbt_rec	*arp;		/* child record pointer */
642 	struct xfs_btree_block	*block;		/* btree root block */
643 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
644 	int			error;		/* error return value */
645 	struct xfs_ifork	*ifp;		/* inode fork pointer */
646 	struct xfs_bmbt_key	*kp;		/* root block key pointer */
647 	struct xfs_mount	*mp;		/* mount structure */
648 	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
649 	struct xfs_iext_cursor	icur;
650 	struct xfs_bmbt_irec	rec;
651 	xfs_extnum_t		cnt = 0;
652 
653 	mp = ip->i_mount;
654 	ASSERT(whichfork != XFS_COW_FORK);
655 	ifp = xfs_ifork_ptr(ip, whichfork);
656 	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);
657 
658 	/*
659 	 * Make space in the inode incore. This needs to be undone if we fail
660 	 * to expand the root.
661 	 */
662 	xfs_iroot_realloc(ip, 1, whichfork);
663 
664 	/*
665 	 * Fill in the root.
666 	 */
667 	block = ifp->if_broot;
668 	xfs_bmbt_init_block(ip, block, NULL, 1, 1);
669 	/*
670 	 * Need a cursor.  Can't allocate until bb_level is filled in.
671 	 */
672 	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
673 	if (wasdel)
674 		cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
675 	/*
676 	 * Convert to a btree with two levels, one record in root.
677 	 */
678 	ifp->if_format = XFS_DINODE_FMT_BTREE;
679 	memset(&args, 0, sizeof(args));
680 	args.tp = tp;
681 	args.mp = mp;
682 	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
683 
684 	args.minlen = args.maxlen = args.prod = 1;
685 	args.wasdel = wasdel;
686 	*logflagsp = 0;
687 	error = xfs_alloc_vextent_start_ag(&args,
688 				XFS_INO_TO_FSB(mp, ip->i_ino));
689 	if (error)
690 		goto out_root_realloc;
691 
692 	/*
693 	 * Allocation can't fail, the space was reserved.
694 	 */
695 	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
696 		error = -ENOSPC;
697 		goto out_root_realloc;
698 	}
699 
700 	cur->bc_bmap.allocated++;
701 	ip->i_nblocks++;
702 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
703 	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
704 			XFS_FSB_TO_DADDR(mp, args.fsbno),
705 			mp->m_bsize, 0, &abp);
706 	if (error)
707 		goto out_unreserve_dquot;
708 
709 	/*
710 	 * Fill in the child block.
711 	 */
712 	ablock = XFS_BUF_TO_BLOCK(abp);
713 	xfs_bmbt_init_block(ip, ablock, abp, 0, 0);
714 
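	/*
	 * Copy the real extents into the new leaf.  Delayed allocations have
	 * no on-disk mapping yet and are not counted in if_nextents, so they
	 * are skipped here; the assert below checks that exactly if_nextents
	 * records were written.
	 */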
715 	for_each_xfs_iext(ifp, &icur, &rec) {
716 		if (isnullstartblock(rec.br_startblock))
717 			continue;
718 		arp = xfs_bmbt_rec_addr(mp, ablock, 1 + cnt);
719 		xfs_bmbt_disk_set_all(arp, &rec);
720 		cnt++;
721 	}
722 	ASSERT(cnt == ifp->if_nextents);
723 	xfs_btree_set_numrecs(ablock, cnt);
724 
725 	/*
726 	 * Fill in the root key and pointer.
727 	 */
728 	kp = xfs_bmbt_key_addr(mp, block, 1);
729 	arp = xfs_bmbt_rec_addr(mp, ablock, 1);
730 	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
731 	pp = xfs_bmbt_ptr_addr(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
732 						be16_to_cpu(block->bb_level)));
733 	*pp = cpu_to_be64(args.fsbno);
734 
735 	/*
736 	 * Do all this logging at the end so that
737 	 * the root is at the right level.
738 	 */
739 	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
740 	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
741 	ASSERT(*curp == NULL);
742 	*curp = cur;
743 	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
744 	return 0;
745 
746 out_unreserve_dquot:
747 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
748 out_root_realloc:
749 	xfs_iroot_realloc(ip, -1, whichfork);
750 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
751 	ASSERT(ifp->if_broot == NULL);
752 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
753 
754 	return error;
755 }
756 
757 /*
758  * Convert a local file to an extents file.
759  * This code is out of bounds for data forks of regular files,
760  * since the file data needs to get logged so things will stay consistent.
761  * (The bmap-level manipulations are ok, though).
762  */
763 void
764 xfs_bmap_local_to_extents_empty(
765 	struct xfs_trans	*tp,
766 	struct xfs_inode	*ip,
767 	int			whichfork)
768 {
769 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
770 
771 	ASSERT(whichfork != XFS_COW_FORK);
772 	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
773 	ASSERT(ifp->if_bytes == 0);
774 	ASSERT(ifp->if_nextents == 0);
775 
776 	xfs_bmap_forkoff_reset(ip, whichfork);
777 	ifp->if_data = NULL;
778 	ifp->if_height = 0;
779 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
780 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
781 }
782 
783 
784 int					/* error */
785 xfs_bmap_local_to_extents(
786 	xfs_trans_t	*tp,		/* transaction pointer */
787 	xfs_inode_t	*ip,		/* incore inode pointer */
788 	xfs_extlen_t	total,		/* total blocks needed by transaction */
789 	int		*logflagsp,	/* inode logging flags */
790 	int		whichfork,
791 	void		(*init_fn)(struct xfs_trans *tp,
792 				   struct xfs_buf *bp,
793 				   struct xfs_inode *ip,
794 				   struct xfs_ifork *ifp, void *priv),
795 	void		*priv)
796 {
797 	int		error = 0;
798 	int		flags;		/* logging flags returned */
799 	struct xfs_ifork *ifp;		/* inode fork pointer */
800 	xfs_alloc_arg_t	args;		/* allocation arguments */
801 	struct xfs_buf	*bp;		/* buffer for extent block */
802 	struct xfs_bmbt_irec rec;
803 	struct xfs_iext_cursor icur;
804 
805 	/*
806 	 * We don't want to deal with the case of keeping inode data inline yet.
807 	 * So sending the data fork of a regular inode is invalid.
808 	 */
809 	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
810 	ifp = xfs_ifork_ptr(ip, whichfork);
811 	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
812 
813 	if (!ifp->if_bytes) {
814 		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
815 		flags = XFS_ILOG_CORE;
816 		goto done;
817 	}
818 
819 	flags = 0;
820 	error = 0;
821 	memset(&args, 0, sizeof(args));
822 	args.tp = tp;
823 	args.mp = ip->i_mount;
824 	args.total = total;
825 	args.minlen = args.maxlen = args.prod = 1;
826 	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
827 
828 	/*
829 	 * Allocate a block.  We know we need only one, since the
830 	 * file currently fits in an inode.
831 	 */
832 	args.total = total;
833 	args.minlen = args.maxlen = args.prod = 1;
834 	error = xfs_alloc_vextent_start_ag(&args,
835 			XFS_INO_TO_FSB(args.mp, ip->i_ino));
836 	if (error)
837 		goto done;
838 
839 	/* Can't fail, the space was reserved. */
840 	ASSERT(args.fsbno != NULLFSBLOCK);
841 	ASSERT(args.len == 1);
842 	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
843 			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
844 			args.mp->m_bsize, 0, &bp);
845 	if (error)
846 		goto done;
847 
848 	/*
849 	 * Initialize the block, copy the data and log the remote buffer.
850 	 *
851 	 * The callout is responsible for logging because the remote format
852 	 * might differ from the local format and thus we don't know how much to
853 	 * log here. Note that init_fn must also set the buffer log item type
854 	 * correctly.
855 	 */
856 	init_fn(tp, bp, ip, ifp, priv);
857 
858 	/* account for the change in fork size */
859 	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
860 	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
861 	flags |= XFS_ILOG_CORE;
862 
863 	ifp->if_data = NULL;
864 	ifp->if_height = 0;
865 
866 	rec.br_startoff = 0;
867 	rec.br_startblock = args.fsbno;
868 	rec.br_blockcount = 1;
869 	rec.br_state = XFS_EXT_NORM;
870 	xfs_iext_first(ifp, &icur);
871 	xfs_iext_insert(ip, &icur, &rec, 0);
872 
873 	ifp->if_nextents = 1;
874 	ip->i_nblocks = 1;
875 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
876 	flags |= xfs_ilog_fext(whichfork);
877 
878 done:
879 	*logflagsp = flags;
880 	return error;
881 }
882 
883 /*
884  * Called from xfs_bmap_add_attrfork to handle btree format files.
885  */
886 STATIC int					/* error */
887 xfs_bmap_add_attrfork_btree(
888 	xfs_trans_t		*tp,		/* transaction pointer */
889 	xfs_inode_t		*ip,		/* incore inode pointer */
890 	int			*flags)		/* inode logging flags */
891 {
892 	struct xfs_btree_block	*block = ip->i_df.if_broot;
893 	struct xfs_btree_cur	*cur;		/* btree cursor */
894 	int			error;		/* error return value */
895 	xfs_mount_t		*mp;		/* file system mount struct */
896 	int			stat;		/* newroot status */
897 
898 	mp = ip->i_mount;
899 
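	/*
	 * If the existing bmbt root still fits in the data fork with the new
	 * fork offset, all we need to do is relog it.  Otherwise push the
	 * root contents down into a new child block so the root shrinks.
	 */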
900 	if (xfs_bmap_bmdr_space(block) <= xfs_inode_data_fork_size(ip))
901 		*flags |= XFS_ILOG_DBROOT;
902 	else {
903 		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
904 		error = xfs_bmbt_lookup_first(cur, &stat);
905 		if (error)
906 			goto error0;
907 		/* must be at least one entry */
908 		if (XFS_IS_CORRUPT(mp, stat != 1)) {
909 			xfs_btree_mark_sick(cur);
910 			error = -EFSCORRUPTED;
911 			goto error0;
912 		}
913 		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
914 			goto error0;
915 		if (stat == 0) {
916 			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
917 			return -ENOSPC;
918 		}
919 		cur->bc_bmap.allocated = 0;
920 		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
921 	}
922 	return 0;
923 error0:
924 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
925 	return error;
926 }
927 
928 /*
929  * Called from xfs_bmap_add_attrfork to handle extents format files.
930  */
931 STATIC int					/* error */
932 xfs_bmap_add_attrfork_extents(
933 	struct xfs_trans	*tp,		/* transaction pointer */
934 	struct xfs_inode	*ip,		/* incore inode pointer */
935 	int			*flags)		/* inode logging flags */
936 {
937 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
938 	int			error;		/* error return value */
939 
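	/*
	 * If the extent list still fits inline in the (now smaller) data fork
	 * area there is nothing to do; otherwise convert the data fork to
	 * btree format.
	 */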
940 	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
941 	    xfs_inode_data_fork_size(ip))
942 		return 0;
943 	cur = NULL;
944 	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
945 					  XFS_DATA_FORK);
946 	if (cur) {
947 		cur->bc_bmap.allocated = 0;
948 		xfs_btree_del_cursor(cur, error);
949 	}
950 	return error;
951 }
952 
953 /*
954  * Called from xfs_bmap_add_attrfork to handle local format files. Each
955  * different data fork content type needs a different callout to do the
956  * conversion. Some are basic and only require special block initialisation
957  * callouts for the data formatting, others (directories) are so specialised they
958  * handle everything themselves.
959  *
960  * XXX (dgc): investigate whether directory conversion can use the generic
961  * formatting callout. It should be possible - it's just a very complex
962  * formatter.
963  */
964 STATIC int					/* error */
965 xfs_bmap_add_attrfork_local(
966 	struct xfs_trans	*tp,		/* transaction pointer */
967 	struct xfs_inode	*ip,		/* incore inode pointer */
968 	int			*flags)		/* inode logging flags */
969 {
970 	struct xfs_da_args	dargs;		/* args for dir/attr code */
971 
972 	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
973 		return 0;
974 
975 	if (S_ISDIR(VFS_I(ip)->i_mode)) {
976 		memset(&dargs, 0, sizeof(dargs));
977 		dargs.geo = ip->i_mount->m_dir_geo;
978 		dargs.dp = ip;
979 		dargs.total = dargs.geo->fsbcount;
980 		dargs.whichfork = XFS_DATA_FORK;
981 		dargs.trans = tp;
982 		dargs.owner = ip->i_ino;
983 		return xfs_dir2_sf_to_block(&dargs);
984 	}
985 
986 	if (S_ISLNK(VFS_I(ip)->i_mode))
987 		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
988 				XFS_DATA_FORK, xfs_symlink_local_to_remote,
989 				NULL);
990 
991 	/* should only be called for types that support local format data */
992 	ASSERT(0);
993 	xfs_bmap_mark_sick(ip, XFS_ATTR_FORK);
994 	return -EFSCORRUPTED;
995 }
996 
997 /*
998  * Set an inode attr fork offset based on the format of the data fork.
999  */
1000 static int
1001 xfs_bmap_set_attrforkoff(
1002 	struct xfs_inode	*ip,
1003 	int			size,
1004 	int			*version)
1005 {
1006 	int			default_size = xfs_default_attroffset(ip) >> 3;
1007 
1008 	switch (ip->i_df.if_format) {
1009 	case XFS_DINODE_FMT_DEV:
1010 		ip->i_forkoff = default_size;
1011 		break;
1012 	case XFS_DINODE_FMT_LOCAL:
1013 	case XFS_DINODE_FMT_EXTENTS:
1014 	case XFS_DINODE_FMT_BTREE:
1015 		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1016 		if (!ip->i_forkoff)
1017 			ip->i_forkoff = default_size;
1018 		else if (xfs_has_attr2(ip->i_mount) && version)
1019 			*version = 2;
1020 		break;
1021 	default:
1022 		ASSERT(0);
1023 		return -EINVAL;
1024 	}
1025 
1026 	return 0;
1027 }
1028 
1029 /*
1030  * Convert inode from non-attributed to attributed.  Caller must hold the
1031  * ILOCK_EXCL and the file cannot have an attr fork.
1032  */
1033 int						/* error code */
1034 xfs_bmap_add_attrfork(
1035 	struct xfs_trans	*tp,
1036 	struct xfs_inode	*ip,		/* incore inode pointer */
1037 	int			size,		/* space new attribute needs */
1038 	int			rsvd)		/* xact may use reserved blks */
1039 {
1040 	struct xfs_mount	*mp = tp->t_mountp;
1041 	int			version = 1;	/* superblock attr version */
1042 	int			logflags;	/* logging flags */
1043 	int			error;		/* error return value */
1044 
1045 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1046 	if (xfs_is_metadir_inode(ip))
1047 		ASSERT(XFS_IS_DQDETACHED(ip));
1048 	else
1049 		ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1050 	ASSERT(!xfs_inode_has_attr_fork(ip));
1051 
1052 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1053 	error = xfs_bmap_set_attrforkoff(ip, size, &version);
1054 	if (error)
1055 		return error;
1056 
1057 	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
1058 	logflags = 0;
1059 	switch (ip->i_df.if_format) {
1060 	case XFS_DINODE_FMT_LOCAL:
1061 		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
1062 		break;
1063 	case XFS_DINODE_FMT_EXTENTS:
1064 		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
1065 		break;
1066 	case XFS_DINODE_FMT_BTREE:
1067 		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
1068 		break;
1069 	default:
1070 		error = 0;
1071 		break;
1072 	}
1073 	if (logflags)
1074 		xfs_trans_log_inode(tp, ip, logflags);
1075 	if (error)
1076 		return error;
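	/*
	 * If this is the first attr fork on the filesystem, or the first use
	 * of the attr2 format, turn on the corresponding superblock feature
	 * bits under m_sb_lock and log the superblock in this transaction.
	 */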
1077 	if (!xfs_has_attr(mp) ||
1078 	   (!xfs_has_attr2(mp) && version == 2)) {
1079 		bool log_sb = false;
1080 
1081 		spin_lock(&mp->m_sb_lock);
1082 		if (!xfs_has_attr(mp)) {
1083 			xfs_add_attr(mp);
1084 			log_sb = true;
1085 		}
1086 		if (!xfs_has_attr2(mp) && version == 2) {
1087 			xfs_add_attr2(mp);
1088 			log_sb = true;
1089 		}
1090 		spin_unlock(&mp->m_sb_lock);
1091 		if (log_sb)
1092 			xfs_log_sb(tp);
1093 	}
1094 
1095 	return 0;
1096 }
1097 
1098 /*
1099  * Internal and external extent tree search functions.
1100  */
1101 
1102 struct xfs_iread_state {
1103 	struct xfs_iext_cursor	icur;
1104 	xfs_extnum_t		loaded;
1105 };
1106 
1107 int
1108 xfs_bmap_complain_bad_rec(
1109 	struct xfs_inode		*ip,
1110 	int				whichfork,
1111 	xfs_failaddr_t			fa,
1112 	const struct xfs_bmbt_irec	*irec)
1113 {
1114 	struct xfs_mount		*mp = ip->i_mount;
1115 	const char			*forkname;
1116 
1117 	switch (whichfork) {
1118 	case XFS_DATA_FORK:	forkname = "data"; break;
1119 	case XFS_ATTR_FORK:	forkname = "attr"; break;
1120 	case XFS_COW_FORK:	forkname = "CoW"; break;
1121 	default:		forkname = "???"; break;
1122 	}
1123 
1124 	xfs_warn(mp,
1125  "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
1126 				ip->i_ino, forkname, fa);
1127 	xfs_warn(mp,
1128 		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
1129 		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
1130 		irec->br_state);
1131 
1132 	return -EFSCORRUPTED;
1133 }
1134 
1135 /* Stuff every bmbt record from this block into the incore extent map. */
1136 static int
1137 xfs_iread_bmbt_block(
1138 	struct xfs_btree_cur	*cur,
1139 	int			level,
1140 	void			*priv)
1141 {
1142 	struct xfs_iread_state	*ir = priv;
1143 	struct xfs_mount	*mp = cur->bc_mp;
1144 	struct xfs_inode	*ip = cur->bc_ino.ip;
1145 	struct xfs_btree_block	*block;
1146 	struct xfs_buf		*bp;
1147 	struct xfs_bmbt_rec	*frp;
1148 	xfs_extnum_t		num_recs;
1149 	xfs_extnum_t		j;
1150 	int			whichfork = cur->bc_ino.whichfork;
1151 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1152 
1153 	block = xfs_btree_get_block(cur, level, &bp);
1154 
1155 	/* Abort if we find more records than nextents. */
1156 	num_recs = xfs_btree_get_numrecs(block);
1157 	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
1158 		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
1159 				(unsigned long long)ip->i_ino);
1160 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
1161 				sizeof(*block), __this_address);
1162 		xfs_bmap_mark_sick(ip, whichfork);
1163 		return -EFSCORRUPTED;
1164 	}
1165 
1166 	/* Copy records into the incore cache. */
1167 	frp = xfs_bmbt_rec_addr(mp, block, 1);
1168 	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
1169 		struct xfs_bmbt_irec	new;
1170 		xfs_failaddr_t		fa;
1171 
1172 		xfs_bmbt_disk_get_all(frp, &new);
1173 		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
1174 		if (fa) {
1175 			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
1176 					"xfs_iread_extents(2)", frp,
1177 					sizeof(*frp), fa);
1178 			xfs_bmap_mark_sick(ip, whichfork);
1179 			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
1180 					&new);
1181 		}
1182 		xfs_iext_insert(ip, &ir->icur, &new,
1183 				xfs_bmap_fork_to_state(whichfork));
1184 		trace_xfs_read_extent(ip, &ir->icur,
1185 				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
1186 		xfs_iext_next(ifp, &ir->icur);
1187 	}
1188 
1189 	return 0;
1190 }
1191 
1192 /*
1193  * Read in extents from a btree-format inode.
1194  */
1195 int
1196 xfs_iread_extents(
1197 	struct xfs_trans	*tp,
1198 	struct xfs_inode	*ip,
1199 	int			whichfork)
1200 {
1201 	struct xfs_iread_state	ir;
1202 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1203 	struct xfs_mount	*mp = ip->i_mount;
1204 	struct xfs_btree_cur	*cur;
1205 	int			error;
1206 
1207 	if (!xfs_need_iread_extents(ifp))
1208 		return 0;
1209 
1210 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1211 
1212 	ir.loaded = 0;
1213 	xfs_iext_first(ifp, &ir.icur);
1214 	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
1215 	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
1216 			XFS_BTREE_VISIT_RECORDS, &ir);
1217 	xfs_btree_del_cursor(cur, error);
1218 	if (error)
1219 		goto out;
1220 
1221 	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
1222 		xfs_bmap_mark_sick(ip, whichfork);
1223 		error = -EFSCORRUPTED;
1224 		goto out;
1225 	}
1226 	ASSERT(ir.loaded == xfs_iext_count(ifp));
1227 	/*
1228 	 * Use release semantics so that we can use acquire semantics in
1229 	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
1230 	 * after that load.
1231 	 */
1232 	smp_store_release(&ifp->if_needextents, 0);
1233 	return 0;
1234 out:
1235 	if (xfs_metadata_is_sick(error))
1236 		xfs_bmap_mark_sick(ip, whichfork);
1237 	xfs_iext_destroy(ifp);
1238 	return error;
1239 }
1240 
1241 /*
1242  * Returns the relative block number of the first unused block(s) in the given
1243  * fork with at least "len" logically contiguous blocks free.  This is the
1244  * lowest-address hole if the fork has holes, else the first block past the end
1245  * of the fork.  Returns 0 if the fork is currently local (in-inode).
1246  */
1247 int						/* error */
1248 xfs_bmap_first_unused(
1249 	struct xfs_trans	*tp,		/* transaction pointer */
1250 	struct xfs_inode	*ip,		/* incore inode */
1251 	xfs_extlen_t		len,		/* size of hole to find */
1252 	xfs_fileoff_t		*first_unused,	/* unused block */
1253 	int			whichfork)	/* data or attr fork */
1254 {
1255 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1256 	struct xfs_bmbt_irec	got;
1257 	struct xfs_iext_cursor	icur;
1258 	xfs_fileoff_t		lastaddr = 0;
1259 	xfs_fileoff_t		lowest, max;
1260 	int			error;
1261 
1262 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
1263 		*first_unused = 0;
1264 		return 0;
1265 	}
1266 
1267 	ASSERT(xfs_ifork_has_extents(ifp));
1268 
1269 	error = xfs_iread_extents(tp, ip, whichfork);
1270 	if (error)
1271 		return error;
1272 
1273 	lowest = max = *first_unused;
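	/*
	 * Walk the extents in offset order.  "max" tracks the first offset
	 * past every extent seen so far (never below "lowest"); as soon as
	 * the gap between "max" and the next extent's start is at least
	 * "len" blocks, "max" is the answer.
	 */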
1274 	for_each_xfs_iext(ifp, &icur, &got) {
1275 		/*
1276 		 * See if the hole before this extent will work.
1277 		 */
1278 		if (got.br_startoff >= lowest + len &&
1279 		    got.br_startoff - max >= len)
1280 			break;
1281 		lastaddr = got.br_startoff + got.br_blockcount;
1282 		max = XFS_FILEOFF_MAX(lastaddr, lowest);
1283 	}
1284 
1285 	*first_unused = max;
1286 	return 0;
1287 }
1288 
1289 /*
1290  * Returns the file-relative block number of the last block - 1 before
1291  * last_block (input value) in the file.
1292  * This is not based on i_size, it is based on the extent records.
1293  * Returns 0 for local files, as they do not have extent records.
1294  */
1295 int						/* error */
1296 xfs_bmap_last_before(
1297 	struct xfs_trans	*tp,		/* transaction pointer */
1298 	struct xfs_inode	*ip,		/* incore inode */
1299 	xfs_fileoff_t		*last_block,	/* last block */
1300 	int			whichfork)	/* data or attr fork */
1301 {
1302 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1303 	struct xfs_bmbt_irec	got;
1304 	struct xfs_iext_cursor	icur;
1305 	int			error;
1306 
1307 	switch (ifp->if_format) {
1308 	case XFS_DINODE_FMT_LOCAL:
1309 		*last_block = 0;
1310 		return 0;
1311 	case XFS_DINODE_FMT_BTREE:
1312 	case XFS_DINODE_FMT_EXTENTS:
1313 		break;
1314 	default:
1315 		ASSERT(0);
1316 		xfs_bmap_mark_sick(ip, whichfork);
1317 		return -EFSCORRUPTED;
1318 	}
1319 
1320 	error = xfs_iread_extents(tp, ip, whichfork);
1321 	if (error)
1322 		return error;
1323 
1324 	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
1325 		*last_block = 0;
1326 	return 0;
1327 }
1328 
1329 int
1330 xfs_bmap_last_extent(
1331 	struct xfs_trans	*tp,
1332 	struct xfs_inode	*ip,
1333 	int			whichfork,
1334 	struct xfs_bmbt_irec	*rec,
1335 	int			*is_empty)
1336 {
1337 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1338 	struct xfs_iext_cursor	icur;
1339 	int			error;
1340 
1341 	error = xfs_iread_extents(tp, ip, whichfork);
1342 	if (error)
1343 		return error;
1344 
1345 	xfs_iext_last(ifp, &icur);
1346 	if (!xfs_iext_get_extent(ifp, &icur, rec))
1347 		*is_empty = 1;
1348 	else
1349 		*is_empty = 0;
1350 	return 0;
1351 }
1352 
1353 /*
1354  * Check the last inode extent to determine whether this allocation will result
1355  * in blocks being allocated at the end of the file. When we allocate new data
1356  * blocks at the end of the file which do not start at the previous data block,
1357  * we will try to align the new blocks at stripe unit boundaries.
1358  *
1359  * Returns true in bma->aeof if the file (fork) is empty, as any new write
1360  * will be at, or past, EOF.
1361  */
1362 STATIC int
1363 xfs_bmap_isaeof(
1364 	struct xfs_bmalloca	*bma,
1365 	int			whichfork)
1366 {
1367 	struct xfs_bmbt_irec	rec;
1368 	int			is_empty;
1369 	int			error;
1370 
1371 	bma->aeof = false;
1372 	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1373 				     &is_empty);
1374 	if (error)
1375 		return error;
1376 
1377 	if (is_empty) {
1378 		bma->aeof = true;
1379 		return 0;
1380 	}
1381 
1382 	/*
1383 	 * Check if we are allocating at or past the last extent, or at least into
1384 	 * the last delayed allocation extent.
1385 	 */
1386 	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1387 		(bma->offset >= rec.br_startoff &&
1388 		 isnullstartblock(rec.br_startblock));
1389 	return 0;
1390 }
1391 
1392 /*
1393  * Returns the file-relative block number of the first block past eof in
1394  * the file.  This is not based on i_size, it is based on the extent records.
1395  * Returns 0 for local files, as they do not have extent records.
1396  */
1397 int
1398 xfs_bmap_last_offset(
1399 	struct xfs_inode	*ip,
1400 	xfs_fileoff_t		*last_block,
1401 	int			whichfork)
1402 {
1403 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1404 	struct xfs_bmbt_irec	rec;
1405 	int			is_empty;
1406 	int			error;
1407 
1408 	*last_block = 0;
1409 
1410 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
1411 		return 0;
1412 
1413 	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp))) {
1414 		xfs_bmap_mark_sick(ip, whichfork);
1415 		return -EFSCORRUPTED;
1416 	}
1417 
1418 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1419 	if (error || is_empty)
1420 		return error;
1421 
1422 	*last_block = rec.br_startoff + rec.br_blockcount;
1423 	return 0;
1424 }
1425 
1426 /*
1427  * Extent tree manipulation functions used during allocation.
1428  */
1429 
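/*
 * Decide whether two extents may be treated as contiguous for merging.  When
 * the rtgroups feature is enabled, extents in a realtime fork only count as
 * contiguous if both sit in the same realtime group; otherwise this check
 * always succeeds.
 */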
1430 static inline bool
1431 xfs_bmap_same_rtgroup(
1432 	struct xfs_inode	*ip,
1433 	int			whichfork,
1434 	struct xfs_bmbt_irec	*left,
1435 	struct xfs_bmbt_irec	*right)
1436 {
1437 	struct xfs_mount	*mp = ip->i_mount;
1438 
1439 	if (xfs_ifork_is_realtime(ip, whichfork) && xfs_has_rtgroups(mp)) {
1440 		if (xfs_rtb_to_rgno(mp, left->br_startblock) !=
1441 		    xfs_rtb_to_rgno(mp, right->br_startblock))
1442 			return false;
1443 	}
1444 
1445 	return true;
1446 }
1447 
1448 /*
1449  * Convert a delayed allocation to a real allocation.
1450  */
1451 STATIC int				/* error */
1452 xfs_bmap_add_extent_delay_real(
1453 	struct xfs_bmalloca	*bma,
1454 	int			whichfork)
1455 {
1456 	struct xfs_mount	*mp = bma->ip->i_mount;
1457 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
1458 	struct xfs_bmbt_irec	*new = &bma->got;
1459 	int			error;	/* error return value */
1460 	int			i;	/* temp state */
1461 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
1462 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
1463 					/* left is 0, right is 1, prev is 2 */
1464 	int			rval=0;	/* return value (logging flags) */
1465 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
1466 	xfs_filblks_t		da_new; /* new count del alloc blocks used */
1467 	xfs_filblks_t		da_old; /* old count del alloc blocks used */
1468 	xfs_filblks_t		temp=0;	/* value for da_new calculations */
1469 	int			tmp_rval;	/* partial logging flags */
1470 	struct xfs_bmbt_irec	old;
1471 
1472 	ASSERT(whichfork != XFS_ATTR_FORK);
1473 	ASSERT(!isnullstartblock(new->br_startblock));
1474 	ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
1475 
1476 	XFS_STATS_INC(mp, xs_add_exlist);
1477 
1478 #define	LEFT		r[0]
1479 #define	RIGHT		r[1]
1480 #define	PREV		r[2]
1481 
1482 	/*
1483 	 * Set up a bunch of variables to make the tests simpler.
1484 	 */
1485 	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
1486 	new_endoff = new->br_startoff + new->br_blockcount;
1487 	ASSERT(isnullstartblock(PREV.br_startblock));
1488 	ASSERT(PREV.br_startoff <= new->br_startoff);
1489 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1490 
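	/*
	 * da_old is the indirect-block reservation held by the delalloc
	 * extent being replaced; da_new is recomputed below for whatever
	 * delalloc remains, and must never exceed da_old.
	 */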
1491 	da_old = startblockval(PREV.br_startblock);
1492 	da_new = 0;
1493 
1494 	/*
1495 	 * Set flags determining what part of the previous delayed allocation
1496 	 * extent is being replaced by a real allocation.
1497 	 */
1498 	if (PREV.br_startoff == new->br_startoff)
1499 		state |= BMAP_LEFT_FILLING;
1500 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1501 		state |= BMAP_RIGHT_FILLING;
1502 
1503 	/*
1504 	 * Check and set flags if this segment has a left neighbor.
1505 	 * Don't set contiguous if the combined extent would be too large.
1506 	 */
1507 	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
1508 		state |= BMAP_LEFT_VALID;
1509 		if (isnullstartblock(LEFT.br_startblock))
1510 			state |= BMAP_LEFT_DELAY;
1511 	}
1512 
1513 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1514 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1515 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1516 	    LEFT.br_state == new->br_state &&
1517 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
1518 	    xfs_bmap_same_rtgroup(bma->ip, whichfork, &LEFT, new))
1519 		state |= BMAP_LEFT_CONTIG;
1520 
1521 	/*
1522 	 * Check and set flags if this segment has a right neighbor.
1523 	 * Don't set contiguous if the combined extent would be too large.
1524 	 * Also check for all-three-contiguous being too large.
1525 	 */
1526 	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
1527 		state |= BMAP_RIGHT_VALID;
1528 		if (isnullstartblock(RIGHT.br_startblock))
1529 			state |= BMAP_RIGHT_DELAY;
1530 	}
1531 
1532 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1533 	    new_endoff == RIGHT.br_startoff &&
1534 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1535 	    new->br_state == RIGHT.br_state &&
1536 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
1537 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1538 		       BMAP_RIGHT_FILLING)) !=
1539 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1540 		       BMAP_RIGHT_FILLING) ||
1541 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1542 			<= XFS_MAX_BMBT_EXTLEN) &&
1543 	    xfs_bmap_same_rtgroup(bma->ip, whichfork, new, &RIGHT))
1544 		state |= BMAP_RIGHT_CONTIG;
1545 
1546 	error = 0;
1547 	/*
1548 	 * Switch out based on the FILLING and CONTIG state bits.
1549 	 */
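	/*
	 * LEFT_FILLING/RIGHT_FILLING say whether the new real extent covers
	 * the start/end of the old delalloc extent; LEFT_CONTIG/RIGHT_CONTIG
	 * say whether it can be merged with the real extent on that side.
	 */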
1550 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1551 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1552 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1553 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1554 		/*
1555 		 * Filling in all of a previously delayed allocation extent.
1556 		 * The left and right neighbors are both contiguous with new.
1557 		 */
1558 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
1559 
1560 		xfs_iext_remove(bma->ip, &bma->icur, state);
1561 		xfs_iext_remove(bma->ip, &bma->icur, state);
1562 		xfs_iext_prev(ifp, &bma->icur);
1563 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1564 		ifp->if_nextents--;
1565 
1566 		if (bma->cur == NULL)
1567 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1568 		else {
1569 			rval = XFS_ILOG_CORE;
1570 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1571 			if (error)
1572 				goto done;
1573 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1574 				xfs_btree_mark_sick(bma->cur);
1575 				error = -EFSCORRUPTED;
1576 				goto done;
1577 			}
1578 			error = xfs_btree_delete(bma->cur, &i);
1579 			if (error)
1580 				goto done;
1581 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1582 				xfs_btree_mark_sick(bma->cur);
1583 				error = -EFSCORRUPTED;
1584 				goto done;
1585 			}
1586 			error = xfs_btree_decrement(bma->cur, 0, &i);
1587 			if (error)
1588 				goto done;
1589 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1590 				xfs_btree_mark_sick(bma->cur);
1591 				error = -EFSCORRUPTED;
1592 				goto done;
1593 			}
1594 			error = xfs_bmbt_update(bma->cur, &LEFT);
1595 			if (error)
1596 				goto done;
1597 		}
1598 		ASSERT(da_new <= da_old);
1599 		break;
1600 
1601 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1602 		/*
1603 		 * Filling in all of a previously delayed allocation extent.
1604 		 * The left neighbor is contiguous, the right is not.
1605 		 */
1606 		old = LEFT;
1607 		LEFT.br_blockcount += PREV.br_blockcount;
1608 
1609 		xfs_iext_remove(bma->ip, &bma->icur, state);
1610 		xfs_iext_prev(ifp, &bma->icur);
1611 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1612 
1613 		if (bma->cur == NULL)
1614 			rval = XFS_ILOG_DEXT;
1615 		else {
1616 			rval = 0;
1617 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1618 			if (error)
1619 				goto done;
1620 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1621 				xfs_btree_mark_sick(bma->cur);
1622 				error = -EFSCORRUPTED;
1623 				goto done;
1624 			}
1625 			error = xfs_bmbt_update(bma->cur, &LEFT);
1626 			if (error)
1627 				goto done;
1628 		}
1629 		ASSERT(da_new <= da_old);
1630 		break;
1631 
1632 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1633 		/*
1634 		 * Filling in all of a previously delayed allocation extent.
1635 		 * The right neighbor is contiguous, the left is not. Take care
1636 		 * with delay -> unwritten extent allocation here because the
1637 		 * delalloc record we are overwriting is always written.
1638 		 */
1639 		PREV.br_startblock = new->br_startblock;
1640 		PREV.br_blockcount += RIGHT.br_blockcount;
1641 		PREV.br_state = new->br_state;
1642 
1643 		xfs_iext_next(ifp, &bma->icur);
1644 		xfs_iext_remove(bma->ip, &bma->icur, state);
1645 		xfs_iext_prev(ifp, &bma->icur);
1646 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1647 
1648 		if (bma->cur == NULL)
1649 			rval = XFS_ILOG_DEXT;
1650 		else {
1651 			rval = 0;
1652 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1653 			if (error)
1654 				goto done;
1655 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1656 				xfs_btree_mark_sick(bma->cur);
1657 				error = -EFSCORRUPTED;
1658 				goto done;
1659 			}
1660 			error = xfs_bmbt_update(bma->cur, &PREV);
1661 			if (error)
1662 				goto done;
1663 		}
1664 		ASSERT(da_new <= da_old);
1665 		break;
1666 
1667 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1668 		/*
1669 		 * Filling in all of a previously delayed allocation extent.
1670 		 * Neither the left nor right neighbors are contiguous with
1671 		 * the new one.
1672 		 */
1673 		PREV.br_startblock = new->br_startblock;
1674 		PREV.br_state = new->br_state;
1675 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1676 		ifp->if_nextents++;
1677 
1678 		if (bma->cur == NULL)
1679 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1680 		else {
1681 			rval = XFS_ILOG_CORE;
1682 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1683 			if (error)
1684 				goto done;
1685 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1686 				xfs_btree_mark_sick(bma->cur);
1687 				error = -EFSCORRUPTED;
1688 				goto done;
1689 			}
1690 			error = xfs_btree_insert(bma->cur, &i);
1691 			if (error)
1692 				goto done;
1693 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1694 				xfs_btree_mark_sick(bma->cur);
1695 				error = -EFSCORRUPTED;
1696 				goto done;
1697 			}
1698 		}
1699 		ASSERT(da_new <= da_old);
1700 		break;
1701 
1702 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1703 		/*
1704 		 * Filling in the first part of a previous delayed allocation.
1705 		 * The left neighbor is contiguous.
1706 		 */
1707 		old = LEFT;
1708 		temp = PREV.br_blockcount - new->br_blockcount;
1709 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1710 				startblockval(PREV.br_startblock));
1711 
1712 		LEFT.br_blockcount += new->br_blockcount;
1713 
1714 		PREV.br_blockcount = temp;
1715 		PREV.br_startoff += new->br_blockcount;
1716 		PREV.br_startblock = nullstartblock(da_new);
1717 
1718 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1719 		xfs_iext_prev(ifp, &bma->icur);
1720 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1721 
1722 		if (bma->cur == NULL)
1723 			rval = XFS_ILOG_DEXT;
1724 		else {
1725 			rval = 0;
1726 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1727 			if (error)
1728 				goto done;
1729 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1730 				xfs_btree_mark_sick(bma->cur);
1731 				error = -EFSCORRUPTED;
1732 				goto done;
1733 			}
1734 			error = xfs_bmbt_update(bma->cur, &LEFT);
1735 			if (error)
1736 				goto done;
1737 		}
1738 		ASSERT(da_new <= da_old);
1739 		break;
1740 
1741 	case BMAP_LEFT_FILLING:
1742 		/*
1743 		 * Filling in the first part of a previous delayed allocation.
1744 		 * The left neighbor is not contiguous.
1745 		 */
1746 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1747 		ifp->if_nextents++;
1748 
1749 		if (bma->cur == NULL)
1750 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1751 		else {
1752 			rval = XFS_ILOG_CORE;
1753 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1754 			if (error)
1755 				goto done;
1756 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1757 				xfs_btree_mark_sick(bma->cur);
1758 				error = -EFSCORRUPTED;
1759 				goto done;
1760 			}
1761 			error = xfs_btree_insert(bma->cur, &i);
1762 			if (error)
1763 				goto done;
1764 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1765 				xfs_btree_mark_sick(bma->cur);
1766 				error = -EFSCORRUPTED;
1767 				goto done;
1768 			}
1769 		}
1770 
1771 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1772 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1773 					&bma->cur, 1, &tmp_rval, whichfork);
1774 			rval |= tmp_rval;
1775 			if (error)
1776 				goto done;
1777 		}
1778 
1779 		temp = PREV.br_blockcount - new->br_blockcount;
1780 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1781 			startblockval(PREV.br_startblock) -
1782 			(bma->cur ? bma->cur->bc_bmap.allocated : 0));
1783 
1784 		PREV.br_startoff = new_endoff;
1785 		PREV.br_blockcount = temp;
1786 		PREV.br_startblock = nullstartblock(da_new);
1787 		xfs_iext_next(ifp, &bma->icur);
1788 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1789 		xfs_iext_prev(ifp, &bma->icur);
1790 		break;
1791 
1792 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1793 		/*
1794 		 * Filling in the last part of a previous delayed allocation.
1795 		 * The right neighbor is contiguous with the new allocation.
1796 		 */
1797 		old = RIGHT;
1798 		RIGHT.br_startoff = new->br_startoff;
1799 		RIGHT.br_startblock = new->br_startblock;
1800 		RIGHT.br_blockcount += new->br_blockcount;
1801 
1802 		if (bma->cur == NULL)
1803 			rval = XFS_ILOG_DEXT;
1804 		else {
1805 			rval = 0;
1806 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1807 			if (error)
1808 				goto done;
1809 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1810 				xfs_btree_mark_sick(bma->cur);
1811 				error = -EFSCORRUPTED;
1812 				goto done;
1813 			}
1814 			error = xfs_bmbt_update(bma->cur, &RIGHT);
1815 			if (error)
1816 				goto done;
1817 		}
1818 
1819 		temp = PREV.br_blockcount - new->br_blockcount;
1820 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1821 			startblockval(PREV.br_startblock));
1822 
1823 		PREV.br_blockcount = temp;
1824 		PREV.br_startblock = nullstartblock(da_new);
1825 
1826 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1827 		xfs_iext_next(ifp, &bma->icur);
1828 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
1829 		ASSERT(da_new <= da_old);
1830 		break;
1831 
1832 	case BMAP_RIGHT_FILLING:
1833 		/*
1834 		 * Filling in the last part of a previous delayed allocation.
1835 		 * The right neighbor is not contiguous.
1836 		 */
1837 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1838 		ifp->if_nextents++;
1839 
1840 		if (bma->cur == NULL)
1841 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1842 		else {
1843 			rval = XFS_ILOG_CORE;
1844 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1845 			if (error)
1846 				goto done;
1847 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1848 				xfs_btree_mark_sick(bma->cur);
1849 				error = -EFSCORRUPTED;
1850 				goto done;
1851 			}
1852 			error = xfs_btree_insert(bma->cur, &i);
1853 			if (error)
1854 				goto done;
1855 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1856 				xfs_btree_mark_sick(bma->cur);
1857 				error = -EFSCORRUPTED;
1858 				goto done;
1859 			}
1860 		}
1861 
1862 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1863 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1864 				&bma->cur, 1, &tmp_rval, whichfork);
1865 			rval |= tmp_rval;
1866 			if (error)
1867 				goto done;
1868 		}
1869 
1870 		temp = PREV.br_blockcount - new->br_blockcount;
1871 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1872 			startblockval(PREV.br_startblock) -
1873 			(bma->cur ? bma->cur->bc_bmap.allocated : 0));
1874 
1875 		PREV.br_startblock = nullstartblock(da_new);
1876 		PREV.br_blockcount = temp;
1877 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1878 		xfs_iext_next(ifp, &bma->icur);
1879 		ASSERT(da_new <= da_old);
1880 		break;
1881 
1882 	case 0:
1883 		/*
1884 		 * Filling in the middle part of a previous delayed allocation.
1885 		 * Contiguity is impossible here.
1886 		 * This case is avoided almost all the time.
1887 		 *
1888 		 * We start with a delayed allocation:
1889 		 *
1890 		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
1891 		 *  PREV @ idx
1892 		 *
1893 		 * and we are allocating:
1894 		 *                     +rrrrrrrrrrrrrrrrr+
1895 		 *			      new
1896 		 *
1897 		 * and we set it up for insertion as:
1898 		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
1899 		 *                            new
1900 		 *  PREV @ idx          LEFT              RIGHT
1901 		 *                      inserted at idx + 1
1902 		 */
1903 		old = PREV;
1904 
1905 		/* LEFT is the new middle */
1906 		LEFT = *new;
1907 
1908 		/* RIGHT is the new right */
1909 		RIGHT.br_state = PREV.br_state;
1910 		RIGHT.br_startoff = new_endoff;
1911 		RIGHT.br_blockcount =
1912 			PREV.br_startoff + PREV.br_blockcount - new_endoff;
1913 		RIGHT.br_startblock =
1914 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1915 					RIGHT.br_blockcount));
1916 
1917 		/* truncate PREV */
1918 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
1919 		PREV.br_startblock =
1920 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1921 					PREV.br_blockcount));
1922 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1923 
1924 		xfs_iext_next(ifp, &bma->icur);
1925 		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
1926 		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
1927 		ifp->if_nextents++;
1928 
1929 		if (bma->cur == NULL)
1930 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1931 		else {
1932 			rval = XFS_ILOG_CORE;
1933 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1934 			if (error)
1935 				goto done;
1936 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1937 				xfs_btree_mark_sick(bma->cur);
1938 				error = -EFSCORRUPTED;
1939 				goto done;
1940 			}
1941 			error = xfs_btree_insert(bma->cur, &i);
1942 			if (error)
1943 				goto done;
1944 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1945 				xfs_btree_mark_sick(bma->cur);
1946 				error = -EFSCORRUPTED;
1947 				goto done;
1948 			}
1949 		}
1950 
1951 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1952 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1953 					&bma->cur, 1, &tmp_rval, whichfork);
1954 			rval |= tmp_rval;
1955 			if (error)
1956 				goto done;
1957 		}
1958 
1959 		da_new = startblockval(PREV.br_startblock) +
1960 			 startblockval(RIGHT.br_startblock);
1961 		break;
1962 
1963 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1964 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1965 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1966 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1967 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1968 	case BMAP_LEFT_CONTIG:
1969 	case BMAP_RIGHT_CONTIG:
1970 		/*
1971 		 * These cases are all impossible.
1972 		 */
1973 		ASSERT(0);
1974 	}
1975 
1976 	/* add reverse mapping unless caller opted out */
1977 	if (!(bma->flags & XFS_BMAPI_NORMAP))
1978 		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
1979 
1980 	/* convert to a btree if necessary */
1981 	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1982 		int	tmp_logflags;	/* partial log flag return val */
1983 
1984 		ASSERT(bma->cur == NULL);
1985 		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1986 				&bma->cur, da_old > 0, &tmp_logflags,
1987 				whichfork);
1988 		bma->logflags |= tmp_logflags;
1989 		if (error)
1990 			goto done;
1991 	}
1992 
1993 	if (da_new != da_old)
1994 		xfs_mod_delalloc(bma->ip, 0, (int64_t)da_new - da_old);
1995 
1996 	if (bma->cur) {
1997 		da_new += bma->cur->bc_bmap.allocated;
1998 		bma->cur->bc_bmap.allocated = 0;
1999 	}
2000 
2001 	/* adjust for changes in reserved delayed indirect blocks */
2002 	if (da_new < da_old)
2003 		xfs_add_fdblocks(mp, da_old - da_new);
2004 	else if (da_new > da_old)
2005 		error = xfs_dec_fdblocks(mp, da_new - da_old, true);
2006 
2007 	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
2008 done:
2009 	if (whichfork != XFS_COW_FORK)
2010 		bma->logflags |= rval;
2011 	return error;
2012 #undef	LEFT
2013 #undef	RIGHT
2014 #undef	PREV
2015 }
2016 
2017 /*
2018  * Convert an unwritten allocation to a real allocation or vice versa.
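 *
 * The extent described by @new must lie entirely within the existing extent
 * at @icur (PREV) and differ from it in br_state.  Depending on which end(s)
 * of PREV are converted and whether the converted range can be merged with
 * the left/right neighbours, the in-core extent list and, if a cursor was
 * passed in, the on-disk bmap btree are updated; the inode logging flags the
 * caller must apply are returned in *logflagsp.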
2019  */
2020 int					/* error */
2021 xfs_bmap_add_extent_unwritten_real(
2022 	struct xfs_trans	*tp,
2023 	xfs_inode_t		*ip,	/* incore inode pointer */
2024 	int			whichfork,
2025 	struct xfs_iext_cursor	*icur,
2026 	struct xfs_btree_cur	**curp,	/* if *curp is null, not a btree */
2027 	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
2028 	int			*logflagsp) /* inode logging flags */
2029 {
2030 	struct xfs_btree_cur	*cur;	/* btree cursor */
2031 	int			error;	/* error return value */
2032 	int			i;	/* temp state */
2033 	struct xfs_ifork	*ifp;	/* inode fork pointer */
2034 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
2035 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
2036 					/* left is 0, right is 1, prev is 2 */
2037 	int			rval=0;	/* return value (logging flags) */
2038 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2039 	struct xfs_mount	*mp = ip->i_mount;
2040 	struct xfs_bmbt_irec	old;
2041 
2042 	*logflagsp = 0;
2043 
2044 	cur = *curp;
2045 	ifp = xfs_ifork_ptr(ip, whichfork);
2046 
2047 	ASSERT(!isnullstartblock(new->br_startblock));
2048 
2049 	XFS_STATS_INC(mp, xs_add_exlist);
2050 
2051 #define	LEFT		r[0]
2052 #define	RIGHT		r[1]
2053 #define	PREV		r[2]
2054 
2055 	/*
2056 	 * Set up a bunch of variables to make the tests simpler.
2057 	 */
2058 	error = 0;
2059 	xfs_iext_get_extent(ifp, icur, &PREV);
2060 	ASSERT(new->br_state != PREV.br_state);
2061 	new_endoff = new->br_startoff + new->br_blockcount;
2062 	ASSERT(PREV.br_startoff <= new->br_startoff);
2063 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2064 
2065 	/*
2066 	 * Set flags determining what part of the previous oldext allocation
2067 	 * extent is being replaced by a newext allocation.
2068 	 */
2069 	if (PREV.br_startoff == new->br_startoff)
2070 		state |= BMAP_LEFT_FILLING;
2071 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2072 		state |= BMAP_RIGHT_FILLING;
2073 
2074 	/*
2075 	 * Check and set flags if this segment has a left neighbor.
2076 	 * Don't set contiguous if the combined extent would be too large.
2077 	 */
2078 	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
2079 		state |= BMAP_LEFT_VALID;
2080 		if (isnullstartblock(LEFT.br_startblock))
2081 			state |= BMAP_LEFT_DELAY;
2082 	}
2083 
2084 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2085 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2086 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2087 	    LEFT.br_state == new->br_state &&
2088 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2089 	    xfs_bmap_same_rtgroup(ip, whichfork, &LEFT, new))
2090 		state |= BMAP_LEFT_CONTIG;
2091 
2092 	/*
2093 	 * Check and set flags if this segment has a right neighbor.
2094 	 * Don't set contiguous if the combined extent would be too large.
2095 	 * Also check for all-three-contiguous being too large.
2096 	 */
2097 	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
2098 		state |= BMAP_RIGHT_VALID;
2099 		if (isnullstartblock(RIGHT.br_startblock))
2100 			state |= BMAP_RIGHT_DELAY;
2101 	}
2102 
2103 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2104 	    new_endoff == RIGHT.br_startoff &&
2105 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2106 	    new->br_state == RIGHT.br_state &&
2107 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2108 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2109 		       BMAP_RIGHT_FILLING)) !=
2110 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2111 		       BMAP_RIGHT_FILLING) ||
2112 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2113 			<= XFS_MAX_BMBT_EXTLEN) &&
2114 	    xfs_bmap_same_rtgroup(ip, whichfork, new, &RIGHT))
2115 		state |= BMAP_RIGHT_CONTIG;
2116 
2117 	/*
2118 	 * Switch out based on the FILLING and CONTIG state bits.
2119 	 */
2120 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2121 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2122 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2123 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2124 		/*
2125 		 * Setting all of a previous oldext extent to newext.
2126 		 * The left and right neighbors are both contiguous with new.
2127 		 */
2128 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2129 
2130 		xfs_iext_remove(ip, icur, state);
2131 		xfs_iext_remove(ip, icur, state);
2132 		xfs_iext_prev(ifp, icur);
2133 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2134 		ifp->if_nextents -= 2;
2135 		if (cur == NULL)
2136 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2137 		else {
2138 			rval = XFS_ILOG_CORE;
2139 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2140 			if (error)
2141 				goto done;
2142 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2143 				xfs_btree_mark_sick(cur);
2144 				error = -EFSCORRUPTED;
2145 				goto done;
2146 			}
2147 			if ((error = xfs_btree_delete(cur, &i)))
2148 				goto done;
2149 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2150 				xfs_btree_mark_sick(cur);
2151 				error = -EFSCORRUPTED;
2152 				goto done;
2153 			}
2154 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2155 				goto done;
2156 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2157 				xfs_btree_mark_sick(cur);
2158 				error = -EFSCORRUPTED;
2159 				goto done;
2160 			}
2161 			if ((error = xfs_btree_delete(cur, &i)))
2162 				goto done;
2163 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2164 				xfs_btree_mark_sick(cur);
2165 				error = -EFSCORRUPTED;
2166 				goto done;
2167 			}
2168 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2169 				goto done;
2170 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2171 				xfs_btree_mark_sick(cur);
2172 				error = -EFSCORRUPTED;
2173 				goto done;
2174 			}
2175 			error = xfs_bmbt_update(cur, &LEFT);
2176 			if (error)
2177 				goto done;
2178 		}
2179 		break;
2180 
2181 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2182 		/*
2183 		 * Setting all of a previous oldext extent to newext.
2184 		 * The left neighbor is contiguous, the right is not.
2185 		 */
2186 		LEFT.br_blockcount += PREV.br_blockcount;
2187 
2188 		xfs_iext_remove(ip, icur, state);
2189 		xfs_iext_prev(ifp, icur);
2190 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2191 		ifp->if_nextents--;
2192 		if (cur == NULL)
2193 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2194 		else {
2195 			rval = XFS_ILOG_CORE;
2196 			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2197 			if (error)
2198 				goto done;
2199 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2200 				xfs_btree_mark_sick(cur);
2201 				error = -EFSCORRUPTED;
2202 				goto done;
2203 			}
2204 			if ((error = xfs_btree_delete(cur, &i)))
2205 				goto done;
2206 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2207 				xfs_btree_mark_sick(cur);
2208 				error = -EFSCORRUPTED;
2209 				goto done;
2210 			}
2211 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2212 				goto done;
2213 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2214 				xfs_btree_mark_sick(cur);
2215 				error = -EFSCORRUPTED;
2216 				goto done;
2217 			}
2218 			error = xfs_bmbt_update(cur, &LEFT);
2219 			if (error)
2220 				goto done;
2221 		}
2222 		break;
2223 
2224 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2225 		/*
2226 		 * Setting all of a previous oldext extent to newext.
2227 		 * The right neighbor is contiguous, the left is not.
2228 		 */
2229 		PREV.br_blockcount += RIGHT.br_blockcount;
2230 		PREV.br_state = new->br_state;
2231 
2232 		xfs_iext_next(ifp, icur);
2233 		xfs_iext_remove(ip, icur, state);
2234 		xfs_iext_prev(ifp, icur);
2235 		xfs_iext_update_extent(ip, state, icur, &PREV);
2236 		ifp->if_nextents--;
2237 
2238 		if (cur == NULL)
2239 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2240 		else {
2241 			rval = XFS_ILOG_CORE;
2242 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2243 			if (error)
2244 				goto done;
2245 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2246 				xfs_btree_mark_sick(cur);
2247 				error = -EFSCORRUPTED;
2248 				goto done;
2249 			}
2250 			if ((error = xfs_btree_delete(cur, &i)))
2251 				goto done;
2252 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2253 				xfs_btree_mark_sick(cur);
2254 				error = -EFSCORRUPTED;
2255 				goto done;
2256 			}
2257 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2258 				goto done;
2259 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2260 				xfs_btree_mark_sick(cur);
2261 				error = -EFSCORRUPTED;
2262 				goto done;
2263 			}
2264 			error = xfs_bmbt_update(cur, &PREV);
2265 			if (error)
2266 				goto done;
2267 		}
2268 		break;
2269 
2270 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2271 		/*
2272 		 * Setting all of a previous oldext extent to newext.
2273 		 * Neither the left nor right neighbors are contiguous with
2274 		 * the new one.
2275 		 */
2276 		PREV.br_state = new->br_state;
2277 		xfs_iext_update_extent(ip, state, icur, &PREV);
2278 
2279 		if (cur == NULL)
2280 			rval = XFS_ILOG_DEXT;
2281 		else {
2282 			rval = 0;
2283 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2284 			if (error)
2285 				goto done;
2286 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2287 				xfs_btree_mark_sick(cur);
2288 				error = -EFSCORRUPTED;
2289 				goto done;
2290 			}
2291 			error = xfs_bmbt_update(cur, &PREV);
2292 			if (error)
2293 				goto done;
2294 		}
2295 		break;
2296 
2297 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2298 		/*
2299 		 * Setting the first part of a previous oldext extent to newext.
2300 		 * The left neighbor is contiguous.
2301 		 */
2302 		LEFT.br_blockcount += new->br_blockcount;
2303 
2304 		old = PREV;
2305 		PREV.br_startoff += new->br_blockcount;
2306 		PREV.br_startblock += new->br_blockcount;
2307 		PREV.br_blockcount -= new->br_blockcount;
2308 
2309 		xfs_iext_update_extent(ip, state, icur, &PREV);
2310 		xfs_iext_prev(ifp, icur);
2311 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2312 
2313 		if (cur == NULL)
2314 			rval = XFS_ILOG_DEXT;
2315 		else {
2316 			rval = 0;
2317 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2318 			if (error)
2319 				goto done;
2320 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2321 				xfs_btree_mark_sick(cur);
2322 				error = -EFSCORRUPTED;
2323 				goto done;
2324 			}
2325 			error = xfs_bmbt_update(cur, &PREV);
2326 			if (error)
2327 				goto done;
2328 			error = xfs_btree_decrement(cur, 0, &i);
2329 			if (error)
2330 				goto done;
2331 			error = xfs_bmbt_update(cur, &LEFT);
2332 			if (error)
2333 				goto done;
2334 		}
2335 		break;
2336 
2337 	case BMAP_LEFT_FILLING:
2338 		/*
2339 		 * Setting the first part of a previous oldext extent to newext.
2340 		 * The left neighbor is not contiguous.
2341 		 */
2342 		old = PREV;
2343 		PREV.br_startoff += new->br_blockcount;
2344 		PREV.br_startblock += new->br_blockcount;
2345 		PREV.br_blockcount -= new->br_blockcount;
2346 
2347 		xfs_iext_update_extent(ip, state, icur, &PREV);
2348 		xfs_iext_insert(ip, icur, new, state);
2349 		ifp->if_nextents++;
2350 
2351 		if (cur == NULL)
2352 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2353 		else {
2354 			rval = XFS_ILOG_CORE;
2355 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2356 			if (error)
2357 				goto done;
2358 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2359 				xfs_btree_mark_sick(cur);
2360 				error = -EFSCORRUPTED;
2361 				goto done;
2362 			}
2363 			error = xfs_bmbt_update(cur, &PREV);
2364 			if (error)
2365 				goto done;
2366 			cur->bc_rec.b = *new;
2367 			if ((error = xfs_btree_insert(cur, &i)))
2368 				goto done;
2369 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2370 				xfs_btree_mark_sick(cur);
2371 				error = -EFSCORRUPTED;
2372 				goto done;
2373 			}
2374 		}
2375 		break;
2376 
2377 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2378 		/*
2379 		 * Setting the last part of a previous oldext extent to newext.
2380 		 * The right neighbor is contiguous with the new allocation.
2381 		 */
2382 		old = PREV;
2383 		PREV.br_blockcount -= new->br_blockcount;
2384 
2385 		RIGHT.br_startoff = new->br_startoff;
2386 		RIGHT.br_startblock = new->br_startblock;
2387 		RIGHT.br_blockcount += new->br_blockcount;
2388 
2389 		xfs_iext_update_extent(ip, state, icur, &PREV);
2390 		xfs_iext_next(ifp, icur);
2391 		xfs_iext_update_extent(ip, state, icur, &RIGHT);
2392 
2393 		if (cur == NULL)
2394 			rval = XFS_ILOG_DEXT;
2395 		else {
2396 			rval = 0;
2397 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2398 			if (error)
2399 				goto done;
2400 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2401 				xfs_btree_mark_sick(cur);
2402 				error = -EFSCORRUPTED;
2403 				goto done;
2404 			}
2405 			error = xfs_bmbt_update(cur, &PREV);
2406 			if (error)
2407 				goto done;
2408 			error = xfs_btree_increment(cur, 0, &i);
2409 			if (error)
2410 				goto done;
2411 			error = xfs_bmbt_update(cur, &RIGHT);
2412 			if (error)
2413 				goto done;
2414 		}
2415 		break;
2416 
2417 	case BMAP_RIGHT_FILLING:
2418 		/*
2419 		 * Setting the last part of a previous oldext extent to newext.
2420 		 * The right neighbor is not contiguous.
2421 		 */
2422 		old = PREV;
2423 		PREV.br_blockcount -= new->br_blockcount;
2424 
2425 		xfs_iext_update_extent(ip, state, icur, &PREV);
2426 		xfs_iext_next(ifp, icur);
2427 		xfs_iext_insert(ip, icur, new, state);
2428 		ifp->if_nextents++;
2429 
2430 		if (cur == NULL)
2431 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2432 		else {
2433 			rval = XFS_ILOG_CORE;
2434 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2435 			if (error)
2436 				goto done;
2437 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2438 				xfs_btree_mark_sick(cur);
2439 				error = -EFSCORRUPTED;
2440 				goto done;
2441 			}
2442 			error = xfs_bmbt_update(cur, &PREV);
2443 			if (error)
2444 				goto done;
2445 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2446 			if (error)
2447 				goto done;
2448 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2449 				xfs_btree_mark_sick(cur);
2450 				error = -EFSCORRUPTED;
2451 				goto done;
2452 			}
2453 			if ((error = xfs_btree_insert(cur, &i)))
2454 				goto done;
2455 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2456 				xfs_btree_mark_sick(cur);
2457 				error = -EFSCORRUPTED;
2458 				goto done;
2459 			}
2460 		}
2461 		break;
2462 
2463 	case 0:
2464 		/*
2465 		 * Setting the middle part of a previous oldext extent to
2466 		 * newext.  Contiguity is impossible here.
2467 		 * One extent becomes three extents.
2468 		 */
2469 		old = PREV;
2470 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2471 
2472 		r[0] = *new;
2473 		r[1].br_startoff = new_endoff;
2474 		r[1].br_blockcount =
2475 			old.br_startoff + old.br_blockcount - new_endoff;
2476 		r[1].br_startblock = new->br_startblock + new->br_blockcount;
2477 		r[1].br_state = PREV.br_state;
2478 
2479 		xfs_iext_update_extent(ip, state, icur, &PREV);
2480 		xfs_iext_next(ifp, icur);
2481 		xfs_iext_insert(ip, icur, &r[1], state);
2482 		xfs_iext_insert(ip, icur, &r[0], state);
2483 		ifp->if_nextents += 2;
2484 
2485 		if (cur == NULL)
2486 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2487 		else {
2488 			rval = XFS_ILOG_CORE;
2489 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2490 			if (error)
2491 				goto done;
2492 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2493 				xfs_btree_mark_sick(cur);
2494 				error = -EFSCORRUPTED;
2495 				goto done;
2496 			}
2497 			/* new right extent - oldext */
2498 			error = xfs_bmbt_update(cur, &r[1]);
2499 			if (error)
2500 				goto done;
2501 			/* new left extent - oldext */
2502 			cur->bc_rec.b = PREV;
2503 			if ((error = xfs_btree_insert(cur, &i)))
2504 				goto done;
2505 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2506 				xfs_btree_mark_sick(cur);
2507 				error = -EFSCORRUPTED;
2508 				goto done;
2509 			}
2510 			/*
2511 			 * Reset the cursor to the position of the new extent
2512 			 * we are about to insert as we can't trust it after
2513 			 * the previous insert.
2514 			 */
2515 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2516 			if (error)
2517 				goto done;
2518 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2519 				xfs_btree_mark_sick(cur);
2520 				error = -EFSCORRUPTED;
2521 				goto done;
2522 			}
2523 			/* new middle extent - newext */
2524 			if ((error = xfs_btree_insert(cur, &i)))
2525 				goto done;
2526 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2527 				xfs_btree_mark_sick(cur);
2528 				error = -EFSCORRUPTED;
2529 				goto done;
2530 			}
2531 		}
2532 		break;
2533 
2534 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2535 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2536 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2537 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2538 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2539 	case BMAP_LEFT_CONTIG:
2540 	case BMAP_RIGHT_CONTIG:
2541 		/*
2542 		 * These cases are all impossible.
2543 		 */
2544 		ASSERT(0);
2545 	}
2546 
2547 	/* update reverse mappings */
2548 	xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2549 
2550 	/* convert to a btree if necessary */
2551 	if (xfs_bmap_needs_btree(ip, whichfork)) {
2552 		int	tmp_logflags;	/* partial log flag return val */
2553 
2554 		ASSERT(cur == NULL);
2555 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2556 				&tmp_logflags, whichfork);
2557 		*logflagsp |= tmp_logflags;
2558 		if (error)
2559 			goto done;
2560 	}
2561 
2562 	/* clear out the allocated field, done with it now in any case. */
2563 	if (cur) {
2564 		cur->bc_bmap.allocated = 0;
2565 		*curp = cur;
2566 	}
2567 
2568 	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2569 done:
2570 	*logflagsp |= rval;
2571 	return error;
2572 #undef	LEFT
2573 #undef	RIGHT
2574 #undef	PREV
2575 }
2576 
2577 /*
2578  * Convert a hole to a delayed allocation.
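 *
 * The new delalloc extent is merged with adjacent delalloc extents where
 * possible.  When extents are merged the worst-case indirect block
 * reservation for the combined range is recomputed, and any excess
 * reservation is returned to the free block count.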
2579  */
2580 STATIC void
2581 xfs_bmap_add_extent_hole_delay(
2582 	xfs_inode_t		*ip,	/* incore inode pointer */
2583 	int			whichfork,
2584 	struct xfs_iext_cursor	*icur,
2585 	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
2586 {
2587 	struct xfs_ifork	*ifp;	/* inode fork pointer */
2588 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
2589 	xfs_filblks_t		newlen=0;	/* new indirect size */
2590 	xfs_filblks_t		oldlen=0;	/* old indirect size */
2591 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
2592 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2593 	xfs_filblks_t		temp;	 /* temp for indirect calculations */
2594 
2595 	ifp = xfs_ifork_ptr(ip, whichfork);
2596 	ASSERT(isnullstartblock(new->br_startblock));
2597 
2598 	/*
2599 	 * Check and set flags if this segment has a left neighbor
2600 	 */
2601 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2602 		state |= BMAP_LEFT_VALID;
2603 		if (isnullstartblock(left.br_startblock))
2604 			state |= BMAP_LEFT_DELAY;
2605 	}
2606 
2607 	/*
2608 	 * Check and set flags if the current (right) segment exists.
2609 	 * If it doesn't exist, we're converting the hole at end-of-file.
2610 	 */
2611 	if (xfs_iext_get_extent(ifp, icur, &right)) {
2612 		state |= BMAP_RIGHT_VALID;
2613 		if (isnullstartblock(right.br_startblock))
2614 			state |= BMAP_RIGHT_DELAY;
2615 	}
2616 
2617 	/*
2618 	 * Set contiguity flags on the left and right neighbors.
2619 	 * Don't let extents get too large, even if the pieces are contiguous.
2620 	 */
2621 	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2622 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
2623 	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2624 	    xfs_bmap_same_rtgroup(ip, whichfork, &left, new))
2625 		state |= BMAP_LEFT_CONTIG;
2626 
2627 	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2628 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
2629 	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2630 	    (!(state & BMAP_LEFT_CONTIG) ||
2631 	     (left.br_blockcount + new->br_blockcount +
2632 	      right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)) &&
2633 	    xfs_bmap_same_rtgroup(ip, whichfork, new, &right))
2634 		state |= BMAP_RIGHT_CONTIG;
2635 
2636 	/*
2637 	 * Switch out based on the contiguity flags.
2638 	 */
2639 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2640 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2641 		/*
2642 		 * New allocation is contiguous with delayed allocations
2643 		 * on the left and on the right.
2644 		 * Merge all three into a single extent record.
2645 		 */
2646 		temp = left.br_blockcount + new->br_blockcount +
2647 			right.br_blockcount;
2648 
2649 		oldlen = startblockval(left.br_startblock) +
2650 			startblockval(new->br_startblock) +
2651 			startblockval(right.br_startblock);
2652 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2653 					 oldlen);
2654 		left.br_startblock = nullstartblock(newlen);
2655 		left.br_blockcount = temp;
2656 
2657 		xfs_iext_remove(ip, icur, state);
2658 		xfs_iext_prev(ifp, icur);
2659 		xfs_iext_update_extent(ip, state, icur, &left);
2660 		break;
2661 
2662 	case BMAP_LEFT_CONTIG:
2663 		/*
2664 		 * New allocation is contiguous with a delayed allocation
2665 		 * on the left.
2666 		 * Merge the new allocation with the left neighbor.
2667 		 */
2668 		temp = left.br_blockcount + new->br_blockcount;
2669 
2670 		oldlen = startblockval(left.br_startblock) +
2671 			startblockval(new->br_startblock);
2672 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2673 					 oldlen);
2674 		left.br_blockcount = temp;
2675 		left.br_startblock = nullstartblock(newlen);
2676 
2677 		xfs_iext_prev(ifp, icur);
2678 		xfs_iext_update_extent(ip, state, icur, &left);
2679 		break;
2680 
2681 	case BMAP_RIGHT_CONTIG:
2682 		/*
2683 		 * New allocation is contiguous with a delayed allocation
2684 		 * on the right.
2685 		 * Merge the new allocation with the right neighbor.
2686 		 */
2687 		temp = new->br_blockcount + right.br_blockcount;
2688 		oldlen = startblockval(new->br_startblock) +
2689 			startblockval(right.br_startblock);
2690 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2691 					 oldlen);
2692 		right.br_startoff = new->br_startoff;
2693 		right.br_startblock = nullstartblock(newlen);
2694 		right.br_blockcount = temp;
2695 		xfs_iext_update_extent(ip, state, icur, &right);
2696 		break;
2697 
2698 	case 0:
2699 		/*
2700 		 * New allocation is not contiguous with another
2701 		 * delayed allocation.
2702 		 * Insert a new entry.
2703 		 */
2704 		oldlen = newlen = 0;
2705 		xfs_iext_insert(ip, icur, new, state);
2706 		break;
2707 	}
2708 	if (oldlen != newlen) {
2709 		ASSERT(oldlen > newlen);
2710 		xfs_add_fdblocks(ip->i_mount, oldlen - newlen);
2711 
2712 		/*
2713 		 * Nothing to do for disk quota accounting here.
2714 		 */
2715 		xfs_mod_delalloc(ip, 0, (int64_t)newlen - oldlen);
2716 	}
2717 }
2718 
2719 /*
2720  * Convert a hole to a real allocation.
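 *
 * The new extent is merged with contiguous real extents on either side where
 * possible.  The in-core extent list and, if present, the bmap btree are
 * updated, a reverse mapping is recorded unless XFS_BMAPI_NORMAP is set, and
 * the fork is converted to btree format if it now holds too many extents for
 * extents format.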
2721  */
2722 STATIC int				/* error */
2723 xfs_bmap_add_extent_hole_real(
2724 	struct xfs_trans	*tp,
2725 	struct xfs_inode	*ip,
2726 	int			whichfork,
2727 	struct xfs_iext_cursor	*icur,
2728 	struct xfs_btree_cur	**curp,
2729 	struct xfs_bmbt_irec	*new,
2730 	int			*logflagsp,
2731 	uint32_t		flags)
2732 {
2733 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
2734 	struct xfs_mount	*mp = ip->i_mount;
2735 	struct xfs_btree_cur	*cur = *curp;
2736 	int			error;	/* error return value */
2737 	int			i;	/* temp state */
2738 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
2739 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
2740 	int			rval=0;	/* return value (logging flags) */
2741 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2742 	struct xfs_bmbt_irec	old;
2743 
2744 	ASSERT(!isnullstartblock(new->br_startblock));
2745 	ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
2746 
2747 	XFS_STATS_INC(mp, xs_add_exlist);
2748 
2749 	/*
2750 	 * Check and set flags if this segment has a left neighbor.
2751 	 */
2752 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2753 		state |= BMAP_LEFT_VALID;
2754 		if (isnullstartblock(left.br_startblock))
2755 			state |= BMAP_LEFT_DELAY;
2756 	}
2757 
2758 	/*
2759 	 * Check and set flags if this segment has a current value.
2760 	 * Not true if we're inserting into the "hole" at eof.
2761 	 */
2762 	if (xfs_iext_get_extent(ifp, icur, &right)) {
2763 		state |= BMAP_RIGHT_VALID;
2764 		if (isnullstartblock(right.br_startblock))
2765 			state |= BMAP_RIGHT_DELAY;
2766 	}
2767 
2768 	/*
2769 	 * We're inserting a real allocation between "left" and "right".
2770 	 * Set the contiguity flags.  Don't let extents get too large.
2771 	 */
2772 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2773 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
2774 	    left.br_startblock + left.br_blockcount == new->br_startblock &&
2775 	    left.br_state == new->br_state &&
2776 	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2777 	    xfs_bmap_same_rtgroup(ip, whichfork, &left, new))
2778 		state |= BMAP_LEFT_CONTIG;
2779 
2780 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2781 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
2782 	    new->br_startblock + new->br_blockcount == right.br_startblock &&
2783 	    new->br_state == right.br_state &&
2784 	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2785 	    (!(state & BMAP_LEFT_CONTIG) ||
2786 	     left.br_blockcount + new->br_blockcount +
2787 	     right.br_blockcount <= XFS_MAX_BMBT_EXTLEN) &&
2788 	    xfs_bmap_same_rtgroup(ip, whichfork, new, &right))
2789 		state |= BMAP_RIGHT_CONTIG;
2790 
2791 	error = 0;
2792 	/*
2793 	 * Select which case we're in here, and implement it.
2794 	 */
2795 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2796 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2797 		/*
2798 		 * New allocation is contiguous with real allocations on the
2799 		 * left and on the right.
2800 		 * Merge all three into a single extent record.
2801 		 */
2802 		left.br_blockcount += new->br_blockcount + right.br_blockcount;
2803 
2804 		xfs_iext_remove(ip, icur, state);
2805 		xfs_iext_prev(ifp, icur);
2806 		xfs_iext_update_extent(ip, state, icur, &left);
2807 		ifp->if_nextents--;
2808 
2809 		if (cur == NULL) {
2810 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2811 		} else {
2812 			rval = XFS_ILOG_CORE;
2813 			error = xfs_bmbt_lookup_eq(cur, &right, &i);
2814 			if (error)
2815 				goto done;
2816 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2817 				xfs_btree_mark_sick(cur);
2818 				error = -EFSCORRUPTED;
2819 				goto done;
2820 			}
2821 			error = xfs_btree_delete(cur, &i);
2822 			if (error)
2823 				goto done;
2824 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2825 				xfs_btree_mark_sick(cur);
2826 				error = -EFSCORRUPTED;
2827 				goto done;
2828 			}
2829 			error = xfs_btree_decrement(cur, 0, &i);
2830 			if (error)
2831 				goto done;
2832 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2833 				xfs_btree_mark_sick(cur);
2834 				error = -EFSCORRUPTED;
2835 				goto done;
2836 			}
2837 			error = xfs_bmbt_update(cur, &left);
2838 			if (error)
2839 				goto done;
2840 		}
2841 		break;
2842 
2843 	case BMAP_LEFT_CONTIG:
2844 		/*
2845 		 * New allocation is contiguous with a real allocation
2846 		 * on the left.
2847 		 * Merge the new allocation with the left neighbor.
2848 		 */
2849 		old = left;
2850 		left.br_blockcount += new->br_blockcount;
2851 
2852 		xfs_iext_prev(ifp, icur);
2853 		xfs_iext_update_extent(ip, state, icur, &left);
2854 
2855 		if (cur == NULL) {
2856 			rval = xfs_ilog_fext(whichfork);
2857 		} else {
2858 			rval = 0;
2859 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2860 			if (error)
2861 				goto done;
2862 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2863 				xfs_btree_mark_sick(cur);
2864 				error = -EFSCORRUPTED;
2865 				goto done;
2866 			}
2867 			error = xfs_bmbt_update(cur, &left);
2868 			if (error)
2869 				goto done;
2870 		}
2871 		break;
2872 
2873 	case BMAP_RIGHT_CONTIG:
2874 		/*
2875 		 * New allocation is contiguous with a real allocation
2876 		 * on the right.
2877 		 * Merge the new allocation with the right neighbor.
2878 		 */
2879 		old = right;
2880 
2881 		right.br_startoff = new->br_startoff;
2882 		right.br_startblock = new->br_startblock;
2883 		right.br_blockcount += new->br_blockcount;
2884 		xfs_iext_update_extent(ip, state, icur, &right);
2885 
2886 		if (cur == NULL) {
2887 			rval = xfs_ilog_fext(whichfork);
2888 		} else {
2889 			rval = 0;
2890 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2891 			if (error)
2892 				goto done;
2893 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2894 				xfs_btree_mark_sick(cur);
2895 				error = -EFSCORRUPTED;
2896 				goto done;
2897 			}
2898 			error = xfs_bmbt_update(cur, &right);
2899 			if (error)
2900 				goto done;
2901 		}
2902 		break;
2903 
2904 	case 0:
2905 		/*
2906 		 * New allocation is not contiguous with another
2907 		 * real allocation.
2908 		 * Insert a new entry.
2909 		 */
2910 		xfs_iext_insert(ip, icur, new, state);
2911 		ifp->if_nextents++;
2912 
2913 		if (cur == NULL) {
2914 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2915 		} else {
2916 			rval = XFS_ILOG_CORE;
2917 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2918 			if (error)
2919 				goto done;
2920 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2921 				xfs_btree_mark_sick(cur);
2922 				error = -EFSCORRUPTED;
2923 				goto done;
2924 			}
2925 			error = xfs_btree_insert(cur, &i);
2926 			if (error)
2927 				goto done;
2928 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2929 				xfs_btree_mark_sick(cur);
2930 				error = -EFSCORRUPTED;
2931 				goto done;
2932 			}
2933 		}
2934 		break;
2935 	}
2936 
2937 	/* add reverse mapping unless caller opted out */
2938 	if (!(flags & XFS_BMAPI_NORMAP))
2939 		xfs_rmap_map_extent(tp, ip, whichfork, new);
2940 
2941 	/* convert to a btree if necessary */
2942 	if (xfs_bmap_needs_btree(ip, whichfork)) {
2943 		int	tmp_logflags;	/* partial log flag return val */
2944 
2945 		ASSERT(cur == NULL);
2946 		error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2947 				&tmp_logflags, whichfork);
2948 		*logflagsp |= tmp_logflags;
2949 		cur = *curp;
2950 		if (error)
2951 			goto done;
2952 	}
2953 
2954 	/* clear out the allocated field, done with it now in any case. */
2955 	if (cur)
2956 		cur->bc_bmap.allocated = 0;
2957 
2958 	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2959 done:
2960 	*logflagsp |= rval;
2961 	return error;
2962 }
2963 
2964 /*
2965  * Functions used in the extent read, allocate and remove paths
2966  */
2967 
2968 /*
2969  * Adjust the size of the new extent based on i_extsize and rt extsize.
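 *
 * On success *offp and *lenp are updated to an aligned offset/length pair
 * that covers the original request where possible (very large requests may
 * be trimmed so the length stays under XFS_MAX_BMBT_EXTLEN).  For realtime
 * allocations the result must also be a whole number of rt extents; -EINVAL
 * is returned if that cannot be arranged while still covering the original
 * request.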
2970  */
2971 int
2972 xfs_bmap_extsize_align(
2973 	xfs_mount_t	*mp,
2974 	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
2975 	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
2976 	xfs_extlen_t	extsz,		/* align to this extent size */
2977 	int		rt,		/* is this a realtime inode? */
2978 	int		eof,		/* is extent at end-of-file? */
2979 	int		delay,		/* creating delalloc extent? */
2980 	int		convert,	/* overwriting unwritten extent? */
2981 	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
2982 	xfs_extlen_t	*lenp)		/* in/out: aligned length */
2983 {
2984 	xfs_fileoff_t	orig_off;	/* original offset */
2985 	xfs_extlen_t	orig_alen;	/* original length */
2986 	xfs_fileoff_t	orig_end;	/* original off+len */
2987 	xfs_fileoff_t	nexto;		/* next file offset */
2988 	xfs_fileoff_t	prevo;		/* previous file offset */
2989 	xfs_fileoff_t	align_off;	/* temp for offset */
2990 	xfs_extlen_t	align_alen;	/* temp for length */
2991 	xfs_extlen_t	temp;		/* temp for calculations */
2992 
2993 	if (convert)
2994 		return 0;
2995 
2996 	orig_off = align_off = *offp;
2997 	orig_alen = align_alen = *lenp;
2998 	orig_end = orig_off + orig_alen;
2999 
3000 	/*
3001 	 * If this request overlaps an existing extent, then don't
3002 	 * attempt to perform any additional alignment.
3003 	 */
3004 	if (!delay && !eof &&
3005 	    (orig_off >= gotp->br_startoff) &&
3006 	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
3007 		return 0;
3008 	}
3009 
3010 	/*
3011 	 * If the file offset is unaligned vs. the extent size
3012 	 * we need to align it.  This will be possible unless
3013 	 * the file was previously written with a kernel that didn't
3014 	 * perform this alignment, or if a truncate shot us in the
3015 	 * foot.
3016 	 */
3017 	div_u64_rem(orig_off, extsz, &temp);
3018 	if (temp) {
3019 		align_alen += temp;
3020 		align_off -= temp;
3021 	}
3022 
3023 	/* Same adjustment for the end of the requested area. */
3024 	temp = (align_alen % extsz);
3025 	if (temp)
3026 		align_alen += extsz - temp;
3027 
3028 	/*
3029 	 * For large extent hint sizes, the aligned extent might be larger than
3030 	 * XFS_MAX_BMBT_EXTLEN. In that case, reduce the size by an extsz so
3031 	 * that it pulls the length back under XFS_MAX_BMBT_EXTLEN. The outer
3032 	 * allocation loops handle short allocation just fine, so it is safe to
3033 	 * do this. We only want to do it when we are forced to, though, because
3034 	 * it means more allocation operations are required.
3035 	 */
3036 	while (align_alen > XFS_MAX_BMBT_EXTLEN)
3037 		align_alen -= extsz;
3038 	ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);
3039 
3040 	/*
3041 	 * If the previous block overlaps with this proposed allocation
3042 	 * then move the start forward without adjusting the length.
3043 	 */
3044 	if (prevp->br_startoff != NULLFILEOFF) {
3045 		if (prevp->br_startblock == HOLESTARTBLOCK)
3046 			prevo = prevp->br_startoff;
3047 		else
3048 			prevo = prevp->br_startoff + prevp->br_blockcount;
3049 	} else
3050 		prevo = 0;
3051 	if (align_off != orig_off && align_off < prevo)
3052 		align_off = prevo;
3053 	/*
3054 	 * If the next block overlaps with this proposed allocation
3055 	 * then move the start back without adjusting the length,
3056 	 * but not before offset 0.
3057 	 * This may of course make the start overlap previous block,
3058 	 * and if we hit the offset 0 limit then the next block
3059 	 * can still overlap too.
3060 	 */
3061 	if (!eof && gotp->br_startoff != NULLFILEOFF) {
3062 		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3063 		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3064 			nexto = gotp->br_startoff + gotp->br_blockcount;
3065 		else
3066 			nexto = gotp->br_startoff;
3067 	} else
3068 		nexto = NULLFILEOFF;
3069 	if (!eof &&
3070 	    align_off + align_alen != orig_end &&
3071 	    align_off + align_alen > nexto)
3072 		align_off = nexto > align_alen ? nexto - align_alen : 0;
3073 	/*
3074 	 * If we're now overlapping the next or previous extent, that
3075 	 * means we can't fit an extsz piece in this hole.  Just move
3076 	 * the start forward to the first valid spot and set
3077 	 * the length so we hit the end.
3078 	 */
3079 	if (align_off != orig_off && align_off < prevo)
3080 		align_off = prevo;
3081 	if (align_off + align_alen != orig_end &&
3082 	    align_off + align_alen > nexto &&
3083 	    nexto != NULLFILEOFF) {
3084 		ASSERT(nexto > prevo);
3085 		align_alen = nexto - align_off;
3086 	}
3087 
3088 	/*
3089 	 * If realtime, and the result isn't a multiple of the realtime
3090 	 * extent size we need to remove blocks until it is.
3091 	 */
3092 	if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) {
3093 		/*
3094 		 * We're not covering the original request, or
3095 		 * we won't be able to once we fix the length.
3096 		 */
3097 		if (orig_off < align_off ||
3098 		    orig_end > align_off + align_alen ||
3099 		    align_alen - temp < orig_alen)
3100 			return -EINVAL;
3101 		/*
3102 		 * Try to fix it by moving the start up.
3103 		 */
3104 		if (align_off + temp <= orig_off) {
3105 			align_alen -= temp;
3106 			align_off += temp;
3107 		}
3108 		/*
3109 		 * Try to fix it by moving the end in.
3110 		 */
3111 		else if (align_off + align_alen - temp >= orig_end)
3112 			align_alen -= temp;
3113 		/*
3114 		 * Set the start to the minimum then trim the length.
3115 		 */
3116 		else {
3117 			align_alen -= orig_off - align_off;
3118 			align_off = orig_off;
3119 			align_alen -= xfs_extlen_to_rtxmod(mp, align_alen);
3120 		}
3121 		/*
3122 		 * Result doesn't cover the request, fail it.
3123 		 */
3124 		if (orig_off < align_off || orig_end > align_off + align_alen)
3125 			return -EINVAL;
3126 	} else {
3127 		ASSERT(orig_off >= align_off);
3128 		/* see XFS_MAX_BMBT_EXTLEN handling above */
3129 		ASSERT(orig_end <= align_off + align_alen ||
3130 		       align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
3131 	}
3132 
3133 #ifdef DEBUG
3134 	if (!eof && gotp->br_startoff != NULLFILEOFF)
3135 		ASSERT(align_off + align_alen <= gotp->br_startoff);
3136 	if (prevp->br_startoff != NULLFILEOFF)
3137 		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3138 #endif
3139 
3140 	*lenp = align_alen;
3141 	*offp = align_off;
3142 	return 0;
3143 }
3144 
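/*
 * Decide whether candidate block @x can be used as a locality hint next to
 * block @y: it must be within the bounds of the filesystem and, where the
 * address space is divided into allocation groups or realtime groups, in the
 * same group as @y.
 */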
3145 static inline bool
3146 xfs_bmap_adjacent_valid(
3147 	struct xfs_bmalloca	*ap,
3148 	xfs_fsblock_t		x,
3149 	xfs_fsblock_t		y)
3150 {
3151 	struct xfs_mount	*mp = ap->ip->i_mount;
3152 
3153 	if (XFS_IS_REALTIME_INODE(ap->ip) &&
3154 	    (ap->datatype & XFS_ALLOC_USERDATA)) {
3155 		if (!xfs_has_rtgroups(mp))
3156 			return x < mp->m_sb.sb_rblocks;
3157 
3158 		return xfs_rtb_to_rgno(mp, x) == xfs_rtb_to_rgno(mp, y) &&
3159 			xfs_rtb_to_rgno(mp, x) < mp->m_sb.sb_rgcount &&
3160 			xfs_rtb_to_rtx(mp, x) < mp->m_sb.sb_rgextents;
3161 
3162 	}
3163 
3164 	return XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) &&
3165 		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount &&
3166 		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks;
3167 }
3168 
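/*
 * Heuristic limit used by xfs_bmap_adjacent(): only preserve the file offset
 * gap between the new allocation and a neighbouring extent on disk if the
 * gap is no larger than this many times the allocation length; otherwise
 * allocate directly adjacent to the neighbour.
 */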
3169 #define XFS_ALLOC_GAP_UNITS	4
3170 
3171 /* returns true if ap->blkno was modified */
3172 bool
3173 xfs_bmap_adjacent(
3174 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
3175 {
3176 	xfs_fsblock_t		adjust;		/* adjustment to block numbers */
3177 
3178 	/*
3179 	 * If allocating at eof, and there's a previous real block,
3180 	 * try to use its last block as our starting point.
3181 	 */
3182 	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3183 	    !isnullstartblock(ap->prev.br_startblock) &&
3184 	    xfs_bmap_adjacent_valid(ap,
3185 			ap->prev.br_startblock + ap->prev.br_blockcount,
3186 			ap->prev.br_startblock)) {
3187 		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3188 		/*
3189 		 * Adjust for the gap between prevp and us.
3190 		 */
3191 		adjust = ap->offset -
3192 			(ap->prev.br_startoff + ap->prev.br_blockcount);
3193 		if (adjust && xfs_bmap_adjacent_valid(ap, ap->blkno + adjust,
3194 				ap->prev.br_startblock))
3195 			ap->blkno += adjust;
3196 		return true;
3197 	}
3198 	/*
3199 	 * If not at eof, then compare the two neighbor blocks.
3200 	 * Figure out whether either one gives us a good starting point,
3201 	 * and pick the better one.
3202 	 */
3203 	if (!ap->eof) {
3204 		xfs_fsblock_t	gotbno;		/* right side block number */
3205 		xfs_fsblock_t	gotdiff=0;	/* right side difference */
3206 		xfs_fsblock_t	prevbno;	/* left side block number */
3207 		xfs_fsblock_t	prevdiff=0;	/* left side difference */
3208 
3209 		/*
3210 		 * If there's a previous (left) block, select a requested
3211 		 * start block based on it.
3212 		 */
3213 		if (ap->prev.br_startoff != NULLFILEOFF &&
3214 		    !isnullstartblock(ap->prev.br_startblock) &&
3215 		    (prevbno = ap->prev.br_startblock +
3216 			       ap->prev.br_blockcount) &&
3217 		    xfs_bmap_adjacent_valid(ap, prevbno,
3218 				ap->prev.br_startblock)) {
3219 			/*
3220 			 * Calculate gap to end of previous block.
3221 			 */
3222 			adjust = prevdiff = ap->offset -
3223 				(ap->prev.br_startoff +
3224 				 ap->prev.br_blockcount);
3225 			/*
3226 			 * Figure the startblock based on the previous block's
3227 			 * end and the gap size.
3228 			 * Heuristic!
3229 			 * If the gap is large relative to the piece we're
3230 			 * allocating, or using it gives us an invalid block
3231 			 * number, then just use the end of the previous block.
3232 			 */
3233 			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3234 			    xfs_bmap_adjacent_valid(ap, prevbno + prevdiff,
3235 					ap->prev.br_startblock))
3236 				prevbno += adjust;
3237 			else
3238 				prevdiff += adjust;
3239 		}
3240 		/*
3241 		 * No previous block or can't follow it, just default.
3242 		 */
3243 		else
3244 			prevbno = NULLFSBLOCK;
3245 		/*
3246 		 * If there's a following (right) block, select a requested
3247 		 * start block based on it.
3248 		 */
3249 		if (!isnullstartblock(ap->got.br_startblock)) {
3250 			/*
3251 			 * Calculate gap to start of next block.
3252 			 */
3253 			adjust = gotdiff = ap->got.br_startoff - ap->offset;
3254 			/*
3255 			 * Figure the startblock based on the next block's
3256 			 * start and the gap size.
3257 			 */
3258 			gotbno = ap->got.br_startblock;
3259 			/*
3260 			 * Heuristic!
3261 			 * If the gap is large relative to the piece we're
3262 			 * allocating, or using it gives us an invalid block
3263 			 * number, then just use the start of the next block
3264 			 * offset by our length.
3265 			 */
3266 			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3267 			    xfs_bmap_adjacent_valid(ap, gotbno - gotdiff,
3268 					gotbno))
3269 				gotbno -= adjust;
3270 			else if (xfs_bmap_adjacent_valid(ap, gotbno - ap->length,
3271 					gotbno)) {
3272 				gotbno -= ap->length;
3273 				gotdiff += adjust - ap->length;
3274 			} else
3275 				gotdiff += adjust;
3276 		}
3277 		/*
3278 		 * No next block, just default.
3279 		 */
3280 		else
3281 			gotbno = NULLFSBLOCK;
3282 		/*
3283 		 * If both valid, pick the better one, else the only good
3284 		 * one, else ap->blkno is already set (to 0 or the inode block).
3285 		 */
3286 		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) {
3287 			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3288 			return true;
3289 		}
3290 		if (prevbno != NULLFSBLOCK) {
3291 			ap->blkno = prevbno;
3292 			return true;
3293 		}
3294 		if (gotbno != NULLFSBLOCK) {
3295 			ap->blkno = gotbno;
3296 			return true;
3297 		}
3298 	}
3299 
3300 	return false;
3301 }
3302 
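/*
 * If this AG has a longer contiguous free extent than *blen, update *blen.
 * The AGF is read with a trylock so that callers iterating over AGs can skip
 * a busy AG (-EAGAIN) instead of blocking on it.
 */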
3303 int
3304 xfs_bmap_longest_free_extent(
3305 	struct xfs_perag	*pag,
3306 	struct xfs_trans	*tp,
3307 	xfs_extlen_t		*blen)
3308 {
3309 	xfs_extlen_t		longest;
3310 	int			error = 0;
3311 
3312 	if (!xfs_perag_initialised_agf(pag)) {
3313 		error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
3314 				NULL);
3315 		if (error)
3316 			return error;
3317 	}
3318 
3319 	longest = xfs_alloc_longest_free_extent(pag,
3320 				xfs_alloc_min_freelist(pag_mount(pag), pag),
3321 				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3322 	if (*blen < longest)
3323 		*blen = longest;
3324 
3325 	return 0;
3326 }
3327 
3328 static xfs_extlen_t
3329 xfs_bmap_select_minlen(
3330 	struct xfs_bmalloca	*ap,
3331 	struct xfs_alloc_arg	*args,
3332 	xfs_extlen_t		blen)
3333 {
3334 
3335 	/*
3336 	 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), AGs
3337 	 * whose AGF could not be locked were skipped, so blen may underestimate
	 * the longest free extent and there may still be enough contiguous free
	 * space for this request.
3338 	 */
3339 	if (blen < ap->minlen)
3340 		return ap->minlen;
3341 
3342 	/*
3343 	 * If the best seen length is less than the request length,
3344 	 * use the best as the minimum, otherwise we've got the maxlen we
3345 	 * were asked for.
3346 	 */
3347 	if (blen < args->maxlen)
3348 		return blen;
3349 	return args->maxlen;
3350 }
3351 
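/*
 * Choose args->total and args->minlen for this allocation.  In low space
 * mode both are clamped to ap->minlen; otherwise scan the AGs, starting at
 * the AG of ap->blkno, for the longest free extent and derive the minimum
 * allocation length from the best length seen.
 */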
3352 static int
3353 xfs_bmap_btalloc_select_lengths(
3354 	struct xfs_bmalloca	*ap,
3355 	struct xfs_alloc_arg	*args,
3356 	xfs_extlen_t		*blen)
3357 {
3358 	struct xfs_mount	*mp = args->mp;
3359 	struct xfs_perag	*pag;
3360 	xfs_agnumber_t		agno, startag;
3361 	int			error = 0;
3362 
3363 	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3364 		args->total = ap->minlen;
3365 		args->minlen = ap->minlen;
3366 		return 0;
3367 	}
3368 
3369 	args->total = ap->total;
3370 	startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
3371 	if (startag == NULLAGNUMBER)
3372 		startag = 0;
3373 
3374 	*blen = 0;
3375 	for_each_perag_wrap(mp, startag, agno, pag) {
3376 		error = xfs_bmap_longest_free_extent(pag, args->tp, blen);
3377 		if (error && error != -EAGAIN)
3378 			break;
3379 		error = 0;
3380 		if (*blen >= args->maxlen)
3381 			break;
3382 	}
3383 	if (pag)
3384 		xfs_perag_rele(pag);
3385 
3386 	args->minlen = xfs_bmap_select_minlen(ap, args, *blen);
3387 	return error;
3388 }
3389 
3390 /* Update all inode and quota accounting for the allocation we just did. */
3391 void
3392 xfs_bmap_alloc_account(
3393 	struct xfs_bmalloca	*ap)
3394 {
3395 	bool			isrt = XFS_IS_REALTIME_INODE(ap->ip) &&
3396 					!(ap->flags & XFS_BMAPI_ATTRFORK);
3397 	uint			fld;
3398 
3399 	if (ap->flags & XFS_BMAPI_COWFORK) {
3400 		/*
3401 		 * COW fork blocks are in-core only and thus are treated as
3402 		 * in-core quota reservation (like delalloc blocks) even when
3403 		 * converted to real blocks. The quota reservation is not
3404 		 * accounted to disk until blocks are remapped to the data
3405 		 * fork. So if these blocks were previously delalloc, we
3406 		 * already have quota reservation and there's nothing to do
3407 		 * yet.
3408 		 */
3409 		if (ap->wasdel) {
3410 			xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
3411 			return;
3412 		}
3413 
3414 		/*
3415 		 * Otherwise, we've allocated blocks in a hole. The transaction
3416 		 * has acquired in-core quota reservation for this extent.
3417 		 * Rather than account these as real blocks, however, we reduce
3418 		 * the transaction quota reservation based on the allocation.
3419 		 * This essentially transfers the transaction quota reservation
3420 		 * to that of a delalloc extent.
3421 		 */
3422 		ap->ip->i_delayed_blks += ap->length;
3423 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip, isrt ?
3424 				XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
3425 				-(long)ap->length);
3426 		return;
3427 	}
3428 
3429 	/* data/attr fork only */
3430 	ap->ip->i_nblocks += ap->length;
3431 	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3432 	if (ap->wasdel) {
3433 		ap->ip->i_delayed_blks -= ap->length;
3434 		xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
3435 		fld = isrt ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_DELBCOUNT;
3436 	} else {
3437 		fld = isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
3438 	}
3439 
3440 	xfs_trans_mod_dquot_byino(ap->tp, ap->ip, fld, ap->length);
3441 }
3442 
3443 static int
3444 xfs_bmap_compute_alignments(
3445 	struct xfs_bmalloca	*ap,
3446 	struct xfs_alloc_arg	*args)
3447 {
3448 	struct xfs_mount	*mp = args->mp;
3449 	xfs_extlen_t		align = 0; /* minimum allocation alignment */
3450 	int			stripe_align = 0;
3451 
3452 	/* stripe alignment for allocation is determined by mount parameters */
3453 	if (mp->m_swidth && xfs_has_swalloc(mp))
3454 		stripe_align = mp->m_swidth;
3455 	else if (mp->m_dalign)
3456 		stripe_align = mp->m_dalign;
3457 
3458 	if (ap->flags & XFS_BMAPI_COWFORK)
3459 		align = xfs_get_cowextsz_hint(ap->ip);
3460 	else if (ap->datatype & XFS_ALLOC_USERDATA)
3461 		align = xfs_get_extsz_hint(ap->ip);
3462 	if (align) {
3463 		if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
3464 					ap->eof, 0, ap->conv, &ap->offset,
3465 					&ap->length))
3466 			ASSERT(0);
3467 		ASSERT(ap->length);
3468 	}
3469 
3470 	/* apply extent size hints if obtained earlier */
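	/*
	 * args->prod and args->mod express the preferred length granularity
	 * and remainder to the allocator so that, where possible, the
	 * allocation ends on an extent size hint (or page size) aligned file
	 * offset.
	 */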
3471 	if (align) {
3472 		args->prod = align;
3473 		div_u64_rem(ap->offset, args->prod, &args->mod);
3474 		if (args->mod)
3475 			args->mod = args->prod - args->mod;
3476 	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3477 		args->prod = 1;
3478 		args->mod = 0;
3479 	} else {
3480 		args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3481 		div_u64_rem(ap->offset, args->prod, &args->mod);
3482 		if (args->mod)
3483 			args->mod = args->prod - args->mod;
3484 	}
3485 
3486 	return stripe_align;
3487 }
3488 
3489 static void
3490 xfs_bmap_process_allocated_extent(
3491 	struct xfs_bmalloca	*ap,
3492 	struct xfs_alloc_arg	*args,
3493 	xfs_fileoff_t		orig_offset,
3494 	xfs_extlen_t		orig_length)
3495 {
3496 	ap->blkno = args->fsbno;
3497 	ap->length = args->len;
3498 	/*
3499 	 * If the extent size hint is active, we tried to round the
3500 	 * caller's allocation request offset down to extsz and the
3501 	 * length up to another extsz boundary.  If we found a free
3502 	 * extent we mapped it in starting at this new offset.  If the
3503 	 * newly mapped space isn't long enough to cover any of the
3504 	 * range of offsets that was originally requested, move the
3505 	 * mapping up so that we can fill as much of the caller's
3506 	 * original request as possible.  Free space is apparently
3507 	 * very fragmented so we're unlikely to be able to satisfy the
3508 	 * hints anyway.
3509 	 */
3510 	if (ap->length <= orig_length)
3511 		ap->offset = orig_offset;
3512 	else if (ap->offset + ap->length < orig_offset + orig_length)
3513 		ap->offset = orig_offset + orig_length - ap->length;
3514 	xfs_bmap_alloc_account(ap);
3515 }
3516 
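/*
 * Allocate the minimum possible extent for the debug-only error injection
 * path (see XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT).  Only a minlen of a single
 * block is supported; otherwise no allocation is attempted.
 */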
3517 static int
3518 xfs_bmap_exact_minlen_extent_alloc(
3519 	struct xfs_bmalloca	*ap,
3520 	struct xfs_alloc_arg	*args)
3521 {
3522 	if (ap->minlen != 1) {
3523 		args->fsbno = NULLFSBLOCK;
3524 		return 0;
3525 	}
3526 
3527 	args->alloc_minlen_only = 1;
3528 	args->minlen = args->maxlen = ap->minlen;
3529 	args->total = ap->total;
3530 
3531 	/*
3532 	 * Unlike the longest extent available in an AG, we don't track
3533 	 * the length of an AG's shortest extent.
3534 	 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
3535 	 * hence we can afford to start traversing from the 0th AG since
3536 	 * we need not be concerned about a drop in performance in
3537 	 * "debug only" code paths.
3538 	 */
3539 	ap->blkno = XFS_AGB_TO_FSB(ap->ip->i_mount, 0, 0);
3540 
3541 	/*
3542 	 * Call xfs_bmap_btalloc_low_space here as it first does a "normal" AG
3543 	 * iteration and then drops args->total to args->minlen, which might be
3544 	 * required to find an allocation for the transaction reservation when
3545 	 * the file system is very full.
3546 	 */
3547 	return xfs_bmap_btalloc_low_space(ap, args);
3548 }
3549 
3550 /*
3551  * If we are not low on available data blocks and we are allocating at
3552  * EOF, optimise allocation for contiguous file extension and/or stripe
3553  * alignment of the new extent.
3554  *
3555  * NOTE: ap->aeof is only set if the allocation length is >= the
3556  * stripe unit and the allocation offset is at the end of file.
3557  */
3558 static int
3559 xfs_bmap_btalloc_at_eof(
3560 	struct xfs_bmalloca	*ap,
3561 	struct xfs_alloc_arg	*args,
3562 	xfs_extlen_t		blen,
3563 	int			stripe_align,
3564 	bool			ag_only)
3565 {
3566 	struct xfs_mount	*mp = args->mp;
3567 	struct xfs_perag	*caller_pag = args->pag;
3568 	int			error;
3569 
3570 	/*
3571 	 * If there are already extents in the file, try an exact EOF block
3572 	 * allocation to extend the file as a contiguous extent. If that fails,
3573 	 * or it's the first allocation in a file, just try for a stripe aligned
3574 	 * allocation.
3575 	 */
3576 	if (ap->offset) {
3577 		xfs_extlen_t	nextminlen = 0;
3578 
3579 		/*
3580 		 * Compute the minlen+alignment for the next case.  Set slop so
3581 		 * that the value of minlen+alignment+slop doesn't go up between
3582 		 * the calls.
3583 		 */
3584 		args->alignment = 1;
3585 		if (blen > stripe_align && blen <= args->maxlen)
3586 			nextminlen = blen - stripe_align;
3587 		else
3588 			nextminlen = args->minlen;
3589 		if (nextminlen + stripe_align > args->minlen + 1)
3590 			args->minalignslop = nextminlen + stripe_align -
3591 					args->minlen - 1;
3592 		else
3593 			args->minalignslop = 0;
3594 
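		/*
		 * First try an exact allocation at the current EOF block so
		 * the new extent is physically contiguous with the last extent
		 * in the file.  Take a temporary perag reference if the caller
		 * didn't supply one.
		 */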
3595 		if (!caller_pag)
3596 			args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno));
3597 		error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
3598 		if (!caller_pag) {
3599 			xfs_perag_put(args->pag);
3600 			args->pag = NULL;
3601 		}
3602 		if (error)
3603 			return error;
3604 
3605 		if (args->fsbno != NULLFSBLOCK)
3606 			return 0;
3607 		/*
3608 		 * Exact allocation failed. Reset to try an aligned allocation
3609 		 * according to the original allocation specification.
3610 		 */
3611 		args->alignment = stripe_align;
3612 		args->minlen = nextminlen;
3613 		args->minalignslop = 0;
3614 	} else {
3615 		/*
3616 		 * Adjust minlen to try and preserve alignment if we
3617 		 * can't guarantee an aligned maxlen extent.
3618 		 */
3619 		args->alignment = stripe_align;
3620 		if (blen > args->alignment &&
3621 		    blen <= args->maxlen + args->alignment)
3622 			args->minlen = blen - args->alignment;
3623 		args->minalignslop = 0;
3624 	}
3625 
3626 	if (ag_only) {
3627 		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3628 	} else {
3629 		args->pag = NULL;
3630 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3631 		ASSERT(args->pag == NULL);
3632 		args->pag = caller_pag;
3633 	}
3634 	if (error)
3635 		return error;
3636 
3637 	if (args->fsbno != NULLFSBLOCK)
3638 		return 0;
3639 
3640 	/*
3641 	 * Allocation failed, so return the allocation args to their
3642 	 * original non-aligned state so the caller can proceed on allocation
3643 	 * failure as if this function was never called.
3644 	 */
3645 	args->alignment = 1;
3646 	return 0;
3647 }
3648 
3649 /*
3650  * We have failed multiple allocation attempts so now are in a low space
3651  * We have failed multiple allocation attempts, so we are now in a low space
3652  * allocation situation. Try a locality-first, full filesystem, minimum length
3653  * allocation whilst still maintaining the necessary total block reservation
3654  *
3655  * If that fails, we are now critically low on space, so perform a last resort
3656  * allocation attempt: no reserve, no locality, blocking, minimum length, full
3657  * filesystem free space scan. We also indicate to future allocations in this
3658  * transaction that we are critically low on space so they don't waste time on
3659  * allocation modes that are unlikely to succeed.
3660  */
3661 int
3662 xfs_bmap_btalloc_low_space(
3663 	struct xfs_bmalloca	*ap,
3664 	struct xfs_alloc_arg	*args)
3665 {
3666 	int			error;
3667 
3668 	if (args->minlen > ap->minlen) {
3669 		args->minlen = ap->minlen;
3670 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3671 		if (error || args->fsbno != NULLFSBLOCK)
3672 			return error;
3673 	}
3674 
3675 	/* Last ditch attempt before failure is declared. */
3676 	args->total = ap->minlen;
3677 	error = xfs_alloc_vextent_first_ag(args, 0);
3678 	if (error)
3679 		return error;
3680 	ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3681 	return 0;
3682 }
3683 
3684 static int
3685 xfs_bmap_btalloc_filestreams(
3686 	struct xfs_bmalloca	*ap,
3687 	struct xfs_alloc_arg	*args,
3688 	int			stripe_align)
3689 {
3690 	xfs_extlen_t		blen = 0;
3691 	int			error = 0;
3692 
3693 
3694 	error = xfs_filestream_select_ag(ap, args, &blen);
3695 	if (error)
3696 		return error;
3697 	ASSERT(args->pag);
3698 
3699 	/*
3700 	 * If we are in low space mode, then optimal allocation will fail so
3701 	 * prepare for minimal allocation and jump to the low space algorithm
3702 	 * immediately.
3703 	 */
3704 	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3705 		args->minlen = ap->minlen;
3706 		ASSERT(args->fsbno == NULLFSBLOCK);
3707 		goto out_low_space;
3708 	}
3709 
3710 	args->minlen = xfs_bmap_select_minlen(ap, args, blen);
3711 	if (ap->aeof)
3712 		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3713 				true);
3714 
3715 	if (!error && args->fsbno == NULLFSBLOCK)
3716 		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3717 
3718 out_low_space:
3719 	/*
3720 	 * We are now done with the perag reference for the filestreams
3721 	 * association provided by xfs_filestream_select_ag(). Release it now as
3722 	 * we've either succeeded, had a fatal error or we are out of space and
3723 	 * need to do a full filesystem scan for free space which will take its
3724 	 * own references.
3725 	 */
3726 	xfs_perag_rele(args->pag);
3727 	args->pag = NULL;
3728 	if (error || args->fsbno != NULLFSBLOCK)
3729 		return error;
3730 
3731 	return xfs_bmap_btalloc_low_space(ap, args);
3732 }
3733 
3734 static int
3735 xfs_bmap_btalloc_best_length(
3736 	struct xfs_bmalloca	*ap,
3737 	struct xfs_alloc_arg	*args,
3738 	int			stripe_align)
3739 {
3740 	xfs_extlen_t		blen = 0;
3741 	int			error;
3742 
3743 	ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
3744 	xfs_bmap_adjacent(ap);
3745 
3746 	/*
3747 	 * Search for an allocation group with a single extent large enough for
3748 	 * the request.  If one isn't found, then adjust the minimum allocation
3749 	 * size to the largest space found.
3750 	 */
3751 	error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
3752 	if (error)
3753 		return error;
3754 
3755 	/*
3756 	 * Don't attempt optimal EOF allocation if previous allocations barely
3757 	 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
3758 	 * optimal or even aligned allocations in this case, so don't waste time
3759 	 * trying.
3760 	 */
3761 	if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
3762 		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3763 				false);
3764 		if (error || args->fsbno != NULLFSBLOCK)
3765 			return error;
3766 	}
3767 
3768 	error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3769 	if (error || args->fsbno != NULLFSBLOCK)
3770 		return error;
3771 
3772 	return xfs_bmap_btalloc_low_space(ap, args);
3773 }
3774 
3775 static int
3776 xfs_bmap_btalloc(
3777 	struct xfs_bmalloca	*ap)
3778 {
3779 	struct xfs_mount	*mp = ap->ip->i_mount;
3780 	struct xfs_alloc_arg	args = {
3781 		.tp		= ap->tp,
3782 		.mp		= mp,
3783 		.fsbno		= NULLFSBLOCK,
3784 		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
3785 		.minleft	= ap->minleft,
3786 		.wasdel		= ap->wasdel,
3787 		.resv		= XFS_AG_RESV_NONE,
3788 		.datatype	= ap->datatype,
3789 		.alignment	= 1,
3790 		.minalignslop	= 0,
3791 	};
3792 	xfs_fileoff_t		orig_offset;
3793 	xfs_extlen_t		orig_length;
3794 	int			error;
3795 	int			stripe_align;
3796 
3797 	ASSERT(ap->length);
3798 	orig_offset = ap->offset;
3799 	orig_length = ap->length;
3800 
3801 	stripe_align = xfs_bmap_compute_alignments(ap, &args);
3802 
3803 	/* Trim the allocation back to the maximum an AG can fit. */
3804 	args.maxlen = min(ap->length, mp->m_ag_max_usable);
3805 
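	/*
	 * Pick the allocation strategy: the debug-only exact minlen error
	 * injection path, the filestreams allocator for user data in
	 * filestream inodes, or the default best-length allocator.
	 */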
3806 	if (unlikely(XFS_TEST_ERROR(false, mp,
3807 			XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
3808 		error = xfs_bmap_exact_minlen_extent_alloc(ap, &args);
3809 	else if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3810 			xfs_inode_is_filestream(ap->ip))
3811 		error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align);
3812 	else
3813 		error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
3814 	if (error)
3815 		return error;
3816 
3817 	if (args.fsbno != NULLFSBLOCK) {
3818 		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3819 			orig_length);
3820 	} else {
3821 		ap->blkno = NULLFSBLOCK;
3822 		ap->length = 0;
3823 	}
3824 	return 0;
3825 }
3826 
3827 /* Trim extent to fit a logical block range. */
3828 void
3829 xfs_trim_extent(
3830 	struct xfs_bmbt_irec	*irec,
3831 	xfs_fileoff_t		bno,
3832 	xfs_filblks_t		len)
3833 {
3834 	xfs_fileoff_t		distance;
3835 	xfs_fileoff_t		end = bno + len;
3836 
3837 	if (irec->br_startoff + irec->br_blockcount <= bno ||
3838 	    irec->br_startoff >= end) {
3839 		irec->br_blockcount = 0;
3840 		return;
3841 	}
3842 
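	/*
	 * Trim the front of the mapping: a delalloc mapping drops its indlen
	 * encoding and becomes a plain DELAYSTARTBLOCK, while a real mapping
	 * has its start block advanced by the same distance as the offset.
	 */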
3843 	if (irec->br_startoff < bno) {
3844 		distance = bno - irec->br_startoff;
3845 		if (isnullstartblock(irec->br_startblock))
3846 			irec->br_startblock = DELAYSTARTBLOCK;
3847 		if (irec->br_startblock != DELAYSTARTBLOCK &&
3848 		    irec->br_startblock != HOLESTARTBLOCK)
3849 			irec->br_startblock += distance;
3850 		irec->br_startoff += distance;
3851 		irec->br_blockcount -= distance;
3852 	}
3853 
3854 	if (end < irec->br_startoff + irec->br_blockcount) {
3855 		distance = irec->br_startoff + irec->br_blockcount - end;
3856 		irec->br_blockcount -= distance;
3857 	}
3858 }
3859 
3860 /*
3861  * Trim the returned map to the required bounds
3862  */
3863 STATIC void
3864 xfs_bmapi_trim_map(
3865 	struct xfs_bmbt_irec	*mval,
3866 	struct xfs_bmbt_irec	*got,
3867 	xfs_fileoff_t		*bno,
3868 	xfs_filblks_t		len,
3869 	xfs_fileoff_t		obno,
3870 	xfs_fileoff_t		end,
3871 	int			n,
3872 	uint32_t		flags)
3873 {
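	/*
	 * If the caller wants the entire extent, or the found extent ends at
	 * or before the original start offset, return it untrimmed (mapping a
	 * delalloc start block to DELAYSTARTBLOCK).
	 */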
3874 	if ((flags & XFS_BMAPI_ENTIRE) ||
3875 	    got->br_startoff + got->br_blockcount <= obno) {
3876 		*mval = *got;
3877 		if (isnullstartblock(got->br_startblock))
3878 			mval->br_startblock = DELAYSTARTBLOCK;
3879 		return;
3880 	}
3881 
3882 	if (obno > *bno)
3883 		*bno = obno;
3884 	ASSERT((*bno >= obno) || (n == 0));
3885 	ASSERT(*bno < end);
3886 	mval->br_startoff = *bno;
3887 	if (isnullstartblock(got->br_startblock))
3888 		mval->br_startblock = DELAYSTARTBLOCK;
3889 	else
3890 		mval->br_startblock = got->br_startblock +
3891 					(*bno - got->br_startoff);
3892 	/*
3893 	 * Return the minimum of what we got and what we asked for as
3894 	 * the length.  We can use the len variable here because it is
3895 	 * modified below and we could have been there before coming
3896 	 * here if the first part of the allocation didn't overlap what
3897 	 * was asked for.
3898 	 */
3899 	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3900 			got->br_blockcount - (*bno - got->br_startoff));
3901 	mval->br_state = got->br_state;
3902 	ASSERT(mval->br_blockcount <= len);
3903 	return;
3904 }
3905 
3906 /*
3907  * Update and validate the extent map to return
3908  */
3909 STATIC void
3910 xfs_bmapi_update_map(
3911 	struct xfs_bmbt_irec	**map,
3912 	xfs_fileoff_t		*bno,
3913 	xfs_filblks_t		*len,
3914 	xfs_fileoff_t		obno,
3915 	xfs_fileoff_t		end,
3916 	int			*n,
3917 	uint32_t		flags)
3918 {
3919 	xfs_bmbt_irec_t	*mval = *map;
3920 
3921 	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3922 	       ((mval->br_startoff + mval->br_blockcount) <= end));
3923 	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3924 	       (mval->br_startoff < obno));
3925 
3926 	*bno = mval->br_startoff + mval->br_blockcount;
3927 	*len = end - *bno;
3928 	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3929 		/* update previous map with new information */
3930 		ASSERT(mval->br_startblock == mval[-1].br_startblock);
3931 		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3932 		ASSERT(mval->br_state == mval[-1].br_state);
3933 		mval[-1].br_blockcount = mval->br_blockcount;
3934 		mval[-1].br_state = mval->br_state;
3935 	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3936 		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
3937 		   mval[-1].br_startblock != HOLESTARTBLOCK &&
3938 		   mval->br_startblock == mval[-1].br_startblock +
3939 					  mval[-1].br_blockcount &&
3940 		   mval[-1].br_state == mval->br_state) {
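		/* Merge with the previous map: physically contiguous real extents. */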
3941 		ASSERT(mval->br_startoff ==
3942 		       mval[-1].br_startoff + mval[-1].br_blockcount);
3943 		mval[-1].br_blockcount += mval->br_blockcount;
3944 	} else if (*n > 0 &&
3945 		   mval->br_startblock == DELAYSTARTBLOCK &&
3946 		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
3947 		   mval->br_startoff ==
3948 		   mval[-1].br_startoff + mval[-1].br_blockcount) {
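		/* Merge with the previous map: adjacent delalloc extents. */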
3949 		mval[-1].br_blockcount += mval->br_blockcount;
3950 		mval[-1].br_state = mval->br_state;
3951 	} else if (!((*n == 0) &&
3952 		     ((mval->br_startoff + mval->br_blockcount) <=
3953 		      obno))) {
3954 		mval++;
3955 		(*n)++;
3956 	}
3957 	*map = mval;
3958 }
3959 
3960 /*
3961  * Map file blocks to filesystem blocks without allocation.
3962  */
3963 int
3964 xfs_bmapi_read(
3965 	struct xfs_inode	*ip,
3966 	xfs_fileoff_t		bno,
3967 	xfs_filblks_t		len,
3968 	struct xfs_bmbt_irec	*mval,
3969 	int			*nmap,
3970 	uint32_t		flags)
3971 {
3972 	struct xfs_mount	*mp = ip->i_mount;
3973 	int			whichfork = xfs_bmapi_whichfork(flags);
3974 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
3975 	struct xfs_bmbt_irec	got;
3976 	xfs_fileoff_t		obno;
3977 	xfs_fileoff_t		end;
3978 	struct xfs_iext_cursor	icur;
3979 	int			error;
3980 	bool			eof = false;
3981 	int			n = 0;
3982 
3983 	ASSERT(*nmap >= 1);
3984 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
3985 	xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
3986 
3987 	if (WARN_ON_ONCE(!ifp)) {
3988 		xfs_bmap_mark_sick(ip, whichfork);
3989 		return -EFSCORRUPTED;
3990 	}
3991 
3992 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
3993 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
3994 		xfs_bmap_mark_sick(ip, whichfork);
3995 		return -EFSCORRUPTED;
3996 	}
3997 
3998 	if (xfs_is_shutdown(mp))
3999 		return -EIO;
4000 
4001 	XFS_STATS_INC(mp, xs_blk_mapr);
4002 
4003 	error = xfs_iread_extents(NULL, ip, whichfork);
4004 	if (error)
4005 		return error;
4006 
4007 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
4008 		eof = true;
4009 	end = bno + len;
4010 	obno = bno;
4011 
4012 	while (bno < end && n < *nmap) {
4013 		/* Reading past eof, act as though there's a hole up to end. */
4014 		if (eof)
4015 			got.br_startoff = end;
4016 		if (got.br_startoff > bno) {
4017 			/* Reading in a hole.  */
4018 			mval->br_startoff = bno;
4019 			mval->br_startblock = HOLESTARTBLOCK;
4020 			mval->br_blockcount =
4021 				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4022 			mval->br_state = XFS_EXT_NORM;
4023 			bno += mval->br_blockcount;
4024 			len -= mval->br_blockcount;
4025 			mval++;
4026 			n++;
4027 			continue;
4028 		}
4029 
4030 		/* set up the extent map to return. */
4031 		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4032 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4033 
4034 		/* If we're done, stop now. */
4035 		if (bno >= end || n >= *nmap)
4036 			break;
4037 
4038 		/* Else go on to the next record. */
4039 		if (!xfs_iext_next_extent(ifp, &icur, &got))
4040 			eof = true;
4041 	}
4042 	*nmap = n;
4043 	return 0;
4044 }
4045 
4046 /*
4047  * Add a delayed allocation extent to an inode. Blocks are reserved from the
4048  * global pool and the extent inserted into the inode in-core extent tree.
4049  *
4050  * On entry, got refers to the first extent beyond the offset of the extent to
4051  * allocate or eof is specified if no such extent exists. On return, got refers
4052  * to the extent record that was inserted to the inode fork.
4053  *
4054  * Note that the allocated extent may have been merged with contiguous extents
4055  * during insertion into the inode fork. Thus, got does not reflect the current
4056  * state of the inode fork on return. If necessary, the caller can use icur to
4057  * look up the updated record in the inode fork.
4058  */
4059 int
4060 xfs_bmapi_reserve_delalloc(
4061 	struct xfs_inode	*ip,
4062 	int			whichfork,
4063 	xfs_fileoff_t		off,
4064 	xfs_filblks_t		len,
4065 	xfs_filblks_t		prealloc,
4066 	struct xfs_bmbt_irec	*got,
4067 	struct xfs_iext_cursor	*icur,
4068 	int			eof)
4069 {
4070 	struct xfs_mount	*mp = ip->i_mount;
4071 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4072 	xfs_extlen_t		alen;
4073 	xfs_extlen_t		indlen;
4074 	uint64_t		fdblocks;
4075 	int			error;
4076 	xfs_fileoff_t		aoff;
4077 	bool			use_cowextszhint =
4078 					whichfork == XFS_COW_FORK && !prealloc;
4079 
4080 retry:
4081 	/*
4082 	 * Cap the alloc length. Keep track of prealloc so we know whether to
4083 	 * tag the inode before we return.
4084 	 */
4085 	aoff = off;
4086 	alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
4087 	if (!eof)
4088 		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4089 	if (prealloc && alen >= len)
4090 		prealloc = alen - len;
4091 
4092 	/*
4093 	 * If we're targeting the COW fork but aren't creating a speculative
4094 	 * posteof preallocation, try to expand the reservation to align with
4095 	 * the COW extent size hint if there's sufficient free space.
4096 	 *
4097 	 * Unlike the data fork, the CoW cancellation functions will free all
4098 	 * the reservations at inactivation, so we don't require that every
4099 	 * delalloc reservation have a dirty pagecache.
4100 	 */
4101 	if (use_cowextszhint) {
4102 		struct xfs_bmbt_irec	prev;
4103 		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);
4104 
4105 		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
4106 			prev.br_startoff = NULLFILEOFF;
4107 
4108 		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
4109 					       1, 0, &aoff, &alen);
4110 		ASSERT(!error);
4111 	}
4112 
4113 	/*
4114 	 * Make a transaction-less quota reservation for delayed allocation
4115 	 * blocks.  This number gets adjusted later.  If the reservation fails,
4116 	 * we can return immediately because no blocks have been allocated yet.
4117 	 */
4118 	error = xfs_quota_reserve_blkres(ip, alen);
4119 	if (error)
4120 		goto out;
4121 
4122 	/*
4123 	 * Adjust the sb counters for alen and indlen separately since they can
4124 	 * come from different pools (free rtextents vs. fdblocks below).
4125 	 */
4126 	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4127 	ASSERT(indlen > 0);
4128 
4129 	fdblocks = indlen;
4130 	if (XFS_IS_REALTIME_INODE(ip)) {
4131 		error = xfs_dec_frextents(mp, xfs_blen_to_rtbxlen(mp, alen));
4132 		if (error)
4133 			goto out_unreserve_quota;
4134 	} else {
4135 		fdblocks += alen;
4136 	}
4137 
4138 	error = xfs_dec_fdblocks(mp, fdblocks, false);
4139 	if (error)
4140 		goto out_unreserve_frextents;
4141 
4142 	ip->i_delayed_blks += alen;
4143 	xfs_mod_delalloc(ip, alen, indlen);
4144 
4145 	got->br_startoff = aoff;
4146 	got->br_startblock = nullstartblock(indlen);
4147 	got->br_blockcount = alen;
4148 	got->br_state = XFS_EXT_NORM;
4149 
4150 	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
4151 
4152 	/*
4153 	 * Tag the inode if blocks were preallocated. Note that COW fork
4154 	 * preallocation can occur at the start or end of the extent, even when
4155 	 * prealloc == 0, so we must also check the aligned offset and length.
4156 	 */
4157 	if (whichfork == XFS_DATA_FORK && prealloc)
4158 		xfs_inode_set_eofblocks_tag(ip);
4159 	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4160 		xfs_inode_set_cowblocks_tag(ip);
4161 
4162 	return 0;
4163 
4164 out_unreserve_frextents:
4165 	if (XFS_IS_REALTIME_INODE(ip))
4166 		xfs_add_frextents(mp, xfs_blen_to_rtbxlen(mp, alen));
4167 out_unreserve_quota:
4168 	if (XFS_IS_QUOTA_ON(mp))
4169 		xfs_quota_unreserve_blkres(ip, alen);
4170 out:
4171 	if (error == -ENOSPC || error == -EDQUOT) {
4172 		trace_xfs_delalloc_enospc(ip, off, len);
4173 
4174 		if (prealloc || use_cowextszhint) {
4175 			/* retry without any preallocation */
4176 			use_cowextszhint = false;
4177 			prealloc = 0;
4178 			goto retry;
4179 		}
4180 	}
4181 	return error;
4182 }
4183 
4184 static int
4185 xfs_bmapi_allocate(
4186 	struct xfs_bmalloca	*bma)
4187 {
4188 	struct xfs_mount	*mp = bma->ip->i_mount;
4189 	int			whichfork = xfs_bmapi_whichfork(bma->flags);
4190 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4191 	int			error;
4192 
4193 	ASSERT(bma->length > 0);
4194 	ASSERT(bma->length <= XFS_MAX_BMBT_EXTLEN);
4195 
4196 	if (bma->flags & XFS_BMAPI_CONTIG)
4197 		bma->minlen = bma->length;
4198 	else
4199 		bma->minlen = 1;
4200 
4201 	if (!(bma->flags & XFS_BMAPI_METADATA)) {
4202 		/*
4203 		 * For the data and COW fork, the first data in the file is
4204 		 * treated differently to all other allocations. For the
4205 		 * attribute fork, we only need to ensure the allocated range
4206 		 * is not on the busy list.
4207 		 */
4208 		bma->datatype = XFS_ALLOC_NOBUSY;
4209 		if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
4210 			bma->datatype |= XFS_ALLOC_USERDATA;
4211 			if (bma->offset == 0)
4212 				bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4213 
4214 			if (mp->m_dalign && bma->length >= mp->m_dalign) {
4215 				error = xfs_bmap_isaeof(bma, whichfork);
4216 				if (error)
4217 					return error;
4218 			}
4219 		}
4220 	}
4221 
4222 	if ((bma->datatype & XFS_ALLOC_USERDATA) &&
4223 	    XFS_IS_REALTIME_INODE(bma->ip))
4224 		error = xfs_bmap_rtalloc(bma);
4225 	else
4226 		error = xfs_bmap_btalloc(bma);
4227 	if (error)
4228 		return error;
4229 	if (bma->blkno == NULLFSBLOCK)
4230 		return -ENOSPC;
4231 
4232 	if (WARN_ON_ONCE(!xfs_valid_startblock(bma->ip, bma->blkno))) {
4233 		xfs_bmap_mark_sick(bma->ip, whichfork);
4234 		return -EFSCORRUPTED;
4235 	}
4236 
4237 	if (bma->flags & XFS_BMAPI_ZERO) {
4238 		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
4239 		if (error)
4240 			return error;
4241 	}
4242 
4243 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
4244 		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4245 	/*
4246 	 * Bump the number of extents we've allocated
4247 	 * in this call.
4248 	 */
4249 	bma->nallocs++;
4250 
4251 	if (bma->cur && bma->wasdel)
4252 		bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
4253 
4254 	bma->got.br_startoff = bma->offset;
4255 	bma->got.br_startblock = bma->blkno;
4256 	bma->got.br_blockcount = bma->length;
4257 	bma->got.br_state = XFS_EXT_NORM;
4258 
4259 	if (bma->flags & XFS_BMAPI_PREALLOC)
4260 		bma->got.br_state = XFS_EXT_UNWRITTEN;
4261 
4262 	if (bma->wasdel)
4263 		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4264 	else
4265 		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4266 				whichfork, &bma->icur, &bma->cur, &bma->got,
4267 				&bma->logflags, bma->flags);
4268 	if (error)
4269 		return error;
4270 
4271 	/*
4272 	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4273 	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4274 	 * the neighbouring ones.
4275 	 */
4276 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4277 
4278 	ASSERT(bma->got.br_startoff <= bma->offset);
4279 	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4280 	       bma->offset + bma->length);
4281 	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4282 	       bma->got.br_state == XFS_EXT_UNWRITTEN);
4283 	return 0;
4284 }
4285 
4286 STATIC int
4287 xfs_bmapi_convert_unwritten(
4288 	struct xfs_bmalloca	*bma,
4289 	struct xfs_bmbt_irec	*mval,
4290 	xfs_filblks_t		len,
4291 	uint32_t		flags)
4292 {
4293 	int			whichfork = xfs_bmapi_whichfork(flags);
4294 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4295 	int			tmp_logflags = 0;
4296 	int			error;
4297 
4298 	/* check if we need to do unwritten->real conversion */
4299 	if (mval->br_state == XFS_EXT_UNWRITTEN &&
4300 	    (flags & XFS_BMAPI_PREALLOC))
4301 		return 0;
4302 
4303 	/* check if we need to do real->unwritten conversion */
4304 	if (mval->br_state == XFS_EXT_NORM &&
4305 	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4306 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4307 		return 0;
4308 
4309 	/*
4310 	 * Modify (by adding) the state flag, if writing.
4311 	 */
4312 	ASSERT(mval->br_blockcount <= len);
4313 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
4314 		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4315 					bma->ip, whichfork);
4316 	}
4317 	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4318 				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4319 
4320 	/*
4321 	 * Before insertion into the bmbt, zero the range being converted
4322 	 * if required.
4323 	 */
4324 	if (flags & XFS_BMAPI_ZERO) {
4325 		error = xfs_zero_extent(bma->ip, mval->br_startblock,
4326 					mval->br_blockcount);
4327 		if (error)
4328 			return error;
4329 	}
4330 
4331 	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4332 			&bma->icur, &bma->cur, mval, &tmp_logflags);
4333 	/*
4334 	 * Log the inode core unconditionally in the unwritten extent conversion
4335 	 * path because the conversion might not have done so (e.g., if the
4336 	 * extent count hasn't changed). We need to make sure the inode is dirty
4337 	 * in the transaction for the sake of fsync(), even if nothing has
4338 	 * changed, because fsync() will not force the log for this transaction
4339 	 * unless it sees the inode pinned.
4340 	 *
4341 	 * Note: If we're only converting cow fork extents, there aren't
4342 	 * any on-disk updates to make, so we don't need to log anything.
4343 	 */
4344 	if (whichfork != XFS_COW_FORK)
4345 		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4346 	if (error)
4347 		return error;
4348 
4349 	/*
4350 	 * Update our extent pointer, given that
4351 	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4352 	 * of the neighbouring ones.
4353 	 */
4354 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4355 
4356 	/*
4357 	 * We may have combined previously unwritten space with written space,
4358 	 * so generate another request.
4359 	 */
4360 	if (mval->br_blockcount < len)
4361 		return -EAGAIN;
4362 	return 0;
4363 }
4364 
4365 xfs_extlen_t
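/*
 * Return the number of free blocks that must be left in the AG after this
 * allocation so that the fork's bmap btree can still be modified: nothing if
 * the transaction is already committed to an AG, one block for an extents
 * format fork, or the current btree height plus one for a btree format fork.
 */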
4366 xfs_bmapi_minleft(
4367 	struct xfs_trans	*tp,
4368 	struct xfs_inode	*ip,
4369 	int			fork)
4370 {
4371 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, fork);
4372 
4373 	if (tp && tp->t_highest_agno != NULLAGNUMBER)
4374 		return 0;
4375 	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4376 		return 1;
4377 	return be16_to_cpu(ifp->if_broot->bb_level) + 1;
4378 }
4379 
4380 /*
4381  * Log whatever the flags say, even if error.  Otherwise we might miss detecting
4382  * a case where the data is changed, there's an error, and it's not logged so we
4383  * don't shut down when we should.  Don't bother logging extents/btree changes if
4384  * we converted to the other format.
4385  */
4386 static void
4387 xfs_bmapi_finish(
4388 	struct xfs_bmalloca	*bma,
4389 	int			whichfork,
4390 	int			error)
4391 {
4392 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4393 
4394 	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4395 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4396 		bma->logflags &= ~xfs_ilog_fext(whichfork);
4397 	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4398 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
4399 		bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4400 
4401 	if (bma->logflags)
4402 		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4403 	if (bma->cur)
4404 		xfs_btree_del_cursor(bma->cur, error);
4405 }
4406 
4407 /*
4408  * Map file blocks to filesystem blocks, and allocate blocks or convert the
4409  * extent state if necessary.  Detailed behaviour is controlled by the flags
4410  * parameter.  Only allocates blocks from a single allocation group, to avoid
4411  * locking problems.
4412  *
4413  * Returns 0 on success and places the extent mappings in mval.  nmaps is used
4414  * as an input/output parameter where the caller specifies the maximum number
4415  * of mappings that may be returned and xfs_bmapi_write passes back the number
4416  * of mappings (including existing mappings) it found.
4417  *
4418  * Returns a negative error code on failure, including -ENOSPC when it could not
4419  * allocate any blocks and -ENOSR when it did allocate blocks to convert a
4420  * delalloc range, but those blocks were before the passed in range.
4421  */
4422 int
4423 xfs_bmapi_write(
4424 	struct xfs_trans	*tp,		/* transaction pointer */
4425 	struct xfs_inode	*ip,		/* incore inode */
4426 	xfs_fileoff_t		bno,		/* starting file offs. mapped */
4427 	xfs_filblks_t		len,		/* length to map in file */
4428 	uint32_t		flags,		/* XFS_BMAPI_... */
4429 	xfs_extlen_t		total,		/* total blocks needed */
4430 	struct xfs_bmbt_irec	*mval,		/* output: map values */
4431 	int			*nmap)		/* i/o: mval size/count */
4432 {
4433 	struct xfs_bmalloca	bma = {
4434 		.tp		= tp,
4435 		.ip		= ip,
4436 		.total		= total,
4437 	};
4438 	struct xfs_mount	*mp = ip->i_mount;
4439 	int			whichfork = xfs_bmapi_whichfork(flags);
4440 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4441 	xfs_fileoff_t		end;		/* end of mapped file region */
4442 	bool			eof = false;	/* after the end of extents */
4443 	int			error;		/* error return */
4444 	int			n;		/* current extent index */
4445 	xfs_fileoff_t		obno;		/* old block number (offset) */
4446 
4447 #ifdef DEBUG
4448 	xfs_fileoff_t		orig_bno;	/* original block number value */
4449 	int			orig_flags;	/* original flags arg value */
4450 	xfs_filblks_t		orig_len;	/* original value of len arg */
4451 	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
4452 	int			orig_nmap;	/* original value of *nmap */
4453 
4454 	orig_bno = bno;
4455 	orig_len = len;
4456 	orig_flags = flags;
4457 	orig_mval = mval;
4458 	orig_nmap = *nmap;
4459 #endif
4460 
4461 	ASSERT(*nmap >= 1);
4462 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4463 	ASSERT(tp != NULL);
4464 	ASSERT(len > 0);
4465 	ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
4466 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4467 	ASSERT(!(flags & XFS_BMAPI_REMAP));
4468 
4469 	/* zeroing is currently only for data extents, not metadata */
4470 	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4471 			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4472 	/*
4473 	 * We can allocate unwritten extents or pre-zero allocated blocks,
4474 	 * but it makes no sense to do both at once: that would zero the
4475 	 * unwritten extent twice while still leaving it an unwritten
4476 	 * extent.
4477 	 */
4478 	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4479 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4480 
4481 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4482 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4483 		xfs_bmap_mark_sick(ip, whichfork);
4484 		return -EFSCORRUPTED;
4485 	}
4486 
4487 	if (xfs_is_shutdown(mp))
4488 		return -EIO;
4489 
4490 	XFS_STATS_INC(mp, xs_blk_mapw);
4491 
4492 	error = xfs_iread_extents(tp, ip, whichfork);
4493 	if (error)
4494 		goto error0;
4495 
4496 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4497 		eof = true;
4498 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4499 		bma.prev.br_startoff = NULLFILEOFF;
4500 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4501 
4502 	n = 0;
4503 	end = bno + len;
4504 	obno = bno;
4505 	while (bno < end && n < *nmap) {
4506 		bool			need_alloc = false, wasdelay = false;
4507 
4508 		/* in hole or beyond EOF? */
4509 		if (eof || bma.got.br_startoff > bno) {
4510 			/*
4511 			 * CoW fork conversions should /never/ hit EOF or
4512 			 * holes.  There should always be something for us
4513 			 * to work on.
4514 			 */
4515 			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4516 			         (flags & XFS_BMAPI_COWFORK)));
4517 
4518 			need_alloc = true;
4519 		} else if (isnullstartblock(bma.got.br_startblock)) {
4520 			wasdelay = true;
4521 		}
4522 
4523 		/*
4524 		 * First, deal with the hole before the allocated space
4525 		 * that we found, if any.
4526 		 */
4527 		if (need_alloc || wasdelay) {
4528 			bma.eof = eof;
4529 			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4530 			bma.wasdel = wasdelay;
4531 			bma.offset = bno;
4532 			bma.flags = flags;
4533 
4534 			/*
4535 			 * There's a 32/64 bit type mismatch between the
4536 			 * allocation length request (which can be 64 bits in
4537 			 * length) and the bma length request, which is
4538 			 * xfs_extlen_t and therefore 32 bits. Hence we have to
4539 			 * be careful and do the min() using the larger type to
4540 			 * avoid overflows.
4541 			 */
4542 			bma.length = XFS_FILBLKS_MIN(len, XFS_MAX_BMBT_EXTLEN);
4543 
4544 			if (wasdelay) {
4545 				bma.length = XFS_FILBLKS_MIN(bma.length,
4546 					bma.got.br_blockcount -
4547 					(bno - bma.got.br_startoff));
4548 			} else {
4549 				if (!eof)
4550 					bma.length = XFS_FILBLKS_MIN(bma.length,
4551 						bma.got.br_startoff - bno);
4552 			}
4553 
4554 			ASSERT(bma.length > 0);
4555 			error = xfs_bmapi_allocate(&bma);
4556 			if (error) {
4557 				/*
4558 				 * If we already allocated space in a previous
4559 				 * iteration, return what we got so far when
4560 				 * running out of space.
4561 				 */
4562 				if (error == -ENOSPC && bma.nallocs)
4563 					break;
4564 				goto error0;
4565 			}
4566 
4567 			/*
4568 			 * If this is a CoW allocation, record the data in
4569 			 * the refcount btree for orphan recovery.
4570 			 */
4571 			if (whichfork == XFS_COW_FORK)
4572 				xfs_refcount_alloc_cow_extent(tp, bma.blkno,
4573 						bma.length);
4574 		}
4575 
4576 		/* Deal with the allocated space we found.  */
4577 		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4578 							end, n, flags);
4579 
4580 		/* Execute unwritten extent conversion if necessary */
4581 		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4582 		if (error == -EAGAIN)
4583 			continue;
4584 		if (error)
4585 			goto error0;
4586 
4587 		/* update the extent map to return */
4588 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4589 
4590 		/*
4591 		 * If we're done, stop now.  Stop when we've allocated
4592 		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
4593 		 * the transaction may get too big.
4594 		 */
4595 		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4596 			break;
4597 
4598 		/* Else go on to the next record. */
4599 		bma.prev = bma.got;
4600 		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4601 			eof = true;
4602 	}
4603 
4604 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4605 			whichfork);
4606 	if (error)
4607 		goto error0;
4608 
4609 	ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4610 	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4611 	xfs_bmapi_finish(&bma, whichfork, 0);
4612 	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4613 		orig_nmap, n);
4614 
4615 	/*
4616 	 * When converting delayed allocations, xfs_bmapi_allocate ignores
4617 	 * the passed in bno and always converts from the start of the found
4618 	 * delalloc extent.
4619 	 *
4620 	 * To avoid a successful return with *nmap set to 0, return the magic
4621 	 * -ENOSR error code for this particular case so that the caller can
4622 	 * handle it.
4623 	 */
4624 	if (!n) {
4625 		ASSERT(bma.nallocs >= *nmap);
4626 		return -ENOSR;
4627 	}
4628 	*nmap = n;
4629 	return 0;
4630 error0:
4631 	xfs_bmapi_finish(&bma, whichfork, error);
4632 	return error;
4633 }
4634 
4635 /*
4636  * Convert an existing delalloc extent to real blocks based on file offset. This
4637  * attempts to allocate the entire delalloc extent and may require multiple
4638  * invocations to allocate the target offset if a large enough physical extent
4639  * is not available.
4640  */
4641 static int
4642 xfs_bmapi_convert_one_delalloc(
4643 	struct xfs_inode	*ip,
4644 	int			whichfork,
4645 	xfs_off_t		offset,
4646 	struct iomap		*iomap,
4647 	unsigned int		*seq)
4648 {
4649 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4650 	struct xfs_mount	*mp = ip->i_mount;
4651 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
4652 	struct xfs_bmalloca	bma = { NULL };
4653 	uint16_t		flags = 0;
4654 	struct xfs_trans	*tp;
4655 	int			error;
4656 
4657 	if (whichfork == XFS_COW_FORK)
4658 		flags |= IOMAP_F_SHARED;
4659 
4660 	/*
4661 	 * Space for the extent and indirect blocks was reserved when the
4662 	 * delalloc extent was created so there's no need to do so here.
4663 	 */
4664 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4665 				XFS_TRANS_RESERVE, &tp);
4666 	if (error)
4667 		return error;
4668 
4669 	xfs_ilock(ip, XFS_ILOCK_EXCL);
4670 	xfs_trans_ijoin(tp, ip, 0);
4671 
4672 	error = xfs_iext_count_extend(tp, ip, whichfork,
4673 			XFS_IEXT_ADD_NOSPLIT_CNT);
4674 	if (error)
4675 		goto out_trans_cancel;
4676 
4677 	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4678 	    bma.got.br_startoff > offset_fsb) {
4679 		/*
4680 		 * No extent found in the range we are trying to convert.  This
4681 		 * should only happen for the COW fork, where another thread
4682 		 * might have moved the extent to the data fork in the meantime.
4683 		 */
4684 		WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4685 		error = -EAGAIN;
4686 		goto out_trans_cancel;
4687 	}
4688 
4689 	/*
4690 	 * If we find a real extent here we raced with another thread converting
4691 	 * the extent.  Just return the real extent at this offset.
4692 	 */
4693 	if (!isnullstartblock(bma.got.br_startblock)) {
4694 		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4695 				xfs_iomap_inode_sequence(ip, flags));
4696 		if (seq)
4697 			*seq = READ_ONCE(ifp->if_seq);
4698 		goto out_trans_cancel;
4699 	}
4700 
4701 	bma.tp = tp;
4702 	bma.ip = ip;
4703 	bma.wasdel = true;
4704 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4705 
4706 	/*
4707 	 * Always allocate and convert from the start of the delalloc extent, even if
4708 	 * that is outside the passed in range to create large contiguous
4709 	 * extents on disk.
4710 	 */
4711 	bma.offset = bma.got.br_startoff;
4712 	bma.length = bma.got.br_blockcount;
4713 
4714 	/*
4715 	 * When we're converting the delalloc reservations backing dirty pages
4716 	 * in the page cache, we must be careful about how we create the new
4717 	 * extents:
4718 	 *
4719 	 * New CoW fork extents are created unwritten, turned into real extents
4720 	 * when we're about to write the data to disk, and mapped into the data
4721 	 * fork after the write finishes.  End of story.
4722 	 *
4723 	 * New data fork extents must be mapped in as unwritten and converted
4724 	 * to real extents after the write succeeds to avoid exposing stale
4725 	 * disk contents if we crash.
4726 	 */
4727 	bma.flags = XFS_BMAPI_PREALLOC;
4728 	if (whichfork == XFS_COW_FORK)
4729 		bma.flags |= XFS_BMAPI_COWFORK;
4730 
4731 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4732 		bma.prev.br_startoff = NULLFILEOFF;
4733 
4734 	error = xfs_bmapi_allocate(&bma);
4735 	if (error)
4736 		goto out_finish;
4737 
4738 	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4739 	XFS_STATS_INC(mp, xs_xstrat_quick);
4740 
4741 	ASSERT(!isnullstartblock(bma.got.br_startblock));
4742 	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4743 				xfs_iomap_inode_sequence(ip, flags));
4744 	if (seq)
4745 		*seq = READ_ONCE(ifp->if_seq);
4746 
4747 	if (whichfork == XFS_COW_FORK)
4748 		xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4749 
4750 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4751 			whichfork);
4752 	if (error)
4753 		goto out_finish;
4754 
4755 	xfs_bmapi_finish(&bma, whichfork, 0);
4756 	error = xfs_trans_commit(tp);
4757 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4758 	return error;
4759 
4760 out_finish:
4761 	xfs_bmapi_finish(&bma, whichfork, error);
4762 out_trans_cancel:
4763 	xfs_trans_cancel(tp);
4764 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4765 	return error;
4766 }
4767 
4768 /*
4769  * Pass in a delalloc extent and convert it to real extents; return the real
4770  * extent that maps offset_fsb in iomap.
4771  */
4772 int
4773 xfs_bmapi_convert_delalloc(
4774 	struct xfs_inode	*ip,
4775 	int			whichfork,
4776 	loff_t			offset,
4777 	struct iomap		*iomap,
4778 	unsigned int		*seq)
4779 {
4780 	int			error;
4781 
4782 	/*
4783 	 * Attempt to allocate whatever delalloc extent currently backs offset
4784 	 * and put the result into iomap.  Allocate in a loop because it may
4785 	 * take several attempts to allocate real blocks for a contiguous
4786 	 * delalloc extent if free space is sufficiently fragmented.
4787 	 */
4788 	do {
4789 		error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
4790 					iomap, seq);
4791 		if (error)
4792 			return error;
4793 	} while (iomap->offset + iomap->length <= offset);
4794 
4795 	return 0;
4796 }
4797 
4798 int
4799 xfs_bmapi_remap(
4800 	struct xfs_trans	*tp,
4801 	struct xfs_inode	*ip,
4802 	xfs_fileoff_t		bno,
4803 	xfs_filblks_t		len,
4804 	xfs_fsblock_t		startblock,
4805 	uint32_t		flags)
4806 {
4807 	struct xfs_mount	*mp = ip->i_mount;
4808 	struct xfs_ifork	*ifp;
4809 	struct xfs_btree_cur	*cur = NULL;
4810 	struct xfs_bmbt_irec	got;
4811 	struct xfs_iext_cursor	icur;
4812 	int			whichfork = xfs_bmapi_whichfork(flags);
4813 	int			logflags = 0, error;
4814 
4815 	ifp = xfs_ifork_ptr(ip, whichfork);
4816 	ASSERT(len > 0);
4817 	ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
4818 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4819 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4820 			   XFS_BMAPI_NORMAP)));
4821 	ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4822 			(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4823 
4824 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4825 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4826 		xfs_bmap_mark_sick(ip, whichfork);
4827 		return -EFSCORRUPTED;
4828 	}
4829 
4830 	if (xfs_is_shutdown(mp))
4831 		return -EIO;
4832 
4833 	error = xfs_iread_extents(tp, ip, whichfork);
4834 	if (error)
4835 		return error;
4836 
4837 	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4838 		/* make sure we only reflink into a hole. */
4839 		ASSERT(got.br_startoff > bno);
4840 		ASSERT(got.br_startoff - bno >= len);
4841 	}
4842 
4843 	ip->i_nblocks += len;
4844 	ip->i_delayed_blks -= len; /* see xfs_bmap_defer_add */
4845 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4846 
4847 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
4848 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4849 
4850 	got.br_startoff = bno;
4851 	got.br_startblock = startblock;
4852 	got.br_blockcount = len;
4853 	if (flags & XFS_BMAPI_PREALLOC)
4854 		got.br_state = XFS_EXT_UNWRITTEN;
4855 	else
4856 		got.br_state = XFS_EXT_NORM;
4857 
4858 	error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4859 			&cur, &got, &logflags, flags);
4860 	if (error)
4861 		goto error0;
4862 
4863 	error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
4864 
4865 error0:
4866 	if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
4867 		logflags &= ~XFS_ILOG_DEXT;
4868 	else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
4869 		logflags &= ~XFS_ILOG_DBROOT;
4870 
4871 	if (logflags)
4872 		xfs_trans_log_inode(tp, ip, logflags);
4873 	if (cur)
4874 		xfs_btree_del_cursor(cur, error);
4875 	return error;
4876 }
4877 
4878 /*
4879  * When a delalloc extent is split (e.g., due to a hole punch), the original
4880  * indlen reservation must be shared across the two new extents that are left
4881  * behind.
4882  *
4883  * Given the original reservation and the worst case indlen for the two new
4884  * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4885  * reservation fairly across the two new extents. If necessary, the caller may
4886  * steal blocks from the deleted extent beforehand to make up a reservation
4887  * deficiency (e.g., if ores == 1); the accounting of any stolen blocks is the
4888  * caller's responsibility.
4889  */
4890 static void
4891 xfs_bmap_split_indlen(
4892 	xfs_filblks_t			ores,		/* original res. */
4893 	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
4894 	xfs_filblks_t			*indlen2)	/* ext2 worst indlen */
4895 {
4896 	xfs_filblks_t			len1 = *indlen1;
4897 	xfs_filblks_t			len2 = *indlen2;
4898 	xfs_filblks_t			nres = len1 + len2; /* new total res. */
4899 	xfs_filblks_t			resfactor;
4900 
4901 	/*
4902 	 * We can't meet the total required reservation for the two extents.
4903 	 * Calculate the percentage of the total requirement that the original
4904 	 * reservation can cover and scale each requested indlen value by it.
4905 	 * This distributes the shortage fairly and reduces the chances that one
4906 	 * of the two extents is left with nothing when extents are repeatedly
4907 	 * split.
4908 	 */
4909 	resfactor = (ores * 100);
4910 	do_div(resfactor, nres);
4911 	len1 *= resfactor;
4912 	do_div(len1, 100);
4913 	len2 *= resfactor;
4914 	do_div(len2, 100);
4915 	ASSERT(len1 + len2 <= ores);
4916 	ASSERT(len1 < *indlen1 && len2 < *indlen2);
4917 
4918 	/*
4919 	 * Hand out the remainder to each extent. If one of the two reservations
4920 	 * is zero, we want to make sure that one gets a block first. The loop
4921 	 * below starts with len1, so hand len2 a block right off the bat if it
4922 	 * is zero.
4923 	 */
4924 	ores -= (len1 + len2);
4925 	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4926 	if (ores && !len2 && *indlen2) {
4927 		len2++;
4928 		ores--;
4929 	}
4930 	while (ores) {
4931 		if (len1 < *indlen1) {
4932 			len1++;
4933 			ores--;
4934 		}
4935 		if (!ores)
4936 			break;
4937 		if (len2 < *indlen2) {
4938 			len2++;
4939 			ores--;
4940 		}
4941 	}
4942 
4943 	*indlen1 = len1;
4944 	*indlen2 = len2;
4945 }
4946 
4947 void
4948 xfs_bmap_del_extent_delay(
4949 	struct xfs_inode	*ip,
4950 	int			whichfork,
4951 	struct xfs_iext_cursor	*icur,
4952 	struct xfs_bmbt_irec	*got,
4953 	struct xfs_bmbt_irec	*del)
4954 {
4955 	struct xfs_mount	*mp = ip->i_mount;
4956 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4957 	struct xfs_bmbt_irec	new;
4958 	int64_t			da_old, da_new, da_diff = 0;
4959 	xfs_fileoff_t		del_endoff, got_endoff;
4960 	xfs_filblks_t		got_indlen, new_indlen, stolen = 0;
4961 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
4962 	uint64_t		fdblocks;
4963 	bool			isrt;
4964 
4965 	XFS_STATS_INC(mp, xs_del_exlist);
4966 
4967 	isrt = xfs_ifork_is_realtime(ip, whichfork);
4968 	del_endoff = del->br_startoff + del->br_blockcount;
4969 	got_endoff = got->br_startoff + got->br_blockcount;
4970 	da_old = startblockval(got->br_startblock);
4971 	da_new = 0;
4972 
4973 	ASSERT(del->br_blockcount > 0);
4974 	ASSERT(got->br_startoff <= del->br_startoff);
4975 	ASSERT(got_endoff >= del_endoff);
4976 
4977 	/*
4978 	 * Update the inode delalloc counter now and wait to update the
4979 	 * sb counters as we might have to borrow some blocks for the
4980 	 * indirect block accounting.
4981 	 */
4982 	xfs_quota_unreserve_blkres(ip, del->br_blockcount);
4983 	ip->i_delayed_blks -= del->br_blockcount;
4984 
4985 	if (got->br_startoff == del->br_startoff)
4986 		state |= BMAP_LEFT_FILLING;
4987 	if (got_endoff == del_endoff)
4988 		state |= BMAP_RIGHT_FILLING;
4989 
4990 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4991 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4992 		/*
4993 		 * Matches the whole extent.  Delete the entry.
4994 		 */
4995 		xfs_iext_remove(ip, icur, state);
4996 		xfs_iext_prev(ifp, icur);
4997 		break;
4998 	case BMAP_LEFT_FILLING:
4999 		/*
5000 		 * Deleting the first part of the extent.
5001 		 */
5002 		got->br_startoff = del_endoff;
5003 		got->br_blockcount -= del->br_blockcount;
5004 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
5005 				got->br_blockcount), da_old);
5006 		got->br_startblock = nullstartblock((int)da_new);
5007 		xfs_iext_update_extent(ip, state, icur, got);
5008 		break;
5009 	case BMAP_RIGHT_FILLING:
5010 		/*
5011 		 * Deleting the last part of the extent.
5012 		 */
5013 		got->br_blockcount = got->br_blockcount - del->br_blockcount;
5014 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
5015 				got->br_blockcount), da_old);
5016 		got->br_startblock = nullstartblock((int)da_new);
5017 		xfs_iext_update_extent(ip, state, icur, got);
5018 		break;
5019 	case 0:
5020 		/*
5021 		 * Deleting the middle of the extent.
5022 		 *
5023 		 * Distribute the original indlen reservation across the two new
5024 		 * extents.  Steal blocks from the deleted extent if necessary.
5025 		 * Stealing blocks simply fudges the fdblocks accounting below.
5026 		 * Warn if either of the new indlen reservations is zero as this
5027 		 * can lead to delalloc problems.
5028 		 */
5029 		got->br_blockcount = del->br_startoff - got->br_startoff;
5030 		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
5031 
5032 		new.br_blockcount = got_endoff - del_endoff;
5033 		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5034 
5035 		WARN_ON_ONCE(!got_indlen || !new_indlen);
5036 		/*
5037 		 * Steal as many blocks as we can to try and satisfy the worst
5038 		 * case indlen for both new extents.
5039 		 *
5040 		 * However, we can't just steal reservations from the data
5041 		 * blocks if this is an RT inode, as the data and metadata
5042 		 * blocks come from different pools.  We'll have to live with
5043 		 * under-filled indirect reservation in this case.
5044 		 */
5045 		da_new = got_indlen + new_indlen;
5046 		if (da_new > da_old && !isrt) {
5047 			stolen = XFS_FILBLKS_MIN(da_new - da_old,
5048 						 del->br_blockcount);
5049 			da_old += stolen;
5050 		}
5051 		if (da_new > da_old)
5052 			xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen);
5053 		da_new = got_indlen + new_indlen;
5054 
5055 		got->br_startblock = nullstartblock((int)got_indlen);
5056 
5057 		new.br_startoff = del_endoff;
5058 		new.br_state = got->br_state;
5059 		new.br_startblock = nullstartblock((int)new_indlen);
5060 
5061 		xfs_iext_update_extent(ip, state, icur, got);
5062 		xfs_iext_next(ifp, icur);
5063 		xfs_iext_insert(ip, icur, &new, state);
5064 
5065 		del->br_blockcount -= stolen;
5066 		break;
5067 	}
5068 
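	/*
	 * Return the freed space to the right pools: the unused part of the
	 * indirect reservation (da_diff) always goes back to fdblocks, while
	 * the deleted data blocks go back to fdblocks for data device extents
	 * or to the free rtextent count for realtime extents.
	 */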
5069 	ASSERT(da_old >= da_new);
5070 	da_diff = da_old - da_new;
5071 	fdblocks = da_diff;
5072 
5073 	if (isrt)
5074 		xfs_add_frextents(mp, xfs_blen_to_rtbxlen(mp, del->br_blockcount));
5075 	else
5076 		fdblocks += del->br_blockcount;
5077 
5078 	xfs_add_fdblocks(mp, fdblocks);
5079 	xfs_mod_delalloc(ip, -(int64_t)del->br_blockcount, -da_diff);
5080 }
5081 
5082 void
5083 xfs_bmap_del_extent_cow(
5084 	struct xfs_inode	*ip,
5085 	struct xfs_iext_cursor	*icur,
5086 	struct xfs_bmbt_irec	*got,
5087 	struct xfs_bmbt_irec	*del)
5088 {
5089 	struct xfs_mount	*mp = ip->i_mount;
5090 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
5091 	struct xfs_bmbt_irec	new;
5092 	xfs_fileoff_t		del_endoff, got_endoff;
5093 	uint32_t		state = BMAP_COWFORK;
5094 
5095 	XFS_STATS_INC(mp, xs_del_exlist);
5096 
5097 	del_endoff = del->br_startoff + del->br_blockcount;
5098 	got_endoff = got->br_startoff + got->br_blockcount;
5099 
5100 	ASSERT(del->br_blockcount > 0);
5101 	ASSERT(got->br_startoff <= del->br_startoff);
5102 	ASSERT(got_endoff >= del_endoff);
5103 	ASSERT(!isnullstartblock(got->br_startblock));
5104 
5105 	if (got->br_startoff == del->br_startoff)
5106 		state |= BMAP_LEFT_FILLING;
5107 	if (got_endoff == del_endoff)
5108 		state |= BMAP_RIGHT_FILLING;
5109 
5110 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5111 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5112 		/*
5113 		 * Matches the whole extent.  Delete the entry.
5114 		 */
5115 		xfs_iext_remove(ip, icur, state);
5116 		xfs_iext_prev(ifp, icur);
5117 		break;
5118 	case BMAP_LEFT_FILLING:
5119 		/*
5120 		 * Deleting the first part of the extent.
5121 		 */
5122 		got->br_startoff = del_endoff;
5123 		got->br_blockcount -= del->br_blockcount;
5124 		got->br_startblock = del->br_startblock + del->br_blockcount;
5125 		xfs_iext_update_extent(ip, state, icur, got);
5126 		break;
5127 	case BMAP_RIGHT_FILLING:
5128 		/*
5129 		 * Deleting the last part of the extent.
5130 		 */
5131 		got->br_blockcount -= del->br_blockcount;
5132 		xfs_iext_update_extent(ip, state, icur, got);
5133 		break;
5134 	case 0:
5135 		/*
5136 		 * Deleting the middle of the extent.
5137 		 */
5138 		got->br_blockcount = del->br_startoff - got->br_startoff;
5139 
5140 		new.br_startoff = del_endoff;
5141 		new.br_blockcount = got_endoff - del_endoff;
5142 		new.br_state = got->br_state;
5143 		new.br_startblock = del->br_startblock + del->br_blockcount;
5144 
5145 		xfs_iext_update_extent(ip, state, icur, got);
5146 		xfs_iext_next(ifp, icur);
5147 		xfs_iext_insert(ip, icur, &new, state);
5148 		break;
5149 	}
5150 	ip->i_delayed_blks -= del->br_blockcount;
5151 }
5152 
5153 static int
5154 xfs_bmap_free_rtblocks(
5155 	struct xfs_trans	*tp,
5156 	struct xfs_bmbt_irec	*del)
5157 {
5158 	struct xfs_rtgroup	*rtg;
5159 	int			error;
5160 
5161 	rtg = xfs_rtgroup_grab(tp->t_mountp, 0);
5162 	if (!rtg)
5163 		return -EIO;
5164 
5165 	/*
5166 	 * Ensure the bitmap and summary inodes are locked and joined to the
5167 	 * transaction before modifying them.
5168 	 */
5169 	if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
5170 		tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
5171 		xfs_rtgroup_lock(rtg, XFS_RTGLOCK_BITMAP);
5172 		xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_BITMAP);
5173 	}
5174 
5175 	error = xfs_rtfree_blocks(tp, rtg, del->br_startblock,
5176 			del->br_blockcount);
5177 	xfs_rtgroup_rele(rtg);
5178 	return error;
5179 }
5180 
5181 /*
5182  * Called by xfs_bmapi to update file extent records and the btree
5183  * after removing space.
5184  */
5185 STATIC int				/* error */
5186 xfs_bmap_del_extent_real(
5187 	xfs_inode_t		*ip,	/* incore inode pointer */
5188 	xfs_trans_t		*tp,	/* current transaction pointer */
5189 	struct xfs_iext_cursor	*icur,
5190 	struct xfs_btree_cur	*cur,	/* if null, not a btree */
5191 	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
5192 	int			*logflagsp, /* inode logging flags */
5193 	int			whichfork, /* data or attr fork */
5194 	uint32_t		bflags)	/* bmapi flags */
5195 {
5196 	xfs_fsblock_t		del_endblock=0;	/* first block past del */
5197 	xfs_fileoff_t		del_endoff;	/* first offset past del */
5198 	int			error = 0;	/* error return value */
5199 	struct xfs_bmbt_irec	got;	/* current extent entry */
5200 	xfs_fileoff_t		got_endoff;	/* first offset past got */
5201 	int			i;	/* temp state */
5202 	struct xfs_ifork	*ifp;	/* inode fork pointer */
5203 	xfs_mount_t		*mp;	/* mount structure */
5204 	xfs_filblks_t		nblks;	/* quota/sb block count */
5205 	xfs_bmbt_irec_t		new;	/* new record to be inserted */
5206 	/* REFERENCED */
5207 	uint			qfield;	/* quota field to update */
5208 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
5209 	struct xfs_bmbt_irec	old;
5210 
5211 	*logflagsp = 0;
5212 
5213 	mp = ip->i_mount;
5214 	XFS_STATS_INC(mp, xs_del_exlist);
5215 
5216 	ifp = xfs_ifork_ptr(ip, whichfork);
5217 	ASSERT(del->br_blockcount > 0);
5218 	xfs_iext_get_extent(ifp, icur, &got);
5219 	ASSERT(got.br_startoff <= del->br_startoff);
5220 	del_endoff = del->br_startoff + del->br_blockcount;
5221 	got_endoff = got.br_startoff + got.br_blockcount;
5222 	ASSERT(got_endoff >= del_endoff);
5223 	ASSERT(!isnullstartblock(got.br_startblock));
5224 	qfield = 0;
5225 
5226 	/*
5227 	 * If it's the case where the directory code is running with no block
5228 	 * reservation, and the deleted block is in the middle of its extent,
5229 	 * and the resulting insert of an extent would cause transformation to
5230 	 * btree format, then reject it.  The calling code will then swap blocks
5231 	 * around instead.  We have to do this now, rather than waiting for the
5232 	 * conversion to btree format, since the transaction will be dirty then.
5233 	 */
5234 	if (tp->t_blk_res == 0 &&
5235 	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
5236 	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
5237 	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
5238 		return -ENOSPC;
5239 
5240 	*logflagsp = XFS_ILOG_CORE;
5241 	if (xfs_ifork_is_realtime(ip, whichfork))
5242 		qfield = XFS_TRANS_DQ_RTBCOUNT;
5243 	else
5244 		qfield = XFS_TRANS_DQ_BCOUNT;
5245 	nblks = del->br_blockcount;
5246 
5247 	del_endblock = del->br_startblock + del->br_blockcount;
5248 	if (cur) {
5249 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
5250 		if (error)
5251 			return error;
5252 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5253 			xfs_btree_mark_sick(cur);
5254 			return -EFSCORRUPTED;
5255 		}
5256 	}
5257 
5258 	if (got.br_startoff == del->br_startoff)
5259 		state |= BMAP_LEFT_FILLING;
5260 	if (got_endoff == del_endoff)
5261 		state |= BMAP_RIGHT_FILLING;
5262 
5263 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5264 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5265 		/*
5266 		 * Matches the whole extent.  Delete the entry.
5267 		 */
5268 		xfs_iext_remove(ip, icur, state);
5269 		xfs_iext_prev(ifp, icur);
5270 		ifp->if_nextents--;
5271 
5272 		*logflagsp |= XFS_ILOG_CORE;
5273 		if (!cur) {
5274 			*logflagsp |= xfs_ilog_fext(whichfork);
5275 			break;
5276 		}
5277 		if ((error = xfs_btree_delete(cur, &i)))
5278 			return error;
5279 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5280 			xfs_btree_mark_sick(cur);
5281 			return -EFSCORRUPTED;
5282 		}
5283 		break;
5284 	case BMAP_LEFT_FILLING:
5285 		/*
5286 		 * Deleting the first part of the extent.
5287 		 */
5288 		got.br_startoff = del_endoff;
5289 		got.br_startblock = del_endblock;
5290 		got.br_blockcount -= del->br_blockcount;
5291 		xfs_iext_update_extent(ip, state, icur, &got);
5292 		if (!cur) {
5293 			*logflagsp |= xfs_ilog_fext(whichfork);
5294 			break;
5295 		}
5296 		error = xfs_bmbt_update(cur, &got);
5297 		if (error)
5298 			return error;
5299 		break;
5300 	case BMAP_RIGHT_FILLING:
5301 		/*
5302 		 * Deleting the last part of the extent.
5303 		 */
5304 		got.br_blockcount -= del->br_blockcount;
5305 		xfs_iext_update_extent(ip, state, icur, &got);
5306 		if (!cur) {
5307 			*logflagsp |= xfs_ilog_fext(whichfork);
5308 			break;
5309 		}
5310 		error = xfs_bmbt_update(cur, &got);
5311 		if (error)
5312 			return error;
5313 		break;
5314 	case 0:
5315 		/*
5316 		 * Deleting the middle of the extent.
5317 		 */
5318 
5319 		old = got;
5320 
5321 		got.br_blockcount = del->br_startoff - got.br_startoff;
5322 		xfs_iext_update_extent(ip, state, icur, &got);
5323 
5324 		new.br_startoff = del_endoff;
5325 		new.br_blockcount = got_endoff - del_endoff;
5326 		new.br_state = got.br_state;
5327 		new.br_startblock = del_endblock;
5328 
5329 		*logflagsp |= XFS_ILOG_CORE;
5330 		if (cur) {
5331 			error = xfs_bmbt_update(cur, &got);
5332 			if (error)
5333 				return error;
5334 			error = xfs_btree_increment(cur, 0, &i);
5335 			if (error)
5336 				return error;
5337 			cur->bc_rec.b = new;
5338 			error = xfs_btree_insert(cur, &i);
5339 			if (error && error != -ENOSPC)
5340 				return error;
5341 			/*
5342 			 * If we got -ENOSPC back from the btree insert, it tried
5343 			 * a split and we have a zero block reservation.  Fix up
5344 			 * our state and return the error.
5345 			 */
5346 			if (error == -ENOSPC) {
5347 				/*
5348 				 * Reset the cursor, don't trust it after any
5349 				 * insert operation.
5350 				 */
5351 				error = xfs_bmbt_lookup_eq(cur, &got, &i);
5352 				if (error)
5353 					return error;
5354 				if (XFS_IS_CORRUPT(mp, i != 1)) {
5355 					xfs_btree_mark_sick(cur);
5356 					return -EFSCORRUPTED;
5357 				}
5358 				/*
5359 				 * Update the btree record back
5360 				 * to the original value.
5361 				 */
5362 				error = xfs_bmbt_update(cur, &old);
5363 				if (error)
5364 					return error;
5365 				/*
5366 				 * Reset the extent record back
5367 				 * to the original value.
5368 				 */
5369 				xfs_iext_update_extent(ip, state, icur, &old);
5370 				*logflagsp = 0;
5371 				return -ENOSPC;
5372 			}
5373 			if (XFS_IS_CORRUPT(mp, i != 1)) {
5374 				xfs_btree_mark_sick(cur);
5375 				return -EFSCORRUPTED;
5376 			}
5377 		} else
5378 			*logflagsp |= xfs_ilog_fext(whichfork);
5379 
5380 		ifp->if_nextents++;
5381 		xfs_iext_next(ifp, icur);
5382 		xfs_iext_insert(ip, icur, &new, state);
5383 		break;
5384 	}
5385 
5386 	/* remove reverse mapping */
5387 	xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5388 
5389 	/*
5390 	 * If we need to, add to list of extents to delete.
5391 	 */
5392 	if (!(bflags & XFS_BMAPI_REMAP)) {
5393 		bool	isrt = xfs_ifork_is_realtime(ip, whichfork);
5394 
5395 		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5396 			xfs_refcount_decrease_extent(tp, del);
5397 		} else if (isrt && !xfs_has_rtgroups(mp)) {
5398 			error = xfs_bmap_free_rtblocks(tp, del);
5399 		} else {
5400 			unsigned int	efi_flags = 0;
5401 
5402 			if ((bflags & XFS_BMAPI_NODISCARD) ||
5403 			    del->br_state == XFS_EXT_UNWRITTEN)
5404 				efi_flags |= XFS_FREE_EXTENT_SKIP_DISCARD;
5405 
5406 			/*
5407 			 * Historically, we did not use EFIs to free realtime
5408 			 * extents.  However, when reverse mapping is enabled,
5409 			 * we must maintain the same order of operations as the
5410 			 * data device, which is: Remove the file mapping,
5411 			 * remove the reverse mapping, and then free the
5412 			 * blocks.  Reflink for realtime volumes requires the
5413 			 * same sort of ordering.  Both features rely on
5414 			 * rtgroups, so let's gate rt EFI usage on rtgroups.
5415 			 */
5416 			if (isrt)
5417 				efi_flags |= XFS_FREE_EXTENT_REALTIME;
5418 
5419 			error = xfs_free_extent_later(tp, del->br_startblock,
5420 					del->br_blockcount, NULL,
5421 					XFS_AG_RESV_NONE, efi_flags);
5422 		}
5423 		if (error)
5424 			return error;
5425 	}
5426 
5427 	/*
5428 	 * Adjust inode # blocks in the file.
5429 	 */
5430 	if (nblks)
5431 		ip->i_nblocks -= nblks;
5432 	/*
5433 	 * Adjust quota data.
5434 	 */
5435 	if (qfield && !(bflags & XFS_BMAPI_REMAP))
5436 		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5437 
5438 	return 0;
5439 }
5440 
5441 /*
5442  * Unmap (remove) blocks from a file.
5443  * If nexts is nonzero then the number of extents to remove is limited to
5444  * that value.  If not all extents in the block range can be removed then
5445  * *rlen is updated to the length that remains to be unmapped.
5446  */
5447 static int
5448 __xfs_bunmapi(
5449 	struct xfs_trans	*tp,		/* transaction pointer */
5450 	struct xfs_inode	*ip,		/* incore inode */
5451 	xfs_fileoff_t		start,		/* first file offset deleted */
5452 	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
5453 	uint32_t		flags,		/* misc flags */
5454 	xfs_extnum_t		nexts)		/* number of extents max */
5455 {
5456 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
5457 	struct xfs_bmbt_irec	del;		/* extent being deleted */
5458 	int			error;		/* error return value */
5459 	xfs_extnum_t		extno;		/* extent number in list */
5460 	struct xfs_bmbt_irec	got;		/* current extent record */
5461 	struct xfs_ifork	*ifp;		/* inode fork pointer */
5462 	int			isrt;		/* freeing in rt area */
5463 	int			logflags;	/* transaction logging flags */
5464 	xfs_extlen_t		mod;		/* rt extent offset */
5465 	struct xfs_mount	*mp = ip->i_mount;
5466 	int			tmp_logflags;	/* partial logging flags */
5467 	int			wasdel;		/* was a delayed alloc extent */
5468 	int			whichfork;	/* data or attribute fork */
5469 	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
5470 	xfs_fileoff_t		end;
5471 	struct xfs_iext_cursor	icur;
5472 	bool			done = false;
5473 
5474 	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5475 
5476 	whichfork = xfs_bmapi_whichfork(flags);
5477 	ASSERT(whichfork != XFS_COW_FORK);
5478 	ifp = xfs_ifork_ptr(ip, whichfork);
5479 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) {
5480 		xfs_bmap_mark_sick(ip, whichfork);
5481 		return -EFSCORRUPTED;
5482 	}
5483 	if (xfs_is_shutdown(mp))
5484 		return -EIO;
5485 
5486 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
5487 	ASSERT(len > 0);
5488 	ASSERT(nexts >= 0);
5489 
5490 	error = xfs_iread_extents(tp, ip, whichfork);
5491 	if (error)
5492 		return error;
5493 
5494 	if (xfs_iext_count(ifp) == 0) {
5495 		*rlen = 0;
5496 		return 0;
5497 	}
5498 	XFS_STATS_INC(mp, xs_blk_unmap);
5499 	isrt = xfs_ifork_is_realtime(ip, whichfork);
5500 	end = start + len;
5501 
5502 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5503 		*rlen = 0;
5504 		return 0;
5505 	}
5506 	end--;
5507 
5508 	logflags = 0;
5509 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5510 		ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
5511 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5512 	} else
5513 		cur = NULL;
5514 
5515 	extno = 0;
5516 	while (end != (xfs_fileoff_t)-1 && end >= start &&
5517 	       (nexts == 0 || extno < nexts)) {
5518 		/*
5519 		 * Is the found extent after a hole in which end lives?
5520 		 * Just back up to the previous extent, if so.
5521 		 */
5522 		if (got.br_startoff > end &&
5523 		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5524 			done = true;
5525 			break;
5526 		}
5527 		/*
5528 		 * Is the last block of this extent before the range
5529 		 * we're supposed to delete?  If so, we're done.
5530 		 */
5531 		end = XFS_FILEOFF_MIN(end,
5532 			got.br_startoff + got.br_blockcount - 1);
5533 		if (end < start)
5534 			break;
5535 		/*
5536 		 * Then deal with the (possibly delayed) allocated space
5537 		 * we found.
5538 		 */
5539 		del = got;
5540 		wasdel = isnullstartblock(del.br_startblock);
5541 
5542 		if (got.br_startoff < start) {
5543 			del.br_startoff = start;
5544 			del.br_blockcount -= start - got.br_startoff;
5545 			if (!wasdel)
5546 				del.br_startblock += start - got.br_startoff;
5547 		}
5548 		if (del.br_startoff + del.br_blockcount > end + 1)
5549 			del.br_blockcount = end + 1 - del.br_startoff;
5550 
5551 		if (!isrt || (flags & XFS_BMAPI_REMAP))
5552 			goto delete;
5553 
5554 		mod = xfs_rtb_to_rtxoff(mp,
5555 				del.br_startblock + del.br_blockcount);
5556 		if (mod) {
5557 			/*
5558 			 * Realtime extent not lined up at the end.
5559 			 * The extent could have been split into written
5560 			 * and unwritten pieces, or we could just be
5561 			 * unmapping part of it.  But we can't really
5562 			 * get rid of part of a realtime extent.
5563 			 */
5564 			if (del.br_state == XFS_EXT_UNWRITTEN) {
5565 				/*
5566 				 * This piece is unwritten, or we're not
5567 				 * using unwritten extents.  Skip over it.
5568 				 */
5569 				ASSERT((flags & XFS_BMAPI_REMAP) || end >= mod);
5570 				end -= mod > del.br_blockcount ?
5571 					del.br_blockcount : mod;
5572 				if (end < got.br_startoff &&
5573 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5574 					done = true;
5575 					break;
5576 				}
5577 				continue;
5578 			}
5579 			/*
5580 			 * It's written, turn it unwritten.
5581 			 * This is better than zeroing it.
5582 			 */
5583 			ASSERT(del.br_state == XFS_EXT_NORM);
5584 			ASSERT(tp->t_blk_res > 0);
5585 			/*
5586 			 * If this spans a realtime extent boundary,
5587 			 * chop it back to the start of the one we end at.
5588 			 */
5589 			if (del.br_blockcount > mod) {
5590 				del.br_startoff += del.br_blockcount - mod;
5591 				del.br_startblock += del.br_blockcount - mod;
5592 				del.br_blockcount = mod;
5593 			}
5594 			del.br_state = XFS_EXT_UNWRITTEN;
5595 			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5596 					whichfork, &icur, &cur, &del,
5597 					&logflags);
5598 			if (error)
5599 				goto error0;
5600 			goto nodelete;
5601 		}
5602 
5603 		mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
5604 		if (mod) {
5605 			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
5606 
5607 			/*
5608 			 * Realtime extent is lined up at the end but not
5609 			 * at the front.  We'll get rid of full extents if
5610 			 * we can.
5611 			 */
5612 			if (del.br_blockcount > off) {
5613 				del.br_blockcount -= off;
5614 				del.br_startoff += off;
5615 				del.br_startblock += off;
5616 			} else if (del.br_startoff == start &&
5617 				   (del.br_state == XFS_EXT_UNWRITTEN ||
5618 				    tp->t_blk_res == 0)) {
5619 				/*
5620 				 * Can't make it unwritten.  There isn't
5621 				 * a full extent here so just skip it.
5622 				 */
5623 				ASSERT(end >= del.br_blockcount);
5624 				end -= del.br_blockcount;
5625 				if (got.br_startoff > end &&
5626 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5627 					done = true;
5628 					break;
5629 				}
5630 				continue;
5631 			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
5632 				struct xfs_bmbt_irec	prev;
5633 				xfs_fileoff_t		unwrite_start;
5634 
5635 				/*
5636 				 * This one is already unwritten.
5637 				 * It must have a written left neighbor.
5638 				 * Unwrite the killed part of that one and
5639 				 * try again.
5640 				 */
5641 				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5642 					ASSERT(0);
5643 				ASSERT(prev.br_state == XFS_EXT_NORM);
5644 				ASSERT(!isnullstartblock(prev.br_startblock));
5645 				ASSERT(del.br_startblock ==
5646 				       prev.br_startblock + prev.br_blockcount);
5647 				unwrite_start = max3(start,
5648 						     del.br_startoff - mod,
5649 						     prev.br_startoff);
5650 				mod = unwrite_start - prev.br_startoff;
5651 				prev.br_startoff = unwrite_start;
5652 				prev.br_startblock += mod;
5653 				prev.br_blockcount -= mod;
5654 				prev.br_state = XFS_EXT_UNWRITTEN;
5655 				error = xfs_bmap_add_extent_unwritten_real(tp,
5656 						ip, whichfork, &icur, &cur,
5657 						&prev, &logflags);
5658 				if (error)
5659 					goto error0;
5660 				goto nodelete;
5661 			} else {
5662 				ASSERT(del.br_state == XFS_EXT_NORM);
5663 				del.br_state = XFS_EXT_UNWRITTEN;
5664 				error = xfs_bmap_add_extent_unwritten_real(tp,
5665 						ip, whichfork, &icur, &cur,
5666 						&del, &logflags);
5667 				if (error)
5668 					goto error0;
5669 				goto nodelete;
5670 			}
5671 		}
5672 
5673 delete:
5674 		if (wasdel) {
5675 			xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got, &del);
5676 		} else {
5677 			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5678 					&del, &tmp_logflags, whichfork,
5679 					flags);
5680 			logflags |= tmp_logflags;
5681 			if (error)
5682 				goto error0;
5683 		}
5684 
5685 		end = del.br_startoff - 1;
5686 nodelete:
5687 		/*
5688 		 * If not done go on to the next (previous) record.
5689 		 */
5690 		if (end != (xfs_fileoff_t)-1 && end >= start) {
5691 			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5692 			    (got.br_startoff > end &&
5693 			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
5694 				done = true;
5695 				break;
5696 			}
5697 			extno++;
5698 		}
5699 	}
5700 	if (done || end == (xfs_fileoff_t)-1 || end < start)
5701 		*rlen = 0;
5702 	else
5703 		*rlen = end - start + 1;
5704 
5705 	/*
5706 	 * Convert to a btree if necessary.
5707 	 */
5708 	if (xfs_bmap_needs_btree(ip, whichfork)) {
5709 		ASSERT(cur == NULL);
5710 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5711 				&tmp_logflags, whichfork);
5712 		logflags |= tmp_logflags;
5713 	} else {
5714 		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
5715 			whichfork);
5716 	}
5717 
5718 error0:
5719 	/*
5720 	 * Log everything.  Do this after conversion; there's no point in
5721 	 * logging the extent records if we've converted to btree format.
5722 	 */
5723 	if ((logflags & xfs_ilog_fext(whichfork)) &&
5724 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
5725 		logflags &= ~xfs_ilog_fext(whichfork);
5726 	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5727 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
5728 		logflags &= ~xfs_ilog_fbroot(whichfork);
5729 	/*
5730 	 * Log the inode even in the error case; if the transaction
5731 	 * is dirty we'll need to shut down the filesystem.
5732 	 */
5733 	if (logflags)
5734 		xfs_trans_log_inode(tp, ip, logflags);
5735 	if (cur) {
5736 		if (!error)
5737 			cur->bc_bmap.allocated = 0;
5738 		xfs_btree_del_cursor(cur, error);
5739 	}
5740 	return error;
5741 }
5742 
5743 /* Unmap a range of a file. */
5744 int
5745 xfs_bunmapi(
5746 	xfs_trans_t		*tp,
5747 	struct xfs_inode	*ip,
5748 	xfs_fileoff_t		bno,
5749 	xfs_filblks_t		len,
5750 	uint32_t		flags,
5751 	xfs_extnum_t		nexts,
5752 	int			*done)
5753 {
5754 	int			error;
5755 
5756 	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
5757 	*done = (len == 0);
5758 	return error;
5759 }
5760 
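/*
 * Illustrative sketch only, not lifted from any real caller: a typical use
 * of xfs_bunmapi() from inside a dirty transaction, with the inode held in
 * XFS_ILOCK_EXCL, might look roughly like the following, where @bno and
 * @len describe the file range to remove and @done reports completion:
 *
 *	int	done, error;
 *
 *	error = xfs_bunmapi(tp, ip, bno, len, 0, XFS_ITRUNC_MAX_EXTENTS,
 *			&done);
 *	if (error)
 *		return error;
 *	if (!done)
 *		... finish deferred work, roll the transaction, retry ...
 */
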
5761 /*
5762  * Determine whether an extent shift can be accomplished by a merge with the
5763  * extent that precedes the target hole of the shift.
5764  */
5765 STATIC bool
5766 xfs_bmse_can_merge(
5767 	struct xfs_inode	*ip,
5768 	int			whichfork,
5769 	struct xfs_bmbt_irec	*left,	/* preceding extent */
5770 	struct xfs_bmbt_irec	*got,	/* current extent to shift */
5771 	xfs_fileoff_t		shift)	/* shift fsb */
5772 {
5773 	xfs_fileoff_t		startoff;
5774 
5775 	startoff = got->br_startoff - shift;
5776 
5777 	/*
5778 	 * The extent, once shifted, must be adjacent in-file and on-disk with
5779 	 * the preceding extent.
5780 	 */
5781 	if ((left->br_startoff + left->br_blockcount != startoff) ||
5782 	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5783 	    (left->br_state != got->br_state) ||
5784 	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN) ||
5785 	    !xfs_bmap_same_rtgroup(ip, whichfork, left, got))
5786 		return false;
5787 
5788 	return true;
5789 }
5790 
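/*
 * Worked example with made-up numbers: if @left is [startoff 0, startblock
 * 100, blockcount 8] and @got is [startoff 10, startblock 108, blockcount 4],
 * then a shift of 2 moves @got to startoff 8, which is contiguous with @left
 * both in the file and on disk.  Provided the two share the same state (and,
 * on realtime filesystems, the same rtgroup) and the combined length stays
 * within XFS_MAX_BMBT_EXTLEN, the shift can be performed as a merge into a
 * single 12-block extent.
 */
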
5791 /*
5792  * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5793  * hole in the file. If an extent shift would result in the extent being fully
5794  * adjacent to the extent that currently precedes the hole, we can merge with
5795  * the preceding extent rather than do the shift.
5796  *
5797  * This function assumes the caller has verified a shift-by-merge is possible
5798  * with the provided extents via xfs_bmse_can_merge().
5799  */
5800 STATIC int
5801 xfs_bmse_merge(
5802 	struct xfs_trans		*tp,
5803 	struct xfs_inode		*ip,
5804 	int				whichfork,
5805 	xfs_fileoff_t			shift,		/* shift fsb */
5806 	struct xfs_iext_cursor		*icur,
5807 	struct xfs_bmbt_irec		*got,		/* extent to shift */
5808 	struct xfs_bmbt_irec		*left,		/* preceding extent */
5809 	struct xfs_btree_cur		*cur,
5810 	int				*logflags)	/* output */
5811 {
5812 	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
5813 	struct xfs_bmbt_irec		new;
5814 	xfs_filblks_t			blockcount;
5815 	int				error, i;
5816 	struct xfs_mount		*mp = ip->i_mount;
5817 
5818 	blockcount = left->br_blockcount + got->br_blockcount;
5819 
5820 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5821 	ASSERT(xfs_bmse_can_merge(ip, whichfork, left, got, shift));
5822 
5823 	new = *left;
5824 	new.br_blockcount = blockcount;
5825 
5826 	/*
5827 	 * Update the on-disk extent count, the btree if necessary and log the
5828 	 * inode.
5829 	 */
5830 	ifp->if_nextents--;
5831 	*logflags |= XFS_ILOG_CORE;
5832 	if (!cur) {
5833 		*logflags |= XFS_ILOG_DEXT;
5834 		goto done;
5835 	}
5836 
5837 	/* lookup and remove the extent to merge */
5838 	error = xfs_bmbt_lookup_eq(cur, got, &i);
5839 	if (error)
5840 		return error;
5841 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5842 		xfs_btree_mark_sick(cur);
5843 		return -EFSCORRUPTED;
5844 	}
5845 
5846 	error = xfs_btree_delete(cur, &i);
5847 	if (error)
5848 		return error;
5849 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5850 		xfs_btree_mark_sick(cur);
5851 		return -EFSCORRUPTED;
5852 	}
5853 
5854 	/* lookup and update size of the previous extent */
5855 	error = xfs_bmbt_lookup_eq(cur, left, &i);
5856 	if (error)
5857 		return error;
5858 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5859 		xfs_btree_mark_sick(cur);
5860 		return -EFSCORRUPTED;
5861 	}
5862 
5863 	error = xfs_bmbt_update(cur, &new);
5864 	if (error)
5865 		return error;
5866 
5867 	/* change to extent format if required after extent removal */
5868 	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5869 	if (error)
5870 		return error;
5871 
5872 done:
5873 	xfs_iext_remove(ip, icur, 0);
5874 	xfs_iext_prev(ifp, icur);
5875 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5876 			&new);
5877 
5878 	/* update reverse mapping. rmap functions merge the rmaps for us */
5879 	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
5880 	memcpy(&new, got, sizeof(new));
5881 	new.br_startoff = left->br_startoff + left->br_blockcount;
5882 	xfs_rmap_map_extent(tp, ip, whichfork, &new);
5883 	return 0;
5884 }
5885 
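/*
 * Shift a single extent to the new file offset @startoff: update the btree
 * record (if a bmap btree exists), the incore extent record and the reverse
 * mapping to match.
 */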
5886 static int
5887 xfs_bmap_shift_update_extent(
5888 	struct xfs_trans	*tp,
5889 	struct xfs_inode	*ip,
5890 	int			whichfork,
5891 	struct xfs_iext_cursor	*icur,
5892 	struct xfs_bmbt_irec	*got,
5893 	struct xfs_btree_cur	*cur,
5894 	int			*logflags,
5895 	xfs_fileoff_t		startoff)
5896 {
5897 	struct xfs_mount	*mp = ip->i_mount;
5898 	struct xfs_bmbt_irec	prev = *got;
5899 	int			error, i;
5900 
5901 	*logflags |= XFS_ILOG_CORE;
5902 
5903 	got->br_startoff = startoff;
5904 
5905 	if (cur) {
5906 		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5907 		if (error)
5908 			return error;
5909 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5910 			xfs_btree_mark_sick(cur);
5911 			return -EFSCORRUPTED;
5912 		}
5913 
5914 		error = xfs_bmbt_update(cur, got);
5915 		if (error)
5916 			return error;
5917 	} else {
5918 		*logflags |= XFS_ILOG_DEXT;
5919 	}
5920 
5921 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5922 			got);
5923 
5924 	/* update reverse mapping */
5925 	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5926 	xfs_rmap_map_extent(tp, ip, whichfork, got);
5927 	return 0;
5928 }
5929 
5930 int
5931 xfs_bmap_collapse_extents(
5932 	struct xfs_trans	*tp,
5933 	struct xfs_inode	*ip,
5934 	xfs_fileoff_t		*next_fsb,
5935 	xfs_fileoff_t		offset_shift_fsb,
5936 	bool			*done)
5937 {
5938 	int			whichfork = XFS_DATA_FORK;
5939 	struct xfs_mount	*mp = ip->i_mount;
5940 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
5941 	struct xfs_btree_cur	*cur = NULL;
5942 	struct xfs_bmbt_irec	got, prev;
5943 	struct xfs_iext_cursor	icur;
5944 	xfs_fileoff_t		new_startoff;
5945 	int			error = 0;
5946 	int			logflags = 0;
5947 
5948 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5949 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5950 		xfs_bmap_mark_sick(ip, whichfork);
5951 		return -EFSCORRUPTED;
5952 	}
5953 
5954 	if (xfs_is_shutdown(mp))
5955 		return -EIO;
5956 
5957 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5958 
5959 	error = xfs_iread_extents(tp, ip, whichfork);
5960 	if (error)
5961 		return error;
5962 
5963 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
5964 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5965 
5966 	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5967 		*done = true;
5968 		goto del_cursor;
5969 	}
5970 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5971 		xfs_bmap_mark_sick(ip, whichfork);
5972 		error = -EFSCORRUPTED;
5973 		goto del_cursor;
5974 	}
5975 
5976 	new_startoff = got.br_startoff - offset_shift_fsb;
5977 	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5978 		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5979 			error = -EINVAL;
5980 			goto del_cursor;
5981 		}
5982 
5983 		if (xfs_bmse_can_merge(ip, whichfork, &prev, &got,
5984 				offset_shift_fsb)) {
5985 			error = xfs_bmse_merge(tp, ip, whichfork,
5986 					offset_shift_fsb, &icur, &got, &prev,
5987 					cur, &logflags);
5988 			if (error)
5989 				goto del_cursor;
5990 			goto done;
5991 		}
5992 	} else {
5993 		if (got.br_startoff < offset_shift_fsb) {
5994 			error = -EINVAL;
5995 			goto del_cursor;
5996 		}
5997 	}
5998 
5999 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
6000 			cur, &logflags, new_startoff);
6001 	if (error)
6002 		goto del_cursor;
6003 
6004 done:
6005 	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
6006 		*done = true;
6007 		goto del_cursor;
6008 	}
6009 
6010 	*next_fsb = got.br_startoff;
6011 del_cursor:
6012 	if (cur)
6013 		xfs_btree_del_cursor(cur, error);
6014 	if (logflags)
6015 		xfs_trans_log_inode(tp, ip, logflags);
6016 	return error;
6017 }
6018 
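/*
 * Illustrative calling pattern only; transaction allocation, rolling and
 * error handling are elided, and the caller is assumed to hold the IOLOCK
 * and ILOCK exclusively, as asserted above:
 *
 *	next_fsb = start_fsb;
 *	done = false;
 *	while (!done) {
 *		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb,
 *				offset_shift_fsb, &done);
 *		if (error)
 *			break;
 *		... finish deferred ops and roll the transaction ...
 *	}
 */
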
6019 /* Make sure we won't be right-shifting an extent past the maximum bound. */
6020 int
6021 xfs_bmap_can_insert_extents(
6022 	struct xfs_inode	*ip,
6023 	xfs_fileoff_t		off,
6024 	xfs_fileoff_t		shift)
6025 {
6026 	struct xfs_bmbt_irec	got;
6027 	int			is_empty;
6028 	int			error = 0;
6029 
6030 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
6031 
6032 	if (xfs_is_shutdown(ip->i_mount))
6033 		return -EIO;
6034 
6035 	xfs_ilock(ip, XFS_ILOCK_EXCL);
6036 	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
6037 	if (!error && !is_empty && got.br_startoff >= off &&
6038 	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
6039 		error = -EINVAL;
6040 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
6041 
6042 	return error;
6043 }
6044 
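/*
 * The check above relies on BMBT_STARTOFF_MASK truncating any startoff that
 * would overflow the on-disk startoff field.  Illustrative example with
 * made-up numbers: if the last extent starts at BMBT_STARTOFF_MASK - 1 and
 * @shift is 2, the masked sum wraps around to 0, which is smaller than the
 * original startoff, so the shift is rejected with -EINVAL.
 */
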
6045 int
6046 xfs_bmap_insert_extents(
6047 	struct xfs_trans	*tp,
6048 	struct xfs_inode	*ip,
6049 	xfs_fileoff_t		*next_fsb,
6050 	xfs_fileoff_t		offset_shift_fsb,
6051 	bool			*done,
6052 	xfs_fileoff_t		stop_fsb)
6053 {
6054 	int			whichfork = XFS_DATA_FORK;
6055 	struct xfs_mount	*mp = ip->i_mount;
6056 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
6057 	struct xfs_btree_cur	*cur = NULL;
6058 	struct xfs_bmbt_irec	got, next;
6059 	struct xfs_iext_cursor	icur;
6060 	xfs_fileoff_t		new_startoff;
6061 	int			error = 0;
6062 	int			logflags = 0;
6063 
6064 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
6065 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
6066 		xfs_bmap_mark_sick(ip, whichfork);
6067 		return -EFSCORRUPTED;
6068 	}
6069 
6070 	if (xfs_is_shutdown(mp))
6071 		return -EIO;
6072 
6073 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
6074 
6075 	error = xfs_iread_extents(tp, ip, whichfork);
6076 	if (error)
6077 		return error;
6078 
6079 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
6080 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6081 
6082 	if (*next_fsb == NULLFSBLOCK) {
6083 		xfs_iext_last(ifp, &icur);
6084 		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
6085 		    stop_fsb > got.br_startoff) {
6086 			*done = true;
6087 			goto del_cursor;
6088 		}
6089 	} else {
6090 		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
6091 			*done = true;
6092 			goto del_cursor;
6093 		}
6094 	}
6095 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
6096 		xfs_bmap_mark_sick(ip, whichfork);
6097 		error = -EFSCORRUPTED;
6098 		goto del_cursor;
6099 	}
6100 
6101 	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
6102 		xfs_bmap_mark_sick(ip, whichfork);
6103 		error = -EFSCORRUPTED;
6104 		goto del_cursor;
6105 	}
6106 
6107 	new_startoff = got.br_startoff + offset_shift_fsb;
6108 	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
6109 		if (new_startoff + got.br_blockcount > next.br_startoff) {
6110 			error = -EINVAL;
6111 			goto del_cursor;
6112 		}
6113 
6114 		/*
6115 		 * Unlike a left shift (which involves a hole punch), a right
6116 		 * shift does not modify extent neighbors in any way.  We should
6117 		 * never find mergeable extents in this scenario.  Check anyway
6118 		 * and warn if we encounter two extents that could be one.
6119 		 */
6120 		if (xfs_bmse_can_merge(ip, whichfork, &got, &next,
6121 				offset_shift_fsb))
6122 			WARN_ON_ONCE(1);
6123 	}
6124 
6125 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
6126 			cur, &logflags, new_startoff);
6127 	if (error)
6128 		goto del_cursor;
6129 
6130 	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
6131 	    stop_fsb >= got.br_startoff + got.br_blockcount) {
6132 		*done = true;
6133 		goto del_cursor;
6134 	}
6135 
6136 	*next_fsb = got.br_startoff;
6137 del_cursor:
6138 	if (cur)
6139 		xfs_btree_del_cursor(cur, error);
6140 	if (logflags)
6141 		xfs_trans_log_inode(tp, ip, logflags);
6142 	return error;
6143 }
6144 
6145 /*
6146  * Split the extent containing @split_fsb into two extents so that
6147  * @split_fsb becomes the first block of the new, second extent.
6148  * If @split_fsb lies in a hole or at the first block of an existing
6149  * extent, there is nothing to split and we just return 0.
6150  */
6151 int
6152 xfs_bmap_split_extent(
6153 	struct xfs_trans	*tp,
6154 	struct xfs_inode	*ip,
6155 	xfs_fileoff_t		split_fsb)
6156 {
6157 	int				whichfork = XFS_DATA_FORK;
6158 	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
6159 	struct xfs_btree_cur		*cur = NULL;
6160 	struct xfs_bmbt_irec		got;
6161 	struct xfs_bmbt_irec		new; /* split extent */
6162 	struct xfs_mount		*mp = ip->i_mount;
6163 	xfs_fsblock_t			gotblkcnt; /* new block count for got */
6164 	struct xfs_iext_cursor		icur;
6165 	int				error = 0;
6166 	int				logflags = 0;
6167 	int				i = 0;
6168 
6169 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
6170 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
6171 		xfs_bmap_mark_sick(ip, whichfork);
6172 		return -EFSCORRUPTED;
6173 	}
6174 
6175 	if (xfs_is_shutdown(mp))
6176 		return -EIO;
6177 
6178 	/* Read in all the extents */
6179 	error = xfs_iread_extents(tp, ip, whichfork);
6180 	if (error)
6181 		return error;
6182 
6183 	/*
6184 	 * If there are not extents, or split_fsb lies in a hole we are done.
6185 	 */
6186 	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
6187 	    got.br_startoff >= split_fsb)
6188 		return 0;
6189 
6190 	gotblkcnt = split_fsb - got.br_startoff;
6191 	new.br_startoff = split_fsb;
6192 	new.br_startblock = got.br_startblock + gotblkcnt;
6193 	new.br_blockcount = got.br_blockcount - gotblkcnt;
6194 	new.br_state = got.br_state;
6195 
6196 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
6197 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6198 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
6199 		if (error)
6200 			goto del_cursor;
6201 		if (XFS_IS_CORRUPT(mp, i != 1)) {
6202 			xfs_btree_mark_sick(cur);
6203 			error = -EFSCORRUPTED;
6204 			goto del_cursor;
6205 		}
6206 	}
6207 
6208 	got.br_blockcount = gotblkcnt;
6209 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
6210 			&got);
6211 
6212 	logflags = XFS_ILOG_CORE;
6213 	if (cur) {
6214 		error = xfs_bmbt_update(cur, &got);
6215 		if (error)
6216 			goto del_cursor;
6217 	} else
6218 		logflags |= XFS_ILOG_DEXT;
6219 
6220 	/* Add new extent */
6221 	xfs_iext_next(ifp, &icur);
6222 	xfs_iext_insert(ip, &icur, &new, 0);
6223 	ifp->if_nextents++;
6224 
6225 	if (cur) {
6226 		error = xfs_bmbt_lookup_eq(cur, &new, &i);
6227 		if (error)
6228 			goto del_cursor;
6229 		if (XFS_IS_CORRUPT(mp, i != 0)) {
6230 			xfs_btree_mark_sick(cur);
6231 			error = -EFSCORRUPTED;
6232 			goto del_cursor;
6233 		}
6234 		error = xfs_btree_insert(cur, &i);
6235 		if (error)
6236 			goto del_cursor;
6237 		if (XFS_IS_CORRUPT(mp, i != 1)) {
6238 			xfs_btree_mark_sick(cur);
6239 			error = -EFSCORRUPTED;
6240 			goto del_cursor;
6241 		}
6242 	}
6243 
6244 	/*
6245 	 * Convert to a btree if necessary.
6246 	 */
6247 	if (xfs_bmap_needs_btree(ip, whichfork)) {
6248 		int tmp_logflags; /* partial log flag return val */
6249 
6250 		ASSERT(cur == NULL);
6251 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
6252 				&tmp_logflags, whichfork);
6253 		logflags |= tmp_logflags;
6254 	}
6255 
6256 del_cursor:
6257 	if (cur) {
6258 		cur->bc_bmap.allocated = 0;
6259 		xfs_btree_del_cursor(cur, error);
6260 	}
6261 
6262 	if (logflags)
6263 		xfs_trans_log_inode(tp, ip, logflags);
6264 	return error;
6265 }
6266 
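/*
 * Worked example with made-up numbers: splitting an extent of
 * [startoff 10, startblock 200, blockcount 6] at split_fsb 12 leaves the
 * original record as [10, 200, 2] and inserts a new record [12, 202, 4];
 * if_nextents grows by one and the fork is converted to btree format
 * afterwards if the extent count now exceeds the inline limit.
 */
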
6267 /* Record a bmap intent. */
6268 static inline void
6269 __xfs_bmap_add(
6270 	struct xfs_trans		*tp,
6271 	enum xfs_bmap_intent_type	type,
6272 	struct xfs_inode		*ip,
6273 	int				whichfork,
6274 	struct xfs_bmbt_irec		*bmap)
6275 {
6276 	struct xfs_bmap_intent		*bi;
6277 
6278 	if ((whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK) ||
6279 	    bmap->br_startblock == HOLESTARTBLOCK ||
6280 	    bmap->br_startblock == DELAYSTARTBLOCK)
6281 		return;
6282 
6283 	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
6284 	INIT_LIST_HEAD(&bi->bi_list);
6285 	bi->bi_type = type;
6286 	bi->bi_owner = ip;
6287 	bi->bi_whichfork = whichfork;
6288 	bi->bi_bmap = *bmap;
6289 
6290 	xfs_bmap_defer_add(tp, bi);
6291 }
6292 
6293 /* Map an extent into a file. */
6294 void
6295 xfs_bmap_map_extent(
6296 	struct xfs_trans	*tp,
6297 	struct xfs_inode	*ip,
6298 	int			whichfork,
6299 	struct xfs_bmbt_irec	*PREV)
6300 {
6301 	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, whichfork, PREV);
6302 }
6303 
6304 /* Unmap an extent out of a file. */
6305 void
6306 xfs_bmap_unmap_extent(
6307 	struct xfs_trans	*tp,
6308 	struct xfs_inode	*ip,
6309 	int			whichfork,
6310 	struct xfs_bmbt_irec	*PREV)
6311 {
6312 	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, whichfork, PREV);
6313 }
6314 
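/*
 * Illustrative sketch only: callers typically queue a deferred unmap for a
 * real mapping and let the deferred-ops machinery call xfs_bmap_finish_one()
 * later, e.g. (error handling elided, @irec assumed to describe a real
 * extent in the data fork):
 *
 *	xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &irec);
 *	error = xfs_defer_finish(&tp);
 */
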
6315 /*
6316  * Process one of the deferred bmap operations.
6318  */
6319 int
6320 xfs_bmap_finish_one(
6321 	struct xfs_trans		*tp,
6322 	struct xfs_bmap_intent		*bi)
6323 {
6324 	struct xfs_bmbt_irec		*bmap = &bi->bi_bmap;
6325 	int				error = 0;
6326 	int				flags = 0;
6327 
6328 	if (bi->bi_whichfork == XFS_ATTR_FORK)
6329 		flags |= XFS_BMAPI_ATTRFORK;
6330 
6331 	ASSERT(tp->t_highest_agno == NULLAGNUMBER);
6332 
6333 	trace_xfs_bmap_deferred(bi);
6334 
6335 	if (XFS_TEST_ERROR(false, tp->t_mountp, XFS_ERRTAG_BMAP_FINISH_ONE))
6336 		return -EIO;
6337 
6338 	switch (bi->bi_type) {
6339 	case XFS_BMAP_MAP:
6340 		if (bi->bi_bmap.br_state == XFS_EXT_UNWRITTEN)
6341 			flags |= XFS_BMAPI_PREALLOC;
6342 		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
6343 				bmap->br_blockcount, bmap->br_startblock,
6344 				flags);
6345 		bmap->br_blockcount = 0;
6346 		break;
6347 	case XFS_BMAP_UNMAP:
6348 		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
6349 				&bmap->br_blockcount, flags | XFS_BMAPI_REMAP,
6350 				1);
6351 		break;
6352 	default:
6353 		ASSERT(0);
6354 		xfs_bmap_mark_sick(bi->bi_owner, bi->bi_whichfork);
6355 		error = -EFSCORRUPTED;
6356 	}
6357 
6358 	return error;
6359 }
6360 
6361 /* Check that an extent does not have invalid flags or bad ranges. */
6362 xfs_failaddr_t
6363 xfs_bmap_validate_extent_raw(
6364 	struct xfs_mount	*mp,
6365 	bool			rtfile,
6366 	int			whichfork,
6367 	struct xfs_bmbt_irec	*irec)
6368 {
6369 	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
6370 		return __this_address;
6371 
6372 	if (rtfile && whichfork == XFS_DATA_FORK) {
6373 		if (!xfs_verify_rtbext(mp, irec->br_startblock,
6374 					   irec->br_blockcount))
6375 			return __this_address;
6376 	} else {
6377 		if (!xfs_verify_fsbext(mp, irec->br_startblock,
6378 					   irec->br_blockcount))
6379 			return __this_address;
6380 	}
6381 	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
6382 		return __this_address;
6383 	return NULL;
6384 }
6385 
6386 int __init
6387 xfs_bmap_intent_init_cache(void)
6388 {
6389 	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
6390 			sizeof(struct xfs_bmap_intent),
6391 			0, 0, NULL);
6392 
6393 	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
6394 }
6395 
6396 void
6397 xfs_bmap_intent_destroy_cache(void)
6398 {
6399 	kmem_cache_destroy(xfs_bmap_intent_cache);
6400 	xfs_bmap_intent_cache = NULL;
6401 }
6402 
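/*
 * Illustrative pairing only; the real call sites live in the filesystem
 * init/teardown paths rather than in this file:
 *
 *	error = xfs_bmap_intent_init_cache();
 *	if (error)
 *		return error;
 *	...
 *	xfs_bmap_intent_destroy_cache();
 */
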
6403 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6404 xfs_failaddr_t
6405 xfs_bmap_validate_extent(
6406 	struct xfs_inode	*ip,
6407 	int			whichfork,
6408 	struct xfs_bmbt_irec	*irec)
6409 {
6410 	return xfs_bmap_validate_extent_raw(ip->i_mount,
6411 			XFS_IS_REALTIME_INODE(ip), whichfork, irec);
6412 }
6413 
6414 /*
6415  * Used in xfs_itruncate_extents().  This is the maximum number of extents
6416  * freed from a file in a single transaction.
6417  */
6418 #define	XFS_ITRUNC_MAX_EXTENTS	2
6419 
6420 /*
6421  * Unmap every extent in part of an inode's fork.  We don't do any higher level
6422  * invalidation work at all.
6423  */
6424 int
6425 xfs_bunmapi_range(
6426 	struct xfs_trans	**tpp,
6427 	struct xfs_inode	*ip,
6428 	uint32_t		flags,
6429 	xfs_fileoff_t		startoff,
6430 	xfs_fileoff_t		endoff)
6431 {
6432 	xfs_filblks_t		unmap_len = endoff - startoff + 1;
6433 	int			error = 0;
6434 
6435 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
6436 
6437 	while (unmap_len > 0) {
6438 		ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
6439 		error = __xfs_bunmapi(*tpp, ip, startoff, &unmap_len, flags,
6440 				XFS_ITRUNC_MAX_EXTENTS);
6441 		if (error)
6442 			goto out;
6443 
6444 		/* free the just unmapped extents */
6445 		error = xfs_defer_finish(tpp);
6446 		if (error)
6447 			goto out;
6448 		cond_resched();
6449 	}
6450 out:
6451 	return error;
6452 }
6453 
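/*
 * Illustrative use only, assuming a permanent transaction held in @tp and
 * the inode locked in XFS_ILOCK_EXCL as asserted above; removing every
 * mapping between two file offsets might look roughly like:
 *
 *	error = xfs_bunmapi_range(&tp, ip, 0, startoff, endoff);
 *	if (error)
 *		goto out_trans_cancel;
 */
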
6454 struct xfs_bmap_query_range {
6455 	xfs_bmap_query_range_fn	fn;
6456 	void			*priv;
6457 };
6458 
6459 /* Format btree record and pass to our callback. */
6460 STATIC int
6461 xfs_bmap_query_range_helper(
6462 	struct xfs_btree_cur		*cur,
6463 	const union xfs_btree_rec	*rec,
6464 	void				*priv)
6465 {
6466 	struct xfs_bmap_query_range	*query = priv;
6467 	struct xfs_bmbt_irec		irec;
6468 	xfs_failaddr_t			fa;
6469 
6470 	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
6471 	fa = xfs_bmap_validate_extent(cur->bc_ino.ip, cur->bc_ino.whichfork,
6472 			&irec);
6473 	if (fa) {
6474 		xfs_btree_mark_sick(cur);
6475 		return xfs_bmap_complain_bad_rec(cur->bc_ino.ip,
6476 				cur->bc_ino.whichfork, fa, &irec);
6477 	}
6478 
6479 	return query->fn(cur, &irec, query->priv);
6480 }
6481 
6482 /* Find all bmaps. */
6483 int
6484 xfs_bmap_query_all(
6485 	struct xfs_btree_cur		*cur,
6486 	xfs_bmap_query_range_fn		fn,
6487 	void				*priv)
6488 {
6489 	struct xfs_bmap_query_range	query = {
6490 		.priv			= priv,
6491 		.fn			= fn,
6492 	};
6493 
6494 	return xfs_btree_query_all(cur, xfs_bmap_query_range_helper, &query);
6495 }
6496 
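/*
 * Minimal illustrative callback, with names invented for the example: a
 * walk that simply counts the records in the bmap btree could be written as
 *
 *	STATIC int
 *	xfs_example_count_recs(
 *		struct xfs_btree_cur	*cur,
 *		struct xfs_bmbt_irec	*irec,
 *		void			*priv)
 *	{
 *		uint64_t		*nr = priv;
 *
 *		(*nr)++;
 *		return 0;
 *	}
 *
 * and invoked via xfs_bmap_query_all(cur, xfs_example_count_recs, &nr),
 * where @cur is a bmap btree cursor for the fork of interest.
 */
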
6497 /* Helper function to extract extent size hint from inode */
6498 xfs_extlen_t
6499 xfs_get_extsz_hint(
6500 	struct xfs_inode	*ip)
6501 {
6502 	/*
6503 	 * No point in aligning allocations if we need to COW to actually
6504 	 * write to them.
6505 	 */
6506 	if (xfs_is_always_cow_inode(ip))
6507 		return 0;
6508 	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
6509 		return ip->i_extsize;
6510 	if (XFS_IS_REALTIME_INODE(ip) &&
6511 	    ip->i_mount->m_sb.sb_rextsize > 1)
6512 		return ip->i_mount->m_sb.sb_rextsize;
6513 	return 0;
6514 }
6515 
6516 /*
6517  * Helper function to extract CoW extent size hint from inode.
6518  * Between the extent size hint and the CoW extent size hint, we
6519  * return the greater of the two.  If the value is zero (automatic),
6520  * use the default size.
6521  */
6522 xfs_extlen_t
6523 xfs_get_cowextsz_hint(
6524 	struct xfs_inode	*ip)
6525 {
6526 	xfs_extlen_t		a, b;
6527 
6528 	a = 0;
6529 	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
6530 		a = ip->i_cowextsize;
6531 	b = xfs_get_extsz_hint(ip);
6532 
6533 	a = max(a, b);
6534 	if (a == 0)
6535 		return XFS_DEFAULT_COWEXTSZ_HINT;
6536 	return a;
6537 }
6538
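/*
 * Illustrative examples with made-up values: an inode with no CoW extent
 * size set but an extent size hint of 32 blocks gets a CoW hint of 32,
 * while an inode with neither hint set falls back to
 * XFS_DEFAULT_COWEXTSZ_HINT.
 */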