1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_dir2.h"
17 #include "xfs_inode.h"
18 #include "xfs_btree.h"
19 #include "xfs_trans.h"
20 #include "xfs_alloc.h"
21 #include "xfs_bmap.h"
22 #include "xfs_bmap_util.h"
23 #include "xfs_bmap_btree.h"
24 #include "xfs_rtbitmap.h"
25 #include "xfs_errortag.h"
26 #include "xfs_error.h"
27 #include "xfs_quota.h"
28 #include "xfs_trans_space.h"
29 #include "xfs_buf_item.h"
30 #include "xfs_trace.h"
31 #include "xfs_attr_leaf.h"
32 #include "xfs_filestream.h"
33 #include "xfs_rmap.h"
34 #include "xfs_ag.h"
35 #include "xfs_ag_resv.h"
36 #include "xfs_refcount.h"
37 #include "xfs_icache.h"
38 #include "xfs_iomap.h"
39 #include "xfs_health.h"
40 #include "xfs_bmap_item.h"
41 #include "xfs_symlink_remote.h"
42 
43 struct kmem_cache		*xfs_bmap_intent_cache;
44 
45 /*
46  * Miscellaneous helper functions
47  */
48 
49 /*
50  * Compute and fill in the value of the maximum depth of a bmap btree
51  * in this filesystem.  Done once, during mount.
52  */
53 void
54 xfs_bmap_compute_maxlevels(
55 	xfs_mount_t	*mp,		/* file system mount structure */
56 	int		whichfork)	/* data or attr fork */
57 {
58 	uint64_t	maxblocks;	/* max blocks at this level */
59 	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
60 	int		level;		/* btree level */
61 	int		maxrootrecs;	/* max records in root block */
62 	int		minleafrecs;	/* min records in leaf block */
63 	int		minnoderecs;	/* min records in node block */
64 	int		sz;		/* root block size */
65 
66 	/*
67 	 * The maximum number of extents in a fork, hence the maximum number of
68 	 * leaf entries, is controlled by the size of the on-disk extent count.
69 	 *
70 	 * Note that we can no longer assume that if we are in ATTR1 that the
71 	 * fork offset of all the inodes will be
72 	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
73 	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
74 	 * but probably at various positions. Therefore, for both ATTR1 and
75 	 * ATTR2 we have to assume the worst case scenario of a minimum size
76 	 * available.
77 	 */
78 	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
79 				whichfork);
80 	if (whichfork == XFS_DATA_FORK)
81 		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
82 	else
83 		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
84 
85 	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
86 	minleafrecs = mp->m_bmap_dmnr[0];
87 	minnoderecs = mp->m_bmap_dmnr[1];
88 	maxblocks = howmany_64(maxleafents, minleafrecs);
89 	for (level = 1; maxblocks > 1; level++) {
90 		if (maxblocks <= maxrootrecs)
91 			maxblocks = 1;
92 		else
93 			maxblocks = howmany_64(maxblocks, minnoderecs);
94 	}
95 	mp->m_bm_maxlevels[whichfork] = level;
96 	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
97 }
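
/*
 * Editorial aside, not kernel code: a minimal standalone sketch of the level
 * calculation above, compilable in userspace.  howmany_64() is modelled as a
 * plain round-up division, and the record counts passed to main() are
 * hypothetical stand-ins for mp->m_bmap_dmnr[] and the root block capacity.
 */
#include <stdint.h>
#include <stdio.h>

/* round-up division, the moral equivalent of howmany_64() */
static uint64_t howmany64(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

static int bmap_maxlevels(uint64_t maxleafents, uint64_t minleafrecs,
			  uint64_t minnoderecs, uint64_t maxrootrecs)
{
	uint64_t	maxblocks = howmany64(maxleafents, minleafrecs);
	int		level;

	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;	/* remainder fits in the inode root */
		else
			maxblocks = howmany64(maxblocks, minnoderecs);
	}
	return level;
}

int main(void)
{
	/* e.g. 2^31 leaf entries, 125/125 min recs, 9 root records (made up) */
	printf("maxlevels = %d\n",
	       bmap_maxlevels(1ULL << 31, 125, 125, 9));
	return 0;
}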
98 
99 unsigned int
100 xfs_bmap_compute_attr_offset(
101 	struct xfs_mount	*mp)
102 {
103 	if (mp->m_sb.sb_inodesize == 256)
104 		return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
105 	return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
106 }
107 
108 STATIC int				/* error */
109 xfs_bmbt_lookup_eq(
110 	struct xfs_btree_cur	*cur,
111 	struct xfs_bmbt_irec	*irec,
112 	int			*stat)	/* success/failure */
113 {
114 	cur->bc_rec.b = *irec;
115 	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
116 }
117 
118 STATIC int				/* error */
119 xfs_bmbt_lookup_first(
120 	struct xfs_btree_cur	*cur,
121 	int			*stat)	/* success/failure */
122 {
123 	cur->bc_rec.b.br_startoff = 0;
124 	cur->bc_rec.b.br_startblock = 0;
125 	cur->bc_rec.b.br_blockcount = 0;
126 	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
127 }
128 
129 /*
130  * Check if the inode needs to be converted to btree format.
131  */
132 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
133 {
134 	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
135 
136 	return whichfork != XFS_COW_FORK &&
137 		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
138 		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
139 }
140 
141 /*
142  * Check if the inode should be converted to extent format.
143  */
144 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
145 {
146 	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
147 
148 	return whichfork != XFS_COW_FORK &&
149 		ifp->if_format == XFS_DINODE_FMT_BTREE &&
150 		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
151 }
152 
153 /*
154  * Update the record referred to by cur to the value given by irec
155  * This either works (return 0) or gets an EFSCORRUPTED error.
156  */
157 STATIC int
158 xfs_bmbt_update(
159 	struct xfs_btree_cur	*cur,
160 	struct xfs_bmbt_irec	*irec)
161 {
162 	union xfs_btree_rec	rec;
163 
164 	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
165 	return xfs_btree_update(cur, &rec);
166 }
167 
168 /*
169  * Compute the worst-case number of indirect blocks that will be used
170  * for ip's delayed extent of length "len".
171  */
172 STATIC xfs_filblks_t
173 xfs_bmap_worst_indlen(
174 	xfs_inode_t	*ip,		/* incore inode pointer */
175 	xfs_filblks_t	len)		/* delayed extent length */
176 {
177 	int		level;		/* btree level number */
178 	int		maxrecs;	/* maximum record count at this level */
179 	xfs_mount_t	*mp;		/* mount structure */
180 	xfs_filblks_t	rval;		/* return value */
181 
182 	mp = ip->i_mount;
183 	maxrecs = mp->m_bmap_dmxr[0];
184 	for (level = 0, rval = 0;
185 	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
186 	     level++) {
187 		len += maxrecs - 1;
188 		do_div(len, maxrecs);
189 		rval += len;
190 		if (len == 1)
191 			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
192 				level - 1;
193 		if (level == 0)
194 			maxrecs = mp->m_bmap_dmxr[1];
195 	}
196 	return rval;
197 }
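
/*
 * Editorial aside, not kernel code: the same worst-case estimate restated as
 * a pure userspace function.  leafrecs, noderecs and maxlevels are
 * hypothetical stand-ins for mp->m_bmap_dmxr[] and XFS_BM_MAXLEVELS().
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t worst_indlen(uint64_t len, uint64_t leafrecs,
			     uint64_t noderecs, int maxlevels)
{
	uint64_t	maxrecs = leafrecs;
	uint64_t	rval = 0;
	int		level;

	for (level = 0; level < maxlevels; level++) {
		len = (len + maxrecs - 1) / maxrecs; /* blocks at this level */
		rval += len;
		if (len == 1)	/* one block per remaining level suffices */
			return rval + maxlevels - level - 1;
		if (level == 0)
			maxrecs = noderecs;
	}
	return rval;
}

int main(void)
{
	/* an 8192-block delalloc extent with made-up btree geometry */
	printf("worst indlen = %llu\n",
	       (unsigned long long)worst_indlen(8192, 125, 250, 5));
	return 0;
}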
198 
199 /*
200  * Calculate the default attribute fork offset for newly created inodes.
201  */
202 uint
203 xfs_default_attroffset(
204 	struct xfs_inode	*ip)
205 {
206 	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
207 		return roundup(sizeof(xfs_dev_t), 8);
208 	return M_IGEO(ip->i_mount)->attr_fork_offset;
209 }
210 
211 /*
212  * Helper routine to reset inode i_forkoff field when switching attribute fork
213  * from local to extent format - we reset it where possible to make space
214  * available for inline data fork extents.
215  */
216 STATIC void
217 xfs_bmap_forkoff_reset(
218 	xfs_inode_t	*ip,
219 	int		whichfork)
220 {
221 	if (whichfork == XFS_ATTR_FORK &&
222 	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
223 	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
224 		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;
225 
226 		if (dfl_forkoff > ip->i_forkoff)
227 			ip->i_forkoff = dfl_forkoff;
228 	}
229 }
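
/*
 * Editorial aside, not kernel code: i_forkoff stores the attr fork offset in
 * 8-byte units, which is why xfs_default_attroffset() is shifted right by 3
 * above before being compared with (and stored in) i_forkoff.  A tiny
 * round-trip sketch of that packed representation:
 */
#include <assert.h>
#include <stdint.h>

static uint8_t attroffset_to_forkoff(unsigned int bytes)
{
	return bytes >> 3;			/* bytes -> 8-byte units */
}

static unsigned int forkoff_to_attroffset(uint8_t forkoff)
{
	return (unsigned int)forkoff << 3;	/* 8-byte units -> bytes */
}

int main(void)
{
	/* a 120-byte offset round-trips through the packed form */
	assert(forkoff_to_attroffset(attroffset_to_forkoff(120)) == 120);
	return 0;
}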
230 
231 static int
232 xfs_bmap_read_buf(
233 	struct xfs_mount	*mp,		/* file system mount point */
234 	struct xfs_trans	*tp,		/* transaction pointer */
235 	xfs_fsblock_t		fsbno,		/* file system block number */
236 	struct xfs_buf		**bpp)		/* buffer for fsbno */
237 {
238 	struct xfs_buf		*bp;		/* return value */
239 	int			error;
240 
241 	if (!xfs_verify_fsbno(mp, fsbno))
242 		return -EFSCORRUPTED;
243 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
244 			XFS_FSB_TO_DADDR(mp, fsbno), mp->m_bsize, 0, &bp,
245 			&xfs_bmbt_buf_ops);
246 	if (!error) {
247 		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
248 		*bpp = bp;
249 	}
250 	return error;
251 }
252 
253 #ifdef DEBUG
254 STATIC struct xfs_buf *
255 xfs_bmap_get_bp(
256 	struct xfs_btree_cur	*cur,
257 	xfs_fsblock_t		bno)
258 {
259 	struct xfs_log_item	*lip;
260 	int			i;
261 
262 	if (!cur)
263 		return NULL;
264 
265 	for (i = 0; i < cur->bc_maxlevels; i++) {
266 		if (!cur->bc_levels[i].bp)
267 			break;
268 		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
269 			return cur->bc_levels[i].bp;
270 	}
271 
272 	/* Chase down all the log items to see if the bp is there */
273 	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
274 		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;
275 
276 		if (bip->bli_item.li_type == XFS_LI_BUF &&
277 		    xfs_buf_daddr(bip->bli_buf) == bno)
278 			return bip->bli_buf;
279 	}
280 
281 	return NULL;
282 }
283 
284 STATIC void
285 xfs_check_block(
286 	struct xfs_btree_block	*block,
287 	xfs_mount_t		*mp,
288 	int			root,
289 	short			sz)
290 {
291 	int			i, j, dmxr;
292 	__be64			*pp, *thispa;	/* pointer to block address */
293 	xfs_bmbt_key_t		*prevp, *keyp;
294 
295 	ASSERT(be16_to_cpu(block->bb_level) > 0);
296 
297 	prevp = NULL;
298 	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
299 		dmxr = mp->m_bmap_dmxr[0];
300 		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
301 
302 		if (prevp) {
303 			ASSERT(be64_to_cpu(prevp->br_startoff) <
304 			       be64_to_cpu(keyp->br_startoff));
305 		}
306 		prevp = keyp;
307 
308 		/*
309 		 * Compare the block numbers to see if there are dups.
310 		 */
311 		if (root)
312 			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
313 		else
314 			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
315 
316 		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
317 			if (root)
318 				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
319 			else
320 				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
321 			if (*thispa == *pp) {
322 				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
323 					__func__, j, i,
324 					(unsigned long long)be64_to_cpu(*thispa));
325 				xfs_err(mp, "%s: ptrs are equal in node",
326 					__func__);
327 				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
328 			}
329 		}
330 	}
331 }
332 
333 /*
334  * Check that the extents for the inode ip are in the right order in all
335  * btree leaves. This becomes prohibitively expensive for large extent count
336  * files, so don't bother with inodes that have more than 10,000 extents in
337  * them. The btree record ordering checks will still be done, and for such
338  * large bmapbt constructs those will catch most corruptions.
339  */
340 STATIC void
341 xfs_bmap_check_leaf_extents(
342 	struct xfs_btree_cur	*cur,	/* btree cursor or null */
343 	xfs_inode_t		*ip,		/* incore inode pointer */
344 	int			whichfork)	/* data or attr fork */
345 {
346 	struct xfs_mount	*mp = ip->i_mount;
347 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
348 	struct xfs_btree_block	*block;	/* current btree block */
349 	xfs_fsblock_t		bno;	/* block # of "block" */
350 	struct xfs_buf		*bp;	/* buffer for "block" */
351 	int			error;	/* error return value */
352 	xfs_extnum_t		i=0, j;	/* index into the extents list */
353 	int			level;	/* btree level, for checking */
354 	__be64			*pp;	/* pointer to block address */
355 	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
356 	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
357 	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
358 	int			bp_release = 0;
359 
360 	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
361 		return;
362 
363 	/* skip large extent count inodes */
364 	if (ip->i_df.if_nextents > 10000)
365 		return;
366 
367 	bno = NULLFSBLOCK;
368 	block = ifp->if_broot;
369 	/*
370 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
371 	 */
372 	level = be16_to_cpu(block->bb_level);
373 	ASSERT(level > 0);
374 	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
375 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
376 	bno = be64_to_cpu(*pp);
377 
378 	ASSERT(bno != NULLFSBLOCK);
379 	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
380 	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
381 
382 	/*
383 	 * Go down the tree until leaf level is reached, following the first
384 	 * pointer (leftmost) at each level.
385 	 */
386 	while (level-- > 0) {
387 		/* See if buf is in cur first */
388 		bp_release = 0;
389 		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
390 		if (!bp) {
391 			bp_release = 1;
392 			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
393 			if (xfs_metadata_is_sick(error))
394 				xfs_btree_mark_sick(cur);
395 			if (error)
396 				goto error_norelse;
397 		}
398 		block = XFS_BUF_TO_BLOCK(bp);
399 		if (level == 0)
400 			break;
401 
402 		/*
403 		 * Check this block for basic sanity (increasing keys and
404 		 * no duplicate blocks).
405 		 */
406 
407 		xfs_check_block(block, mp, 0, 0);
408 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
409 		bno = be64_to_cpu(*pp);
410 		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
411 			xfs_btree_mark_sick(cur);
412 			error = -EFSCORRUPTED;
413 			goto error0;
414 		}
415 		if (bp_release) {
416 			bp_release = 0;
417 			xfs_trans_brelse(NULL, bp);
418 		}
419 	}
420 
421 	/*
422 	 * Here with bp and block set to the leftmost leaf node in the tree.
423 	 */
424 	i = 0;
425 
426 	/*
427 	 * Loop over all leaf nodes checking that all extents are in the right order.
428 	 */
429 	for (;;) {
430 		xfs_fsblock_t	nextbno;
431 		xfs_extnum_t	num_recs;
432 
433 
434 		num_recs = xfs_btree_get_numrecs(block);
435 
436 		/*
437 		 * Read-ahead the next leaf block, if any.
438 		 */
439 
440 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
441 
442 		/*
443 		 * Check all the extents to make sure they are OK.
444 		 * If we had a previous block, the last entry should
445 		 * conform with the first entry in this one.
446 		 */
447 
448 		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
449 		if (i) {
450 			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
451 			       xfs_bmbt_disk_get_blockcount(&last) <=
452 			       xfs_bmbt_disk_get_startoff(ep));
453 		}
454 		for (j = 1; j < num_recs; j++) {
455 			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
456 			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
457 			       xfs_bmbt_disk_get_blockcount(ep) <=
458 			       xfs_bmbt_disk_get_startoff(nextp));
459 			ep = nextp;
460 		}
461 
462 		last = *ep;
463 		i += num_recs;
464 		if (bp_release) {
465 			bp_release = 0;
466 			xfs_trans_brelse(NULL, bp);
467 		}
468 		bno = nextbno;
469 		/*
470 		 * If we've reached the end, stop.
471 		 */
472 		if (bno == NULLFSBLOCK)
473 			break;
474 
475 		bp_release = 0;
476 		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
477 		if (!bp) {
478 			bp_release = 1;
479 			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
480 			if (xfs_metadata_is_sick(error))
481 				xfs_btree_mark_sick(cur);
482 			if (error)
483 				goto error_norelse;
484 		}
485 		block = XFS_BUF_TO_BLOCK(bp);
486 	}
487 
488 	return;
489 
490 error0:
491 	xfs_warn(mp, "%s: at error0", __func__);
492 	if (bp_release)
493 		xfs_trans_brelse(NULL, bp);
494 error_norelse:
495 	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
496 		__func__, i);
497 	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
498 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
499 	return;
500 }
501 
502 /*
503  * Validate that the bmbt_irecs being returned from bmapi are valid
504  * given the caller's original parameters.  Specifically check the
505  * ranges of the returned irecs to ensure that they only extend beyond
506  * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
507  */
508 STATIC void
509 xfs_bmap_validate_ret(
510 	xfs_fileoff_t		bno,
511 	xfs_filblks_t		len,
512 	uint32_t		flags,
513 	xfs_bmbt_irec_t		*mval,
514 	int			nmap,
515 	int			ret_nmap)
516 {
517 	int			i;		/* index to map values */
518 
519 	ASSERT(ret_nmap <= nmap);
520 
521 	for (i = 0; i < ret_nmap; i++) {
522 		ASSERT(mval[i].br_blockcount > 0);
523 		if (!(flags & XFS_BMAPI_ENTIRE)) {
524 			ASSERT(mval[i].br_startoff >= bno);
525 			ASSERT(mval[i].br_blockcount <= len);
526 			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
527 			       bno + len);
528 		} else {
529 			ASSERT(mval[i].br_startoff < bno + len);
530 			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
531 			       bno);
532 		}
533 		ASSERT(i == 0 ||
534 		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
535 		       mval[i].br_startoff);
536 		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
537 		       mval[i].br_startblock != HOLESTARTBLOCK);
538 		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
539 		       mval[i].br_state == XFS_EXT_UNWRITTEN);
540 	}
541 }
542 
543 #else
544 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
545 #define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
546 #endif /* DEBUG */
547 
548 /*
549  * Inode fork format manipulation functions
550  */
551 
552 /*
553  * Convert the inode format to extent format if it currently is in btree format,
554  * but the extent list is small enough that it fits into the extent format.
555  *
556  * Since the extents are already in-core, all we have to do is give up the space
557  * for the btree root and pitch the leaf block.
558  */
559 STATIC int				/* error */
560 xfs_bmap_btree_to_extents(
561 	struct xfs_trans	*tp,	/* transaction pointer */
562 	struct xfs_inode	*ip,	/* incore inode pointer */
563 	struct xfs_btree_cur	*cur,	/* btree cursor */
564 	int			*logflagsp, /* inode logging flags */
565 	int			whichfork)  /* data or attr fork */
566 {
567 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
568 	struct xfs_mount	*mp = ip->i_mount;
569 	struct xfs_btree_block	*rblock = ifp->if_broot;
570 	struct xfs_btree_block	*cblock;/* child btree block */
571 	xfs_fsblock_t		cbno;	/* child block number */
572 	struct xfs_buf		*cbp;	/* child block's buffer */
573 	int			error;	/* error return value */
574 	__be64			*pp;	/* ptr to block address */
575 	struct xfs_owner_info	oinfo;
576 
577 	/* check if we actually need the extent format first: */
578 	if (!xfs_bmap_wants_extents(ip, whichfork))
579 		return 0;
580 
581 	ASSERT(cur);
582 	ASSERT(whichfork != XFS_COW_FORK);
583 	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
584 	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
585 	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
586 	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
587 
588 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
589 	cbno = be64_to_cpu(*pp);
590 #ifdef DEBUG
591 	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_verify_fsbno(mp, cbno))) {
592 		xfs_btree_mark_sick(cur);
593 		return -EFSCORRUPTED;
594 	}
595 #endif
596 	error = xfs_bmap_read_buf(mp, tp, cbno, &cbp);
597 	if (xfs_metadata_is_sick(error))
598 		xfs_btree_mark_sick(cur);
599 	if (error)
600 		return error;
601 	cblock = XFS_BUF_TO_BLOCK(cbp);
602 	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
603 		return error;
604 
605 	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
606 	error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
607 			XFS_AG_RESV_NONE, false);
608 	if (error)
609 		return error;
610 
611 	ip->i_nblocks--;
612 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
613 	xfs_trans_binval(tp, cbp);
614 	if (cur->bc_levels[0].bp == cbp)
615 		cur->bc_levels[0].bp = NULL;
616 	xfs_iroot_realloc(ip, -1, whichfork);
617 	ASSERT(ifp->if_broot == NULL);
618 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
619 	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
620 	return 0;
621 }
622 
623 /*
624  * Convert an extents-format file into a btree-format file.
625  * The new file will have a root block (in the inode) and a single child block.
626  */
627 STATIC int					/* error */
628 xfs_bmap_extents_to_btree(
629 	struct xfs_trans	*tp,		/* transaction pointer */
630 	struct xfs_inode	*ip,		/* incore inode pointer */
631 	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
632 	int			wasdel,		/* converting a delayed alloc */
633 	int			*logflagsp,	/* inode logging flags */
634 	int			whichfork)	/* data or attr fork */
635 {
636 	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
637 	struct xfs_buf		*abp;		/* buffer for ablock */
638 	struct xfs_alloc_arg	args;		/* allocation arguments */
639 	struct xfs_bmbt_rec	*arp;		/* child record pointer */
640 	struct xfs_btree_block	*block;		/* btree root block */
641 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
642 	int			error;		/* error return value */
643 	struct xfs_ifork	*ifp;		/* inode fork pointer */
644 	struct xfs_bmbt_key	*kp;		/* root block key pointer */
645 	struct xfs_mount	*mp;		/* mount structure */
646 	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
647 	struct xfs_iext_cursor	icur;
648 	struct xfs_bmbt_irec	rec;
649 	xfs_extnum_t		cnt = 0;
650 
651 	mp = ip->i_mount;
652 	ASSERT(whichfork != XFS_COW_FORK);
653 	ifp = xfs_ifork_ptr(ip, whichfork);
654 	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);
655 
656 	/*
657 	 * Make space in the inode incore. This needs to be undone if we fail
658 	 * to expand the root.
659 	 */
660 	xfs_iroot_realloc(ip, 1, whichfork);
661 
662 	/*
663 	 * Fill in the root.
664 	 */
665 	block = ifp->if_broot;
666 	xfs_bmbt_init_block(ip, block, NULL, 1, 1);
667 	/*
668 	 * Need a cursor.  Can't allocate until bb_level is filled in.
669 	 */
670 	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
671 	if (wasdel)
672 		cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
673 	/*
674 	 * Convert to a btree with two levels, one record in root.
675 	 */
676 	ifp->if_format = XFS_DINODE_FMT_BTREE;
677 	memset(&args, 0, sizeof(args));
678 	args.tp = tp;
679 	args.mp = mp;
680 	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
681 
682 	args.minlen = args.maxlen = args.prod = 1;
683 	args.wasdel = wasdel;
684 	*logflagsp = 0;
685 	error = xfs_alloc_vextent_start_ag(&args,
686 				XFS_INO_TO_FSB(mp, ip->i_ino));
687 	if (error)
688 		goto out_root_realloc;
689 
690 	/*
691 	 * Allocation can't fail, the space was reserved.
692 	 */
693 	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
694 		error = -ENOSPC;
695 		goto out_root_realloc;
696 	}
697 
698 	cur->bc_bmap.allocated++;
699 	ip->i_nblocks++;
700 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
701 	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
702 			XFS_FSB_TO_DADDR(mp, args.fsbno),
703 			mp->m_bsize, 0, &abp);
704 	if (error)
705 		goto out_unreserve_dquot;
706 
707 	/*
708 	 * Fill in the child block.
709 	 */
710 	ablock = XFS_BUF_TO_BLOCK(abp);
711 	xfs_bmbt_init_block(ip, ablock, abp, 0, 0);
712 
713 	for_each_xfs_iext(ifp, &icur, &rec) {
714 		if (isnullstartblock(rec.br_startblock))
715 			continue;
716 		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
717 		xfs_bmbt_disk_set_all(arp, &rec);
718 		cnt++;
719 	}
720 	ASSERT(cnt == ifp->if_nextents);
721 	xfs_btree_set_numrecs(ablock, cnt);
722 
723 	/*
724 	 * Fill in the root key and pointer.
725 	 */
726 	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
727 	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
728 	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
729 	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
730 						be16_to_cpu(block->bb_level)));
731 	*pp = cpu_to_be64(args.fsbno);
732 
733 	/*
734 	 * Do all this logging at the end so that
735 	 * the root is at the right level.
736 	 */
737 	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
738 	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
739 	ASSERT(*curp == NULL);
740 	*curp = cur;
741 	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
742 	return 0;
743 
744 out_unreserve_dquot:
745 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
746 out_root_realloc:
747 	xfs_iroot_realloc(ip, -1, whichfork);
748 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
749 	ASSERT(ifp->if_broot == NULL);
750 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
751 
752 	return error;
753 }
754 
755 /*
756  * Convert a local file to an extents file.
757  * This code is out of bounds for data forks of regular files,
758  * since the file data needs to get logged so things will stay consistent.
759  * (The bmap-level manipulations are ok, though).
760  */
761 void
762 xfs_bmap_local_to_extents_empty(
763 	struct xfs_trans	*tp,
764 	struct xfs_inode	*ip,
765 	int			whichfork)
766 {
767 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
768 
769 	ASSERT(whichfork != XFS_COW_FORK);
770 	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
771 	ASSERT(ifp->if_bytes == 0);
772 	ASSERT(ifp->if_nextents == 0);
773 
774 	xfs_bmap_forkoff_reset(ip, whichfork);
775 	ifp->if_data = NULL;
776 	ifp->if_height = 0;
777 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
778 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
779 }
780 
781 
782 int					/* error */
783 xfs_bmap_local_to_extents(
784 	xfs_trans_t	*tp,		/* transaction pointer */
785 	xfs_inode_t	*ip,		/* incore inode pointer */
786 	xfs_extlen_t	total,		/* total blocks needed by transaction */
787 	int		*logflagsp,	/* inode logging flags */
788 	int		whichfork,
789 	void		(*init_fn)(struct xfs_trans *tp,
790 				   struct xfs_buf *bp,
791 				   struct xfs_inode *ip,
792 				   struct xfs_ifork *ifp, void *priv),
793 	void		*priv)
794 {
795 	int		error = 0;
796 	int		flags;		/* logging flags returned */
797 	struct xfs_ifork *ifp;		/* inode fork pointer */
798 	xfs_alloc_arg_t	args;		/* allocation arguments */
799 	struct xfs_buf	*bp;		/* buffer for extent block */
800 	struct xfs_bmbt_irec rec;
801 	struct xfs_iext_cursor icur;
802 
803 	/*
804 	 * We don't want to deal with the case of keeping inode data inline yet.
805 	 * So passing in the data fork of a regular inode is invalid.
806 	 */
807 	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
808 	ifp = xfs_ifork_ptr(ip, whichfork);
809 	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
810 
811 	if (!ifp->if_bytes) {
812 		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
813 		flags = XFS_ILOG_CORE;
814 		goto done;
815 	}
816 
817 	flags = 0;
818 	error = 0;
819 	memset(&args, 0, sizeof(args));
820 	args.tp = tp;
821 	args.mp = ip->i_mount;
822 	args.total = total;
823 	args.minlen = args.maxlen = args.prod = 1;
824 	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
825 
826 	/*
827 	 * Allocate a block.  We know we need only one, since the
828 	 * file currently fits in an inode.
829 	 */
830 	args.total = total;
831 	args.minlen = args.maxlen = args.prod = 1;
832 	error = xfs_alloc_vextent_start_ag(&args,
833 			XFS_INO_TO_FSB(args.mp, ip->i_ino));
834 	if (error)
835 		goto done;
836 
837 	/* Can't fail, the space was reserved. */
838 	ASSERT(args.fsbno != NULLFSBLOCK);
839 	ASSERT(args.len == 1);
840 	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
841 			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
842 			args.mp->m_bsize, 0, &bp);
843 	if (error)
844 		goto done;
845 
846 	/*
847 	 * Initialize the block, copy the data and log the remote buffer.
848 	 *
849 	 * The callout is responsible for logging because the remote format
850 	 * might differ from the local format and thus we don't know how much to
851 	 * log here. Note that init_fn must also set the buffer log item type
852 	 * correctly.
853 	 */
854 	init_fn(tp, bp, ip, ifp, priv);
855 
856 	/* account for the change in fork size */
857 	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
858 	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
859 	flags |= XFS_ILOG_CORE;
860 
861 	ifp->if_data = NULL;
862 	ifp->if_height = 0;
863 
864 	rec.br_startoff = 0;
865 	rec.br_startblock = args.fsbno;
866 	rec.br_blockcount = 1;
867 	rec.br_state = XFS_EXT_NORM;
868 	xfs_iext_first(ifp, &icur);
869 	xfs_iext_insert(ip, &icur, &rec, 0);
870 
871 	ifp->if_nextents = 1;
872 	ip->i_nblocks = 1;
873 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
874 	flags |= xfs_ilog_fext(whichfork);
875 
876 done:
877 	*logflagsp = flags;
878 	return error;
879 }
880 
881 /*
882  * Called from xfs_bmap_add_attrfork to handle btree format files.
883  */
884 STATIC int					/* error */
885 xfs_bmap_add_attrfork_btree(
886 	xfs_trans_t		*tp,		/* transaction pointer */
887 	xfs_inode_t		*ip,		/* incore inode pointer */
888 	int			*flags)		/* inode logging flags */
889 {
890 	struct xfs_btree_block	*block = ip->i_df.if_broot;
891 	struct xfs_btree_cur	*cur;		/* btree cursor */
892 	int			error;		/* error return value */
893 	xfs_mount_t		*mp;		/* file system mount struct */
894 	int			stat;		/* newroot status */
895 
896 	mp = ip->i_mount;
897 
898 	if (XFS_BMAP_BMDR_SPACE(block) <= xfs_inode_data_fork_size(ip))
899 		*flags |= XFS_ILOG_DBROOT;
900 	else {
901 		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
902 		error = xfs_bmbt_lookup_first(cur, &stat);
903 		if (error)
904 			goto error0;
905 		/* must be at least one entry */
906 		if (XFS_IS_CORRUPT(mp, stat != 1)) {
907 			xfs_btree_mark_sick(cur);
908 			error = -EFSCORRUPTED;
909 			goto error0;
910 		}
911 		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
912 			goto error0;
913 		if (stat == 0) {
914 			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
915 			return -ENOSPC;
916 		}
917 		cur->bc_bmap.allocated = 0;
918 		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
919 	}
920 	return 0;
921 error0:
922 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
923 	return error;
924 }
925 
926 /*
927  * Called from xfs_bmap_add_attrfork to handle extents format files.
928  */
929 STATIC int					/* error */
930 xfs_bmap_add_attrfork_extents(
931 	struct xfs_trans	*tp,		/* transaction pointer */
932 	struct xfs_inode	*ip,		/* incore inode pointer */
933 	int			*flags)		/* inode logging flags */
934 {
935 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
936 	int			error;		/* error return value */
937 
938 	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
939 	    xfs_inode_data_fork_size(ip))
940 		return 0;
941 	cur = NULL;
942 	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
943 					  XFS_DATA_FORK);
944 	if (cur) {
945 		cur->bc_bmap.allocated = 0;
946 		xfs_btree_del_cursor(cur, error);
947 	}
948 	return error;
949 }
950 
951 /*
952  * Called from xfs_bmap_add_attrfork to handle local format files. Each
953  * different data fork content type needs a different callout to do the
954  * conversion. Some are basic and only require special block initialisation
955  * callouts for the data formatting; others (directories) are so specialised they
956  * handle everything themselves.
957  *
958  * XXX (dgc): investigate whether directory conversion can use the generic
959  * formatting callout. It should be possible - it's just a very complex
960  * formatter.
961  */
962 STATIC int					/* error */
963 xfs_bmap_add_attrfork_local(
964 	struct xfs_trans	*tp,		/* transaction pointer */
965 	struct xfs_inode	*ip,		/* incore inode pointer */
966 	int			*flags)		/* inode logging flags */
967 {
968 	struct xfs_da_args	dargs;		/* args for dir/attr code */
969 
970 	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
971 		return 0;
972 
973 	if (S_ISDIR(VFS_I(ip)->i_mode)) {
974 		memset(&dargs, 0, sizeof(dargs));
975 		dargs.geo = ip->i_mount->m_dir_geo;
976 		dargs.dp = ip;
977 		dargs.total = dargs.geo->fsbcount;
978 		dargs.whichfork = XFS_DATA_FORK;
979 		dargs.trans = tp;
980 		dargs.owner = ip->i_ino;
981 		return xfs_dir2_sf_to_block(&dargs);
982 	}
983 
984 	if (S_ISLNK(VFS_I(ip)->i_mode))
985 		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
986 				XFS_DATA_FORK, xfs_symlink_local_to_remote,
987 				NULL);
988 
989 	/* should only be called for types that support local format data */
990 	ASSERT(0);
991 	xfs_bmap_mark_sick(ip, XFS_ATTR_FORK);
992 	return -EFSCORRUPTED;
993 }
994 
995 /*
996  * Set an inode attr fork offset based on the format of the data fork.
997  */
998 static int
999 xfs_bmap_set_attrforkoff(
1000 	struct xfs_inode	*ip,
1001 	int			size,
1002 	int			*version)
1003 {
1004 	int			default_size = xfs_default_attroffset(ip) >> 3;
1005 
1006 	switch (ip->i_df.if_format) {
1007 	case XFS_DINODE_FMT_DEV:
1008 		ip->i_forkoff = default_size;
1009 		break;
1010 	case XFS_DINODE_FMT_LOCAL:
1011 	case XFS_DINODE_FMT_EXTENTS:
1012 	case XFS_DINODE_FMT_BTREE:
1013 		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1014 		if (!ip->i_forkoff)
1015 			ip->i_forkoff = default_size;
1016 		else if (xfs_has_attr2(ip->i_mount) && version)
1017 			*version = 2;
1018 		break;
1019 	default:
1020 		ASSERT(0);
1021 		return -EINVAL;
1022 	}
1023 
1024 	return 0;
1025 }
1026 
1027 /*
1028  * Convert inode from non-attributed to attributed.  Caller must hold the
1029  * ILOCK_EXCL and the file cannot have an attr fork.
1030  */
1031 int						/* error code */
1032 xfs_bmap_add_attrfork(
1033 	struct xfs_trans	*tp,
1034 	struct xfs_inode	*ip,		/* incore inode pointer */
1035 	int			size,		/* space new attribute needs */
1036 	int			rsvd)		/* xact may use reserved blks */
1037 {
1038 	struct xfs_mount	*mp = tp->t_mountp;
1039 	int			version = 1;	/* superblock attr version */
1040 	int			logflags;	/* logging flags */
1041 	int			error;		/* error return value */
1042 
1043 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1044 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1045 	ASSERT(!xfs_inode_has_attr_fork(ip));
1046 
1047 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1048 	error = xfs_bmap_set_attrforkoff(ip, size, &version);
1049 	if (error)
1050 		return error;
1051 
1052 	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
1053 	logflags = 0;
1054 	switch (ip->i_df.if_format) {
1055 	case XFS_DINODE_FMT_LOCAL:
1056 		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
1057 		break;
1058 	case XFS_DINODE_FMT_EXTENTS:
1059 		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
1060 		break;
1061 	case XFS_DINODE_FMT_BTREE:
1062 		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
1063 		break;
1064 	default:
1065 		error = 0;
1066 		break;
1067 	}
1068 	if (logflags)
1069 		xfs_trans_log_inode(tp, ip, logflags);
1070 	if (error)
1071 		return error;
1072 	if (!xfs_has_attr(mp) ||
1073 	   (!xfs_has_attr2(mp) && version == 2)) {
1074 		bool log_sb = false;
1075 
1076 		spin_lock(&mp->m_sb_lock);
1077 		if (!xfs_has_attr(mp)) {
1078 			xfs_add_attr(mp);
1079 			log_sb = true;
1080 		}
1081 		if (!xfs_has_attr2(mp) && version == 2) {
1082 			xfs_add_attr2(mp);
1083 			log_sb = true;
1084 		}
1085 		spin_unlock(&mp->m_sb_lock);
1086 		if (log_sb)
1087 			xfs_log_sb(tp);
1088 	}
1089 
1090 	return 0;
1091 }
1092 
1093 /*
1094  * Internal and external extent tree search functions.
1095  */
1096 
1097 struct xfs_iread_state {
1098 	struct xfs_iext_cursor	icur;
1099 	xfs_extnum_t		loaded;
1100 };
1101 
1102 int
1103 xfs_bmap_complain_bad_rec(
1104 	struct xfs_inode		*ip,
1105 	int				whichfork,
1106 	xfs_failaddr_t			fa,
1107 	const struct xfs_bmbt_irec	*irec)
1108 {
1109 	struct xfs_mount		*mp = ip->i_mount;
1110 	const char			*forkname;
1111 
1112 	switch (whichfork) {
1113 	case XFS_DATA_FORK:	forkname = "data"; break;
1114 	case XFS_ATTR_FORK:	forkname = "attr"; break;
1115 	case XFS_COW_FORK:	forkname = "CoW"; break;
1116 	default:		forkname = "???"; break;
1117 	}
1118 
1119 	xfs_warn(mp,
1120  "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
1121 				ip->i_ino, forkname, fa);
1122 	xfs_warn(mp,
1123 		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
1124 		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
1125 		irec->br_state);
1126 
1127 	return -EFSCORRUPTED;
1128 }
1129 
1130 /* Stuff every bmbt record from this block into the incore extent map. */
1131 static int
1132 xfs_iread_bmbt_block(
1133 	struct xfs_btree_cur	*cur,
1134 	int			level,
1135 	void			*priv)
1136 {
1137 	struct xfs_iread_state	*ir = priv;
1138 	struct xfs_mount	*mp = cur->bc_mp;
1139 	struct xfs_inode	*ip = cur->bc_ino.ip;
1140 	struct xfs_btree_block	*block;
1141 	struct xfs_buf		*bp;
1142 	struct xfs_bmbt_rec	*frp;
1143 	xfs_extnum_t		num_recs;
1144 	xfs_extnum_t		j;
1145 	int			whichfork = cur->bc_ino.whichfork;
1146 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1147 
1148 	block = xfs_btree_get_block(cur, level, &bp);
1149 
1150 	/* Abort if we find more records than nextents. */
1151 	num_recs = xfs_btree_get_numrecs(block);
1152 	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
1153 		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
1154 				(unsigned long long)ip->i_ino);
1155 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
1156 				sizeof(*block), __this_address);
1157 		xfs_bmap_mark_sick(ip, whichfork);
1158 		return -EFSCORRUPTED;
1159 	}
1160 
1161 	/* Copy records into the incore cache. */
1162 	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
1163 	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
1164 		struct xfs_bmbt_irec	new;
1165 		xfs_failaddr_t		fa;
1166 
1167 		xfs_bmbt_disk_get_all(frp, &new);
1168 		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
1169 		if (fa) {
1170 			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
1171 					"xfs_iread_extents(2)", frp,
1172 					sizeof(*frp), fa);
1173 			xfs_bmap_mark_sick(ip, whichfork);
1174 			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
1175 					&new);
1176 		}
1177 		xfs_iext_insert(ip, &ir->icur, &new,
1178 				xfs_bmap_fork_to_state(whichfork));
1179 		trace_xfs_read_extent(ip, &ir->icur,
1180 				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
1181 		xfs_iext_next(ifp, &ir->icur);
1182 	}
1183 
1184 	return 0;
1185 }
1186 
1187 /*
1188  * Read in extents from a btree-format inode.
1189  */
1190 int
1191 xfs_iread_extents(
1192 	struct xfs_trans	*tp,
1193 	struct xfs_inode	*ip,
1194 	int			whichfork)
1195 {
1196 	struct xfs_iread_state	ir;
1197 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1198 	struct xfs_mount	*mp = ip->i_mount;
1199 	struct xfs_btree_cur	*cur;
1200 	int			error;
1201 
1202 	if (!xfs_need_iread_extents(ifp))
1203 		return 0;
1204 
1205 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1206 
1207 	ir.loaded = 0;
1208 	xfs_iext_first(ifp, &ir.icur);
1209 	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
1210 	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
1211 			XFS_BTREE_VISIT_RECORDS, &ir);
1212 	xfs_btree_del_cursor(cur, error);
1213 	if (error)
1214 		goto out;
1215 
1216 	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
1217 		xfs_bmap_mark_sick(ip, whichfork);
1218 		error = -EFSCORRUPTED;
1219 		goto out;
1220 	}
1221 	ASSERT(ir.loaded == xfs_iext_count(ifp));
1222 	/*
1223 	 * Use release semantics so that we can use acquire semantics in
1224 	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
1225 	 * after that load.
1226 	 */
1227 	smp_store_release(&ifp->if_needextents, 0);
1228 	return 0;
1229 out:
1230 	if (xfs_metadata_is_sick(error))
1231 		xfs_bmap_mark_sick(ip, whichfork);
1232 	xfs_iext_destroy(ifp);
1233 	return error;
1234 }
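
/*
 * Editorial aside, not kernel code: a C11-atomics model of the
 * release/acquire pairing described above.  A reader that observes
 * needextents == 0 via the acquire load is guaranteed to also observe the
 * fully populated tree published before the release store.
 */
#include <stdatomic.h>
#include <stddef.h>

struct demo_fork {
	void		*tree;		/* lazily built extent tree */
	_Atomic int	needextents;	/* 1 until the tree is loaded */
};

static void demo_publish(struct demo_fork *f, void *tree)
{
	f->tree = tree;			/* build the tree first ... */
	atomic_store_explicit(&f->needextents, 0,
			      memory_order_release);	/* ... then publish */
}

static void *demo_lookup(struct demo_fork *f)
{
	if (atomic_load_explicit(&f->needextents, memory_order_acquire))
		return NULL;		/* caller must load the extents */
	return f->tree;			/* safe to dereference after acquire */
}

int main(void)
{
	static int tree_payload;
	struct demo_fork f = { .tree = NULL, .needextents = 1 };

	demo_publish(&f, &tree_payload);
	return demo_lookup(&f) == &tree_payload ? 0 : 1;
}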
1235 
1236 /*
1237  * Returns the file-relative block number of the first unused block(s) in the
1238  * given fork with at least "len" logically contiguous blocks free.  This is
1239  * the lowest-address hole if the fork has holes, else the first block past
1240  * the end of the fork.  Return 0 if the fork is currently local (in-inode).
1241  */
1242 int						/* error */
1243 xfs_bmap_first_unused(
1244 	struct xfs_trans	*tp,		/* transaction pointer */
1245 	struct xfs_inode	*ip,		/* incore inode */
1246 	xfs_extlen_t		len,		/* size of hole to find */
1247 	xfs_fileoff_t		*first_unused,	/* unused block */
1248 	int			whichfork)	/* data or attr fork */
1249 {
1250 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1251 	struct xfs_bmbt_irec	got;
1252 	struct xfs_iext_cursor	icur;
1253 	xfs_fileoff_t		lastaddr = 0;
1254 	xfs_fileoff_t		lowest, max;
1255 	int			error;
1256 
1257 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
1258 		*first_unused = 0;
1259 		return 0;
1260 	}
1261 
1262 	ASSERT(xfs_ifork_has_extents(ifp));
1263 
1264 	error = xfs_iread_extents(tp, ip, whichfork);
1265 	if (error)
1266 		return error;
1267 
1268 	lowest = max = *first_unused;
1269 	for_each_xfs_iext(ifp, &icur, &got) {
1270 		/*
1271 		 * See if the hole before this extent will work.
1272 		 */
1273 		if (got.br_startoff >= lowest + len &&
1274 		    got.br_startoff - max >= len)
1275 			break;
1276 		lastaddr = got.br_startoff + got.br_blockcount;
1277 		max = XFS_FILEOFF_MAX(lastaddr, lowest);
1278 	}
1279 
1280 	*first_unused = max;
1281 	return 0;
1282 }
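
/*
 * Editorial aside, not kernel code: the hole-search loop above, restated
 * over a plain sorted array of (startoff, blockcount) mappings.  The struct
 * and the sample values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_map {
	uint64_t	startoff;
	uint64_t	blockcount;
};

static uint64_t demo_first_unused(const struct demo_map *m, int nmaps,
				  uint64_t lowest, uint64_t len)
{
	uint64_t	max = lowest;
	int		i;

	for (i = 0; i < nmaps; i++) {
		uint64_t end = m[i].startoff + m[i].blockcount;

		/* does the hole before this mapping hold len blocks? */
		if (m[i].startoff >= lowest + len &&
		    m[i].startoff - max >= len)
			break;
		if (end > max)	/* mappings are sorted; track the far edge */
			max = end;
	}
	return max;
}

int main(void)
{
	struct demo_map m[] = { { 0, 4 }, { 8, 2 }, { 20, 1 } };

	/* first 4-block hole is at offset 4, between the first two maps */
	printf("%llu\n",
	       (unsigned long long)demo_first_unused(m, 3, 0, 4));
	return 0;
}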
1283 
1284 /*
1285  * Returns the file-relative block number of the last block - 1 before
1286  * last_block (input value) in the file.
1287  * This is not based on i_size, it is based on the extent records.
1288  * Returns 0 for local files, as they do not have extent records.
1289  */
1290 int						/* error */
1291 xfs_bmap_last_before(
1292 	struct xfs_trans	*tp,		/* transaction pointer */
1293 	struct xfs_inode	*ip,		/* incore inode */
1294 	xfs_fileoff_t		*last_block,	/* last block */
1295 	int			whichfork)	/* data or attr fork */
1296 {
1297 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1298 	struct xfs_bmbt_irec	got;
1299 	struct xfs_iext_cursor	icur;
1300 	int			error;
1301 
1302 	switch (ifp->if_format) {
1303 	case XFS_DINODE_FMT_LOCAL:
1304 		*last_block = 0;
1305 		return 0;
1306 	case XFS_DINODE_FMT_BTREE:
1307 	case XFS_DINODE_FMT_EXTENTS:
1308 		break;
1309 	default:
1310 		ASSERT(0);
1311 		xfs_bmap_mark_sick(ip, whichfork);
1312 		return -EFSCORRUPTED;
1313 	}
1314 
1315 	error = xfs_iread_extents(tp, ip, whichfork);
1316 	if (error)
1317 		return error;
1318 
1319 	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
1320 		*last_block = 0;
1321 	return 0;
1322 }
1323 
1324 int
1325 xfs_bmap_last_extent(
1326 	struct xfs_trans	*tp,
1327 	struct xfs_inode	*ip,
1328 	int			whichfork,
1329 	struct xfs_bmbt_irec	*rec,
1330 	int			*is_empty)
1331 {
1332 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1333 	struct xfs_iext_cursor	icur;
1334 	int			error;
1335 
1336 	error = xfs_iread_extents(tp, ip, whichfork);
1337 	if (error)
1338 		return error;
1339 
1340 	xfs_iext_last(ifp, &icur);
1341 	if (!xfs_iext_get_extent(ifp, &icur, rec))
1342 		*is_empty = 1;
1343 	else
1344 		*is_empty = 0;
1345 	return 0;
1346 }
1347 
1348 /*
1349  * Check the last inode extent to determine whether this allocation will result
1350  * in blocks being allocated at the end of the file. When we allocate new data
1351  * blocks at the end of the file which do not start at the previous data block,
1352  * we will try to align the new blocks at stripe unit boundaries.
1353  *
1354  * Returns 1 in bma->aeof if the file (fork) is empty, as any new write will
1355  * be at or past EOF.
1356  */
1357 STATIC int
1358 xfs_bmap_isaeof(
1359 	struct xfs_bmalloca	*bma,
1360 	int			whichfork)
1361 {
1362 	struct xfs_bmbt_irec	rec;
1363 	int			is_empty;
1364 	int			error;
1365 
1366 	bma->aeof = false;
1367 	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1368 				     &is_empty);
1369 	if (error)
1370 		return error;
1371 
1372 	if (is_empty) {
1373 		bma->aeof = true;
1374 		return 0;
1375 	}
1376 
1377 	/*
1378 	 * Check if we are allocating at or past the last extent, or at least into
1379 	 * the last delayed allocated extent.
1380 	 */
1381 	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1382 		(bma->offset >= rec.br_startoff &&
1383 		 isnullstartblock(rec.br_startblock));
1384 	return 0;
1385 }
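
/*
 * Editorial aside, not kernel code: the "allocating at/past EOF" predicate
 * above as a standalone helper.  struct last_rec models the last extent
 * record, with delalloc standing in for isnullstartblock().
 */
#include <stdbool.h>
#include <stdint.h>

struct last_rec {
	uint64_t	startoff;
	uint64_t	blockcount;
	bool		delalloc;	/* models isnullstartblock() */
};

static bool alloc_is_at_eof(const struct last_rec *last, uint64_t offset,
			    bool empty)
{
	if (empty)
		return true;	/* empty fork: any write lands at EOF */
	return offset >= last->startoff + last->blockcount ||
	       (offset >= last->startoff && last->delalloc);
}

int main(void)
{
	struct last_rec last = { .startoff = 100, .blockcount = 8,
				 .delalloc = false };

	/* writing at offset 108 (== current EOF) is an append */
	return alloc_is_at_eof(&last, 108, false) ? 0 : 1;
}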
1386 
1387 /*
1388  * Returns the file-relative block number of the first block past eof in
1389  * the file.  This is not based on i_size, it is based on the extent records.
1390  * Returns 0 for local files, as they do not have extent records.
1391  */
1392 int
1393 xfs_bmap_last_offset(
1394 	struct xfs_inode	*ip,
1395 	xfs_fileoff_t		*last_block,
1396 	int			whichfork)
1397 {
1398 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1399 	struct xfs_bmbt_irec	rec;
1400 	int			is_empty;
1401 	int			error;
1402 
1403 	*last_block = 0;
1404 
1405 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
1406 		return 0;
1407 
1408 	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp))) {
1409 		xfs_bmap_mark_sick(ip, whichfork);
1410 		return -EFSCORRUPTED;
1411 	}
1412 
1413 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1414 	if (error || is_empty)
1415 		return error;
1416 
1417 	*last_block = rec.br_startoff + rec.br_blockcount;
1418 	return 0;
1419 }
1420 
1421 /*
1422  * Extent tree manipulation functions used during allocation.
1423  */
1424 
1425 /*
1426  * Convert a delayed allocation to a real allocation.
1427  */
1428 STATIC int				/* error */
1429 xfs_bmap_add_extent_delay_real(
1430 	struct xfs_bmalloca	*bma,
1431 	int			whichfork)
1432 {
1433 	struct xfs_mount	*mp = bma->ip->i_mount;
1434 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
1435 	struct xfs_bmbt_irec	*new = &bma->got;
1436 	int			error;	/* error return value */
1437 	int			i;	/* temp state */
1438 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
1439 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
1440 					/* left is 0, right is 1, prev is 2 */
1441 	int			rval=0;	/* return value (logging flags) */
1442 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
1443 	xfs_filblks_t		da_new; /* new count del alloc blocks used */
1444 	xfs_filblks_t		da_old; /* old count del alloc blocks used */
1445 	xfs_filblks_t		temp=0;	/* value for da_new calculations */
1446 	int			tmp_rval;	/* partial logging flags */
1447 	struct xfs_bmbt_irec	old;
1448 
1449 	ASSERT(whichfork != XFS_ATTR_FORK);
1450 	ASSERT(!isnullstartblock(new->br_startblock));
1451 	ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
1452 
1453 	XFS_STATS_INC(mp, xs_add_exlist);
1454 
1455 #define	LEFT		r[0]
1456 #define	RIGHT		r[1]
1457 #define	PREV		r[2]
1458 
1459 	/*
1460 	 * Set up a bunch of variables to make the tests simpler.
1461 	 */
1462 	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
1463 	new_endoff = new->br_startoff + new->br_blockcount;
1464 	ASSERT(isnullstartblock(PREV.br_startblock));
1465 	ASSERT(PREV.br_startoff <= new->br_startoff);
1466 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1467 
1468 	da_old = startblockval(PREV.br_startblock);
1469 	da_new = 0;
1470 
1471 	/*
1472 	 * Set flags determining what part of the previous delayed allocation
1473 	 * extent is being replaced by a real allocation.
1474 	 */
1475 	if (PREV.br_startoff == new->br_startoff)
1476 		state |= BMAP_LEFT_FILLING;
1477 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1478 		state |= BMAP_RIGHT_FILLING;
1479 
1480 	/*
1481 	 * Check and set flags if this segment has a left neighbor.
1482 	 * Don't set contiguous if the combined extent would be too large.
1483 	 */
1484 	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
1485 		state |= BMAP_LEFT_VALID;
1486 		if (isnullstartblock(LEFT.br_startblock))
1487 			state |= BMAP_LEFT_DELAY;
1488 	}
1489 
1490 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1491 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1492 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1493 	    LEFT.br_state == new->br_state &&
1494 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
1495 		state |= BMAP_LEFT_CONTIG;
1496 
1497 	/*
1498 	 * Check and set flags if this segment has a right neighbor.
1499 	 * Don't set contiguous if the combined extent would be too large.
1500 	 * Also check for all-three-contiguous being too large.
1501 	 */
1502 	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
1503 		state |= BMAP_RIGHT_VALID;
1504 		if (isnullstartblock(RIGHT.br_startblock))
1505 			state |= BMAP_RIGHT_DELAY;
1506 	}
1507 
1508 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1509 	    new_endoff == RIGHT.br_startoff &&
1510 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1511 	    new->br_state == RIGHT.br_state &&
1512 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
1513 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1514 		       BMAP_RIGHT_FILLING)) !=
1515 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1516 		       BMAP_RIGHT_FILLING) ||
1517 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1518 			<= XFS_MAX_BMBT_EXTLEN))
1519 		state |= BMAP_RIGHT_CONTIG;
1520 
1521 	error = 0;
1522 	/*
1523 	 * Switch out based on the FILLING and CONTIG state bits.
1524 	 */
1525 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1526 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1527 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1528 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1529 		/*
1530 		 * Filling in all of a previously delayed allocation extent.
1531 		 * The left and right neighbors are both contiguous with new.
1532 		 */
1533 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
1534 
1535 		xfs_iext_remove(bma->ip, &bma->icur, state);
1536 		xfs_iext_remove(bma->ip, &bma->icur, state);
1537 		xfs_iext_prev(ifp, &bma->icur);
1538 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1539 		ifp->if_nextents--;
1540 
1541 		if (bma->cur == NULL)
1542 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1543 		else {
1544 			rval = XFS_ILOG_CORE;
1545 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1546 			if (error)
1547 				goto done;
1548 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1549 				xfs_btree_mark_sick(bma->cur);
1550 				error = -EFSCORRUPTED;
1551 				goto done;
1552 			}
1553 			error = xfs_btree_delete(bma->cur, &i);
1554 			if (error)
1555 				goto done;
1556 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1557 				xfs_btree_mark_sick(bma->cur);
1558 				error = -EFSCORRUPTED;
1559 				goto done;
1560 			}
1561 			error = xfs_btree_decrement(bma->cur, 0, &i);
1562 			if (error)
1563 				goto done;
1564 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1565 				xfs_btree_mark_sick(bma->cur);
1566 				error = -EFSCORRUPTED;
1567 				goto done;
1568 			}
1569 			error = xfs_bmbt_update(bma->cur, &LEFT);
1570 			if (error)
1571 				goto done;
1572 		}
1573 		ASSERT(da_new <= da_old);
1574 		break;
1575 
1576 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1577 		/*
1578 		 * Filling in all of a previously delayed allocation extent.
1579 		 * The left neighbor is contiguous, the right is not.
1580 		 */
1581 		old = LEFT;
1582 		LEFT.br_blockcount += PREV.br_blockcount;
1583 
1584 		xfs_iext_remove(bma->ip, &bma->icur, state);
1585 		xfs_iext_prev(ifp, &bma->icur);
1586 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1587 
1588 		if (bma->cur == NULL)
1589 			rval = XFS_ILOG_DEXT;
1590 		else {
1591 			rval = 0;
1592 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1593 			if (error)
1594 				goto done;
1595 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1596 				xfs_btree_mark_sick(bma->cur);
1597 				error = -EFSCORRUPTED;
1598 				goto done;
1599 			}
1600 			error = xfs_bmbt_update(bma->cur, &LEFT);
1601 			if (error)
1602 				goto done;
1603 		}
1604 		ASSERT(da_new <= da_old);
1605 		break;
1606 
1607 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1608 		/*
1609 		 * Filling in all of a previously delayed allocation extent.
1610 		 * The right neighbor is contiguous, the left is not. Take care
1611 		 * with delay -> unwritten extent allocation here because the
1612 		 * delalloc record we are overwriting is always written.
1613 		 */
1614 		PREV.br_startblock = new->br_startblock;
1615 		PREV.br_blockcount += RIGHT.br_blockcount;
1616 		PREV.br_state = new->br_state;
1617 
1618 		xfs_iext_next(ifp, &bma->icur);
1619 		xfs_iext_remove(bma->ip, &bma->icur, state);
1620 		xfs_iext_prev(ifp, &bma->icur);
1621 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1622 
1623 		if (bma->cur == NULL)
1624 			rval = XFS_ILOG_DEXT;
1625 		else {
1626 			rval = 0;
1627 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1628 			if (error)
1629 				goto done;
1630 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1631 				xfs_btree_mark_sick(bma->cur);
1632 				error = -EFSCORRUPTED;
1633 				goto done;
1634 			}
1635 			error = xfs_bmbt_update(bma->cur, &PREV);
1636 			if (error)
1637 				goto done;
1638 		}
1639 		ASSERT(da_new <= da_old);
1640 		break;
1641 
1642 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1643 		/*
1644 		 * Filling in all of a previously delayed allocation extent.
1645 		 * Neither the left nor right neighbors are contiguous with
1646 		 * the new one.
1647 		 */
1648 		PREV.br_startblock = new->br_startblock;
1649 		PREV.br_state = new->br_state;
1650 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1651 		ifp->if_nextents++;
1652 
1653 		if (bma->cur == NULL)
1654 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1655 		else {
1656 			rval = XFS_ILOG_CORE;
1657 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1658 			if (error)
1659 				goto done;
1660 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1661 				xfs_btree_mark_sick(bma->cur);
1662 				error = -EFSCORRUPTED;
1663 				goto done;
1664 			}
1665 			error = xfs_btree_insert(bma->cur, &i);
1666 			if (error)
1667 				goto done;
1668 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1669 				xfs_btree_mark_sick(bma->cur);
1670 				error = -EFSCORRUPTED;
1671 				goto done;
1672 			}
1673 		}
1674 		ASSERT(da_new <= da_old);
1675 		break;
1676 
1677 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1678 		/*
1679 		 * Filling in the first part of a previous delayed allocation.
1680 		 * The left neighbor is contiguous.
1681 		 */
1682 		old = LEFT;
1683 		temp = PREV.br_blockcount - new->br_blockcount;
1684 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1685 				startblockval(PREV.br_startblock));
1686 
1687 		LEFT.br_blockcount += new->br_blockcount;
1688 
1689 		PREV.br_blockcount = temp;
1690 		PREV.br_startoff += new->br_blockcount;
1691 		PREV.br_startblock = nullstartblock(da_new);
1692 
1693 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1694 		xfs_iext_prev(ifp, &bma->icur);
1695 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1696 
1697 		if (bma->cur == NULL)
1698 			rval = XFS_ILOG_DEXT;
1699 		else {
1700 			rval = 0;
1701 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1702 			if (error)
1703 				goto done;
1704 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1705 				xfs_btree_mark_sick(bma->cur);
1706 				error = -EFSCORRUPTED;
1707 				goto done;
1708 			}
1709 			error = xfs_bmbt_update(bma->cur, &LEFT);
1710 			if (error)
1711 				goto done;
1712 		}
1713 		ASSERT(da_new <= da_old);
1714 		break;
1715 
1716 	case BMAP_LEFT_FILLING:
1717 		/*
1718 		 * Filling in the first part of a previous delayed allocation.
1719 		 * The left neighbor is not contiguous.
1720 		 */
1721 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1722 		ifp->if_nextents++;
1723 
1724 		if (bma->cur == NULL)
1725 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1726 		else {
1727 			rval = XFS_ILOG_CORE;
1728 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1729 			if (error)
1730 				goto done;
1731 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1732 				xfs_btree_mark_sick(bma->cur);
1733 				error = -EFSCORRUPTED;
1734 				goto done;
1735 			}
1736 			error = xfs_btree_insert(bma->cur, &i);
1737 			if (error)
1738 				goto done;
1739 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1740 				xfs_btree_mark_sick(bma->cur);
1741 				error = -EFSCORRUPTED;
1742 				goto done;
1743 			}
1744 		}
1745 
1746 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1747 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1748 					&bma->cur, 1, &tmp_rval, whichfork);
1749 			rval |= tmp_rval;
1750 			if (error)
1751 				goto done;
1752 		}
1753 
1754 		temp = PREV.br_blockcount - new->br_blockcount;
1755 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1756 			startblockval(PREV.br_startblock) -
1757 			(bma->cur ? bma->cur->bc_bmap.allocated : 0));
1758 
1759 		PREV.br_startoff = new_endoff;
1760 		PREV.br_blockcount = temp;
1761 		PREV.br_startblock = nullstartblock(da_new);
1762 		xfs_iext_next(ifp, &bma->icur);
1763 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1764 		xfs_iext_prev(ifp, &bma->icur);
1765 		break;
1766 
1767 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1768 		/*
1769 		 * Filling in the last part of a previous delayed allocation.
1770 		 * The right neighbor is contiguous with the new allocation.
1771 		 */
1772 		old = RIGHT;
1773 		RIGHT.br_startoff = new->br_startoff;
1774 		RIGHT.br_startblock = new->br_startblock;
1775 		RIGHT.br_blockcount += new->br_blockcount;
1776 
1777 		if (bma->cur == NULL)
1778 			rval = XFS_ILOG_DEXT;
1779 		else {
1780 			rval = 0;
1781 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1782 			if (error)
1783 				goto done;
1784 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1785 				xfs_btree_mark_sick(bma->cur);
1786 				error = -EFSCORRUPTED;
1787 				goto done;
1788 			}
1789 			error = xfs_bmbt_update(bma->cur, &RIGHT);
1790 			if (error)
1791 				goto done;
1792 		}
1793 
1794 		temp = PREV.br_blockcount - new->br_blockcount;
1795 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1796 			startblockval(PREV.br_startblock));
1797 
1798 		PREV.br_blockcount = temp;
1799 		PREV.br_startblock = nullstartblock(da_new);
1800 
1801 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1802 		xfs_iext_next(ifp, &bma->icur);
1803 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
1804 		ASSERT(da_new <= da_old);
1805 		break;
1806 
1807 	case BMAP_RIGHT_FILLING:
1808 		/*
1809 		 * Filling in the last part of a previous delayed allocation.
1810 		 * The right neighbor is not contiguous.
1811 		 */
1812 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1813 		ifp->if_nextents++;
1814 
1815 		if (bma->cur == NULL)
1816 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1817 		else {
1818 			rval = XFS_ILOG_CORE;
1819 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1820 			if (error)
1821 				goto done;
1822 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1823 				xfs_btree_mark_sick(bma->cur);
1824 				error = -EFSCORRUPTED;
1825 				goto done;
1826 			}
1827 			error = xfs_btree_insert(bma->cur, &i);
1828 			if (error)
1829 				goto done;
1830 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1831 				xfs_btree_mark_sick(bma->cur);
1832 				error = -EFSCORRUPTED;
1833 				goto done;
1834 			}
1835 		}
1836 
1837 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1838 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1839 				&bma->cur, 1, &tmp_rval, whichfork);
1840 			rval |= tmp_rval;
1841 			if (error)
1842 				goto done;
1843 		}
1844 
1845 		temp = PREV.br_blockcount - new->br_blockcount;
1846 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1847 			startblockval(PREV.br_startblock) -
1848 			(bma->cur ? bma->cur->bc_bmap.allocated : 0));
1849 
1850 		PREV.br_startblock = nullstartblock(da_new);
1851 		PREV.br_blockcount = temp;
1852 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1853 		xfs_iext_next(ifp, &bma->icur);
1854 		ASSERT(da_new <= da_old);
1855 		break;
1856 
1857 	case 0:
1858 		/*
1859 		 * Filling in the middle part of a previous delayed allocation.
1860 		 * Contiguity is impossible here.
1861 		 * This case is avoided almost all the time.
1862 		 *
1863 		 * We start with a delayed allocation:
1864 		 *
1865 		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
1866 		 *  PREV @ idx
1867 		 *
1868 		 * and we are allocating:
1869 		 *                     +rrrrrrrrrrrrrrrrr+
1870 		 *			      new
1871 		 *
1872 		 * and we set it up for insertion as:
1873 		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
1874 		 *                            new
1875 		 *  PREV @ idx          LEFT              RIGHT
1876 		 *                      inserted at idx + 1
1877 		 */
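		/*
		 * Illustrative example (hypothetical numbers): if PREV is a
		 * delalloc extent covering file offsets [0, 100) and new maps
		 * [30, 50) to real blocks, PREV is truncated to the delalloc
		 * piece [0, 30), LEFT becomes the real extent [30, 50), and
		 * RIGHT becomes a fresh delalloc piece [50, 100), each
		 * remaining piece carrying its own worst-case indlen
		 * reservation.
		 */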
1878 		old = PREV;
1879 
1880 		/* LEFT is the new middle */
1881 		LEFT = *new;
1882 
1883 		/* RIGHT is the new right */
1884 		RIGHT.br_state = PREV.br_state;
1885 		RIGHT.br_startoff = new_endoff;
1886 		RIGHT.br_blockcount =
1887 			PREV.br_startoff + PREV.br_blockcount - new_endoff;
1888 		RIGHT.br_startblock =
1889 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1890 					RIGHT.br_blockcount));
1891 
1892 		/* truncate PREV */
1893 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
1894 		PREV.br_startblock =
1895 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1896 					PREV.br_blockcount));
1897 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1898 
1899 		xfs_iext_next(ifp, &bma->icur);
1900 		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
1901 		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
1902 		ifp->if_nextents++;
1903 
1904 		if (bma->cur == NULL)
1905 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1906 		else {
1907 			rval = XFS_ILOG_CORE;
1908 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1909 			if (error)
1910 				goto done;
1911 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1912 				xfs_btree_mark_sick(bma->cur);
1913 				error = -EFSCORRUPTED;
1914 				goto done;
1915 			}
1916 			error = xfs_btree_insert(bma->cur, &i);
1917 			if (error)
1918 				goto done;
1919 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1920 				xfs_btree_mark_sick(bma->cur);
1921 				error = -EFSCORRUPTED;
1922 				goto done;
1923 			}
1924 		}
1925 
1926 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1927 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1928 					&bma->cur, 1, &tmp_rval, whichfork);
1929 			rval |= tmp_rval;
1930 			if (error)
1931 				goto done;
1932 		}
1933 
1934 		da_new = startblockval(PREV.br_startblock) +
1935 			 startblockval(RIGHT.br_startblock);
1936 		break;
1937 
1938 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1939 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1940 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1941 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1942 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1943 	case BMAP_LEFT_CONTIG:
1944 	case BMAP_RIGHT_CONTIG:
1945 		/*
1946 		 * These cases are all impossible.
1947 		 */
1948 		ASSERT(0);
1949 	}
1950 
1951 	/* add reverse mapping unless caller opted out */
1952 	if (!(bma->flags & XFS_BMAPI_NORMAP))
1953 		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
1954 
1955 	/* convert to a btree if necessary */
1956 	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1957 		int	tmp_logflags;	/* partial log flag return val */
1958 
1959 		ASSERT(bma->cur == NULL);
1960 		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1961 				&bma->cur, da_old > 0, &tmp_logflags,
1962 				whichfork);
1963 		bma->logflags |= tmp_logflags;
1964 		if (error)
1965 			goto done;
1966 	}
1967 
1968 	if (da_new != da_old)
1969 		xfs_mod_delalloc(bma->ip, 0, (int64_t)da_new - da_old);
1970 
1971 	if (bma->cur) {
1972 		da_new += bma->cur->bc_bmap.allocated;
1973 		bma->cur->bc_bmap.allocated = 0;
1974 	}
1975 
1976 	/* adjust for changes in reserved delayed indirect blocks */
1977 	if (da_new < da_old)
1978 		xfs_add_fdblocks(mp, da_old - da_new);
1979 	else if (da_new > da_old)
1980 		error = xfs_dec_fdblocks(mp, da_new - da_old, true);
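	/*
	 * Note that da_new can exceed da_old: the middle-fill case splits one
	 * delalloc extent into two pieces that each need their own worst-case
	 * indlen reservation, and any blocks the btree cursor consumed were
	 * added to da_new above.  The shortfall is taken from free space.
	 */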
1981 
1982 	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
1983 done:
1984 	if (whichfork != XFS_COW_FORK)
1985 		bma->logflags |= rval;
1986 	return error;
1987 #undef	LEFT
1988 #undef	RIGHT
1989 #undef	PREV
1990 }
1991 
1992 /*
1993  * Convert an unwritten allocation to a real allocation or vice versa.
1994  */
1995 int					/* error */
1996 xfs_bmap_add_extent_unwritten_real(
1997 	struct xfs_trans	*tp,
1998 	xfs_inode_t		*ip,	/* incore inode pointer */
1999 	int			whichfork,
2000 	struct xfs_iext_cursor	*icur,
2001 	struct xfs_btree_cur	**curp,	/* if *curp is null, not a btree */
2002 	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
2003 	int			*logflagsp) /* inode logging flags */
2004 {
2005 	struct xfs_btree_cur	*cur;	/* btree cursor */
2006 	int			error;	/* error return value */
2007 	int			i;	/* temp state */
2008 	struct xfs_ifork	*ifp;	/* inode fork pointer */
2009 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
2010 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
2011 					/* left is 0, right is 1, prev is 2 */
2012 	int			rval = 0;	/* return value (logging flags) */
2013 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2014 	struct xfs_mount	*mp = ip->i_mount;
2015 	struct xfs_bmbt_irec	old;
2016 
2017 	*logflagsp = 0;
2018 
2019 	cur = *curp;
2020 	ifp = xfs_ifork_ptr(ip, whichfork);
2021 
2022 	ASSERT(!isnullstartblock(new->br_startblock));
2023 
2024 	XFS_STATS_INC(mp, xs_add_exlist);
2025 
2026 #define	LEFT		r[0]
2027 #define	RIGHT		r[1]
2028 #define	PREV		r[2]
2029 
2030 	/*
2031 	 * Set up a bunch of variables to make the tests simpler.
2032 	 */
2033 	error = 0;
2034 	xfs_iext_get_extent(ifp, icur, &PREV);
2035 	ASSERT(new->br_state != PREV.br_state);
2036 	new_endoff = new->br_startoff + new->br_blockcount;
2037 	ASSERT(PREV.br_startoff <= new->br_startoff);
2038 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2039 
2040 	/*
2041 	 * Set flags determining what part of the previous oldext allocation
2042 	 * extent is being replaced by a newext allocation.
2043 	 */
2044 	if (PREV.br_startoff == new->br_startoff)
2045 		state |= BMAP_LEFT_FILLING;
2046 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2047 		state |= BMAP_RIGHT_FILLING;
2048 
2049 	/*
2050 	 * Check and set flags if this segment has a left neighbor.
2051 	 * Don't set contiguous if the combined extent would be too large.
2052 	 */
2053 	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
2054 		state |= BMAP_LEFT_VALID;
2055 		if (isnullstartblock(LEFT.br_startblock))
2056 			state |= BMAP_LEFT_DELAY;
2057 	}
2058 
2059 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2060 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2061 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2062 	    LEFT.br_state == new->br_state &&
2063 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2064 		state |= BMAP_LEFT_CONTIG;
2065 
2066 	/*
2067 	 * Check and set flags if this segment has a right neighbor.
2068 	 * Don't set contiguous if the combined extent would be too large.
2069 	 * Also check for all-three-contiguous being too large.
2070 	 */
2071 	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
2072 		state |= BMAP_RIGHT_VALID;
2073 		if (isnullstartblock(RIGHT.br_startblock))
2074 			state |= BMAP_RIGHT_DELAY;
2075 	}
2076 
2077 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2078 	    new_endoff == RIGHT.br_startoff &&
2079 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2080 	    new->br_state == RIGHT.br_state &&
2081 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2082 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2083 		       BMAP_RIGHT_FILLING)) !=
2084 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2085 		       BMAP_RIGHT_FILLING) ||
2086 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2087 			<= XFS_MAX_BMBT_EXTLEN))
2088 		state |= BMAP_RIGHT_CONTIG;
2089 
2090 	/*
2091 	 * Switch out based on the FILLING and CONTIG state bits.
2092 	 */
2093 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2094 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2095 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2096 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2097 		/*
2098 		 * Setting all of a previous oldext extent to newext.
2099 		 * The left and right neighbors are both contiguous with new.
2100 		 */
2101 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2102 
2103 		xfs_iext_remove(ip, icur, state);
2104 		xfs_iext_remove(ip, icur, state);
2105 		xfs_iext_prev(ifp, icur);
2106 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2107 		ifp->if_nextents -= 2;
2108 		if (cur == NULL)
2109 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2110 		else {
2111 			rval = XFS_ILOG_CORE;
2112 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2113 			if (error)
2114 				goto done;
2115 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2116 				xfs_btree_mark_sick(cur);
2117 				error = -EFSCORRUPTED;
2118 				goto done;
2119 			}
2120 			if ((error = xfs_btree_delete(cur, &i)))
2121 				goto done;
2122 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2123 				xfs_btree_mark_sick(cur);
2124 				error = -EFSCORRUPTED;
2125 				goto done;
2126 			}
2127 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2128 				goto done;
2129 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2130 				xfs_btree_mark_sick(cur);
2131 				error = -EFSCORRUPTED;
2132 				goto done;
2133 			}
2134 			if ((error = xfs_btree_delete(cur, &i)))
2135 				goto done;
2136 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2137 				xfs_btree_mark_sick(cur);
2138 				error = -EFSCORRUPTED;
2139 				goto done;
2140 			}
2141 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2142 				goto done;
2143 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2144 				xfs_btree_mark_sick(cur);
2145 				error = -EFSCORRUPTED;
2146 				goto done;
2147 			}
2148 			error = xfs_bmbt_update(cur, &LEFT);
2149 			if (error)
2150 				goto done;
2151 		}
2152 		break;
2153 
2154 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2155 		/*
2156 		 * Setting all of a previous oldext extent to newext.
2157 		 * The left neighbor is contiguous, the right is not.
2158 		 */
2159 		LEFT.br_blockcount += PREV.br_blockcount;
2160 
2161 		xfs_iext_remove(ip, icur, state);
2162 		xfs_iext_prev(ifp, icur);
2163 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2164 		ifp->if_nextents--;
2165 		if (cur == NULL)
2166 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2167 		else {
2168 			rval = XFS_ILOG_CORE;
2169 			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2170 			if (error)
2171 				goto done;
2172 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2173 				xfs_btree_mark_sick(cur);
2174 				error = -EFSCORRUPTED;
2175 				goto done;
2176 			}
2177 			if ((error = xfs_btree_delete(cur, &i)))
2178 				goto done;
2179 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2180 				xfs_btree_mark_sick(cur);
2181 				error = -EFSCORRUPTED;
2182 				goto done;
2183 			}
2184 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2185 				goto done;
2186 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2187 				xfs_btree_mark_sick(cur);
2188 				error = -EFSCORRUPTED;
2189 				goto done;
2190 			}
2191 			error = xfs_bmbt_update(cur, &LEFT);
2192 			if (error)
2193 				goto done;
2194 		}
2195 		break;
2196 
2197 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2198 		/*
2199 		 * Setting all of a previous oldext extent to newext.
2200 		 * The right neighbor is contiguous, the left is not.
2201 		 */
2202 		PREV.br_blockcount += RIGHT.br_blockcount;
2203 		PREV.br_state = new->br_state;
2204 
2205 		xfs_iext_next(ifp, icur);
2206 		xfs_iext_remove(ip, icur, state);
2207 		xfs_iext_prev(ifp, icur);
2208 		xfs_iext_update_extent(ip, state, icur, &PREV);
2209 		ifp->if_nextents--;
2210 
2211 		if (cur == NULL)
2212 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2213 		else {
2214 			rval = XFS_ILOG_CORE;
2215 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2216 			if (error)
2217 				goto done;
2218 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2219 				xfs_btree_mark_sick(cur);
2220 				error = -EFSCORRUPTED;
2221 				goto done;
2222 			}
2223 			if ((error = xfs_btree_delete(cur, &i)))
2224 				goto done;
2225 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2226 				xfs_btree_mark_sick(cur);
2227 				error = -EFSCORRUPTED;
2228 				goto done;
2229 			}
2230 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2231 				goto done;
2232 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2233 				xfs_btree_mark_sick(cur);
2234 				error = -EFSCORRUPTED;
2235 				goto done;
2236 			}
2237 			error = xfs_bmbt_update(cur, &PREV);
2238 			if (error)
2239 				goto done;
2240 		}
2241 		break;
2242 
2243 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2244 		/*
2245 		 * Setting all of a previous oldext extent to newext.
2246 		 * Neither the left nor right neighbors are contiguous with
2247 		 * the new one.
2248 		 */
2249 		PREV.br_state = new->br_state;
2250 		xfs_iext_update_extent(ip, state, icur, &PREV);
2251 
2252 		if (cur == NULL)
2253 			rval = XFS_ILOG_DEXT;
2254 		else {
2255 			rval = 0;
2256 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2257 			if (error)
2258 				goto done;
2259 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2260 				xfs_btree_mark_sick(cur);
2261 				error = -EFSCORRUPTED;
2262 				goto done;
2263 			}
2264 			error = xfs_bmbt_update(cur, &PREV);
2265 			if (error)
2266 				goto done;
2267 		}
2268 		break;
2269 
2270 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2271 		/*
2272 		 * Setting the first part of a previous oldext extent to newext.
2273 		 * The left neighbor is contiguous.
2274 		 */
2275 		LEFT.br_blockcount += new->br_blockcount;
2276 
2277 		old = PREV;
2278 		PREV.br_startoff += new->br_blockcount;
2279 		PREV.br_startblock += new->br_blockcount;
2280 		PREV.br_blockcount -= new->br_blockcount;
2281 
2282 		xfs_iext_update_extent(ip, state, icur, &PREV);
2283 		xfs_iext_prev(ifp, icur);
2284 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2285 
2286 		if (cur == NULL)
2287 			rval = XFS_ILOG_DEXT;
2288 		else {
2289 			rval = 0;
2290 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2291 			if (error)
2292 				goto done;
2293 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2294 				xfs_btree_mark_sick(cur);
2295 				error = -EFSCORRUPTED;
2296 				goto done;
2297 			}
2298 			error = xfs_bmbt_update(cur, &PREV);
2299 			if (error)
2300 				goto done;
2301 			error = xfs_btree_decrement(cur, 0, &i);
2302 			if (error)
2303 				goto done;
2304 			error = xfs_bmbt_update(cur, &LEFT);
2305 			if (error)
2306 				goto done;
2307 		}
2308 		break;
2309 
2310 	case BMAP_LEFT_FILLING:
2311 		/*
2312 		 * Setting the first part of a previous oldext extent to newext.
2313 		 * The left neighbor is not contiguous.
2314 		 */
2315 		old = PREV;
2316 		PREV.br_startoff += new->br_blockcount;
2317 		PREV.br_startblock += new->br_blockcount;
2318 		PREV.br_blockcount -= new->br_blockcount;
2319 
2320 		xfs_iext_update_extent(ip, state, icur, &PREV);
2321 		xfs_iext_insert(ip, icur, new, state);
2322 		ifp->if_nextents++;
2323 
2324 		if (cur == NULL)
2325 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2326 		else {
2327 			rval = XFS_ILOG_CORE;
2328 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2329 			if (error)
2330 				goto done;
2331 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2332 				xfs_btree_mark_sick(cur);
2333 				error = -EFSCORRUPTED;
2334 				goto done;
2335 			}
2336 			error = xfs_bmbt_update(cur, &PREV);
2337 			if (error)
2338 				goto done;
2339 			cur->bc_rec.b = *new;
2340 			if ((error = xfs_btree_insert(cur, &i)))
2341 				goto done;
2342 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2343 				xfs_btree_mark_sick(cur);
2344 				error = -EFSCORRUPTED;
2345 				goto done;
2346 			}
2347 		}
2348 		break;
2349 
2350 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2351 		/*
2352 		 * Setting the last part of a previous oldext extent to newext.
2353 		 * The right neighbor is contiguous with the new allocation.
2354 		 */
2355 		old = PREV;
2356 		PREV.br_blockcount -= new->br_blockcount;
2357 
2358 		RIGHT.br_startoff = new->br_startoff;
2359 		RIGHT.br_startblock = new->br_startblock;
2360 		RIGHT.br_blockcount += new->br_blockcount;
2361 
2362 		xfs_iext_update_extent(ip, state, icur, &PREV);
2363 		xfs_iext_next(ifp, icur);
2364 		xfs_iext_update_extent(ip, state, icur, &RIGHT);
2365 
2366 		if (cur == NULL)
2367 			rval = XFS_ILOG_DEXT;
2368 		else {
2369 			rval = 0;
2370 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2371 			if (error)
2372 				goto done;
2373 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2374 				xfs_btree_mark_sick(cur);
2375 				error = -EFSCORRUPTED;
2376 				goto done;
2377 			}
2378 			error = xfs_bmbt_update(cur, &PREV);
2379 			if (error)
2380 				goto done;
2381 			error = xfs_btree_increment(cur, 0, &i);
2382 			if (error)
2383 				goto done;
2384 			error = xfs_bmbt_update(cur, &RIGHT);
2385 			if (error)
2386 				goto done;
2387 		}
2388 		break;
2389 
2390 	case BMAP_RIGHT_FILLING:
2391 		/*
2392 		 * Setting the last part of a previous oldext extent to newext.
2393 		 * The right neighbor is not contiguous.
2394 		 */
2395 		old = PREV;
2396 		PREV.br_blockcount -= new->br_blockcount;
2397 
2398 		xfs_iext_update_extent(ip, state, icur, &PREV);
2399 		xfs_iext_next(ifp, icur);
2400 		xfs_iext_insert(ip, icur, new, state);
2401 		ifp->if_nextents++;
2402 
2403 		if (cur == NULL)
2404 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2405 		else {
2406 			rval = XFS_ILOG_CORE;
2407 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2408 			if (error)
2409 				goto done;
2410 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2411 				xfs_btree_mark_sick(cur);
2412 				error = -EFSCORRUPTED;
2413 				goto done;
2414 			}
2415 			error = xfs_bmbt_update(cur, &PREV);
2416 			if (error)
2417 				goto done;
2418 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2419 			if (error)
2420 				goto done;
2421 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2422 				xfs_btree_mark_sick(cur);
2423 				error = -EFSCORRUPTED;
2424 				goto done;
2425 			}
2426 			if ((error = xfs_btree_insert(cur, &i)))
2427 				goto done;
2428 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2429 				xfs_btree_mark_sick(cur);
2430 				error = -EFSCORRUPTED;
2431 				goto done;
2432 			}
2433 		}
2434 		break;
2435 
2436 	case 0:
2437 		/*
2438 		 * Setting the middle part of a previous oldext extent to
2439 		 * newext.  Contiguity is impossible here.
2440 		 * One extent becomes three extents.
2441 		 */
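		/*
		 * For example (hypothetical numbers): converting [30, 50) of
		 * an unwritten extent spanning [0, 100) shrinks PREV to the
		 * unwritten piece [0, 30), inserts the converted extent
		 * r[0] = [30, 50), and inserts the unwritten remainder
		 * r[1] = [50, 100), growing if_nextents by two.
		 */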
2442 		old = PREV;
2443 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2444 
2445 		r[0] = *new;
2446 		r[1].br_startoff = new_endoff;
2447 		r[1].br_blockcount =
2448 			old.br_startoff + old.br_blockcount - new_endoff;
2449 		r[1].br_startblock = new->br_startblock + new->br_blockcount;
2450 		r[1].br_state = PREV.br_state;
2451 
2452 		xfs_iext_update_extent(ip, state, icur, &PREV);
2453 		xfs_iext_next(ifp, icur);
2454 		xfs_iext_insert(ip, icur, &r[1], state);
2455 		xfs_iext_insert(ip, icur, &r[0], state);
2456 		ifp->if_nextents += 2;
2457 
2458 		if (cur == NULL)
2459 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2460 		else {
2461 			rval = XFS_ILOG_CORE;
2462 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2463 			if (error)
2464 				goto done;
2465 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2466 				xfs_btree_mark_sick(cur);
2467 				error = -EFSCORRUPTED;
2468 				goto done;
2469 			}
2470 			/* new right extent - oldext */
2471 			error = xfs_bmbt_update(cur, &r[1]);
2472 			if (error)
2473 				goto done;
2474 			/* new left extent - oldext */
2475 			cur->bc_rec.b = PREV;
2476 			if ((error = xfs_btree_insert(cur, &i)))
2477 				goto done;
2478 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2479 				xfs_btree_mark_sick(cur);
2480 				error = -EFSCORRUPTED;
2481 				goto done;
2482 			}
2483 			/*
2484 			 * Reset the cursor to the position of the new extent
2485 			 * we are about to insert, as the previous insert may
2486 			 * have split btree blocks and left the cursor stale.
2487 			 */
2488 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2489 			if (error)
2490 				goto done;
2491 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2492 				xfs_btree_mark_sick(cur);
2493 				error = -EFSCORRUPTED;
2494 				goto done;
2495 			}
2496 			/* new middle extent - newext */
2497 			if ((error = xfs_btree_insert(cur, &i)))
2498 				goto done;
2499 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2500 				xfs_btree_mark_sick(cur);
2501 				error = -EFSCORRUPTED;
2502 				goto done;
2503 			}
2504 		}
2505 		break;
2506 
2507 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2508 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2509 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2510 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2511 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2512 	case BMAP_LEFT_CONTIG:
2513 	case BMAP_RIGHT_CONTIG:
2514 		/*
2515 		 * These cases are all impossible.
2516 		 */
2517 		ASSERT(0);
2518 	}
2519 
2520 	/* update reverse mappings */
2521 	xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2522 
2523 	/* convert to a btree if necessary */
2524 	if (xfs_bmap_needs_btree(ip, whichfork)) {
2525 		int	tmp_logflags;	/* partial log flag return val */
2526 
2527 		ASSERT(cur == NULL);
2528 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2529 				&tmp_logflags, whichfork);
2530 		*logflagsp |= tmp_logflags;
2531 		if (error)
2532 			goto done;
2533 	}
2534 
2535 	/* clear out the allocated field, done with it now in any case. */
2536 	if (cur) {
2537 		cur->bc_bmap.allocated = 0;
2538 		*curp = cur;
2539 	}
2540 
2541 	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2542 done:
2543 	*logflagsp |= rval;
2544 	return error;
2545 #undef	LEFT
2546 #undef	RIGHT
2547 #undef	PREV
2548 }
2549 
2550 /*
2551  * Convert a hole to a delayed allocation.
2552  */
2553 STATIC void
2554 xfs_bmap_add_extent_hole_delay(
2555 	xfs_inode_t		*ip,	/* incore inode pointer */
2556 	int			whichfork,
2557 	struct xfs_iext_cursor	*icur,
2558 	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
2559 {
2560 	struct xfs_ifork	*ifp;	/* inode fork pointer */
2561 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
2562 	xfs_filblks_t		newlen = 0;	/* new indirect size */
2563 	xfs_filblks_t		oldlen = 0;	/* old indirect size */
2564 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
2565 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2566 	xfs_filblks_t		temp;	 /* temp for indirect calculations */
2567 
2568 	ifp = xfs_ifork_ptr(ip, whichfork);
2569 	ASSERT(isnullstartblock(new->br_startblock));
2570 
2571 	/*
2572 	 * Check and set flags if this segment has a left neighbor
2573 	 */
2574 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2575 		state |= BMAP_LEFT_VALID;
2576 		if (isnullstartblock(left.br_startblock))
2577 			state |= BMAP_LEFT_DELAY;
2578 	}
2579 
2580 	/*
2581 	 * Check and set flags if the current (right) segment exists.
2582 	 * If it doesn't exist, we're converting the hole at end-of-file.
2583 	 */
2584 	if (xfs_iext_get_extent(ifp, icur, &right)) {
2585 		state |= BMAP_RIGHT_VALID;
2586 		if (isnullstartblock(right.br_startblock))
2587 			state |= BMAP_RIGHT_DELAY;
2588 	}
2589 
2590 	/*
2591 	 * Set contiguity flags on the left and right neighbors.
2592 	 * Don't let extents get too large, even if the pieces are contiguous.
2593 	 */
2594 	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2595 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
2596 	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2597 		state |= BMAP_LEFT_CONTIG;
2598 
2599 	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2600 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
2601 	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2602 	    (!(state & BMAP_LEFT_CONTIG) ||
2603 	     (left.br_blockcount + new->br_blockcount +
2604 	      right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)))
2605 		state |= BMAP_RIGHT_CONTIG;
2606 
2607 	/*
2608 	 * Switch out based on the contiguity flags.
2609 	 */
2610 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2611 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2612 		/*
2613 		 * New allocation is contiguous with delayed allocations
2614 		 * on the left and on the right.
2615 		 * Merge all three into a single extent record.
2616 		 */
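		/*
		 * Merging never needs more indirect blocks than the pieces
		 * held separately.  Illustrative numbers: if left, new and
		 * right each reserved 2 indirect blocks (oldlen = 6) but the
		 * merged extent's worst case is only 4 (newlen), the 2-block
		 * surplus is returned to fdblocks at the end of the function.
		 */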
2617 		temp = left.br_blockcount + new->br_blockcount +
2618 			right.br_blockcount;
2619 
2620 		oldlen = startblockval(left.br_startblock) +
2621 			startblockval(new->br_startblock) +
2622 			startblockval(right.br_startblock);
2623 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2624 					 oldlen);
2625 		left.br_startblock = nullstartblock(newlen);
2626 		left.br_blockcount = temp;
2627 
2628 		xfs_iext_remove(ip, icur, state);
2629 		xfs_iext_prev(ifp, icur);
2630 		xfs_iext_update_extent(ip, state, icur, &left);
2631 		break;
2632 
2633 	case BMAP_LEFT_CONTIG:
2634 		/*
2635 		 * New allocation is contiguous with a delayed allocation
2636 		 * on the left.
2637 		 * Merge the new allocation with the left neighbor.
2638 		 */
2639 		temp = left.br_blockcount + new->br_blockcount;
2640 
2641 		oldlen = startblockval(left.br_startblock) +
2642 			startblockval(new->br_startblock);
2643 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2644 					 oldlen);
2645 		left.br_blockcount = temp;
2646 		left.br_startblock = nullstartblock(newlen);
2647 
2648 		xfs_iext_prev(ifp, icur);
2649 		xfs_iext_update_extent(ip, state, icur, &left);
2650 		break;
2651 
2652 	case BMAP_RIGHT_CONTIG:
2653 		/*
2654 		 * New allocation is contiguous with a delayed allocation
2655 		 * on the right.
2656 		 * Merge the new allocation with the right neighbor.
2657 		 */
2658 		temp = new->br_blockcount + right.br_blockcount;
2659 		oldlen = startblockval(new->br_startblock) +
2660 			startblockval(right.br_startblock);
2661 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2662 					 oldlen);
2663 		right.br_startoff = new->br_startoff;
2664 		right.br_startblock = nullstartblock(newlen);
2665 		right.br_blockcount = temp;
2666 		xfs_iext_update_extent(ip, state, icur, &right);
2667 		break;
2668 
2669 	case 0:
2670 		/*
2671 		 * New allocation is not contiguous with another
2672 		 * delayed allocation.
2673 		 * Insert a new entry.
2674 		 */
2675 		oldlen = newlen = 0;
2676 		xfs_iext_insert(ip, icur, new, state);
2677 		break;
2678 	}
2679 	if (oldlen != newlen) {
2680 		ASSERT(oldlen > newlen);
2681 		xfs_add_fdblocks(ip->i_mount, oldlen - newlen);
2682 
2683 		/*
2684 		 * Nothing to do for disk quota accounting here.
2685 		 */
2686 		xfs_mod_delalloc(ip, 0, (int64_t)newlen - oldlen);
2687 	}
2688 }
2689 
2690 /*
2691  * Convert a hole to a real allocation.
2692  */
2693 STATIC int				/* error */
2694 xfs_bmap_add_extent_hole_real(
2695 	struct xfs_trans	*tp,
2696 	struct xfs_inode	*ip,
2697 	int			whichfork,
2698 	struct xfs_iext_cursor	*icur,
2699 	struct xfs_btree_cur	**curp,
2700 	struct xfs_bmbt_irec	*new,
2701 	int			*logflagsp,
2702 	uint32_t		flags)
2703 {
2704 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
2705 	struct xfs_mount	*mp = ip->i_mount;
2706 	struct xfs_btree_cur	*cur = *curp;
2707 	int			error;	/* error return value */
2708 	int			i;	/* temp state */
2709 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
2710 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
2711 	int			rval = 0;	/* return value (logging flags) */
2712 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2713 	struct xfs_bmbt_irec	old;
2714 
2715 	ASSERT(!isnullstartblock(new->br_startblock));
2716 	ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
2717 
2718 	XFS_STATS_INC(mp, xs_add_exlist);
2719 
2720 	/*
2721 	 * Check and set flags if this segment has a left neighbor.
2722 	 */
2723 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2724 		state |= BMAP_LEFT_VALID;
2725 		if (isnullstartblock(left.br_startblock))
2726 			state |= BMAP_LEFT_DELAY;
2727 	}
2728 
2729 	/*
2730 	 * Check and set flags if this segment has a current value.
2731 	 * Not true if we're inserting into the "hole" at eof.
2732 	 */
2733 	if (xfs_iext_get_extent(ifp, icur, &right)) {
2734 		state |= BMAP_RIGHT_VALID;
2735 		if (isnullstartblock(right.br_startblock))
2736 			state |= BMAP_RIGHT_DELAY;
2737 	}
2738 
2739 	/*
2740 	 * We're inserting a real allocation between "left" and "right".
2741 	 * Set the contiguity flags.  Don't let extents get too large.
2742 	 */
2743 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2744 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
2745 	    left.br_startblock + left.br_blockcount == new->br_startblock &&
2746 	    left.br_state == new->br_state &&
2747 	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2748 		state |= BMAP_LEFT_CONTIG;
2749 
2750 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2751 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
2752 	    new->br_startblock + new->br_blockcount == right.br_startblock &&
2753 	    new->br_state == right.br_state &&
2754 	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2755 	    (!(state & BMAP_LEFT_CONTIG) ||
2756 	     left.br_blockcount + new->br_blockcount +
2757 	     right.br_blockcount <= XFS_MAX_BMBT_EXTLEN))
2758 		state |= BMAP_RIGHT_CONTIG;
2759 
2760 	error = 0;
2761 	/*
2762 	 * Select which case we're in here, and implement it.
2763 	 */
2764 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2765 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2766 		/*
2767 		 * New allocation is contiguous with real allocations on the
2768 		 * left and on the right.
2769 		 * Merge all three into a single extent record.
2770 		 */
2771 		left.br_blockcount += new->br_blockcount + right.br_blockcount;
2772 
2773 		xfs_iext_remove(ip, icur, state);
2774 		xfs_iext_prev(ifp, icur);
2775 		xfs_iext_update_extent(ip, state, icur, &left);
2776 		ifp->if_nextents--;
2777 
2778 		if (cur == NULL) {
2779 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2780 		} else {
2781 			rval = XFS_ILOG_CORE;
2782 			error = xfs_bmbt_lookup_eq(cur, &right, &i);
2783 			if (error)
2784 				goto done;
2785 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2786 				xfs_btree_mark_sick(cur);
2787 				error = -EFSCORRUPTED;
2788 				goto done;
2789 			}
2790 			error = xfs_btree_delete(cur, &i);
2791 			if (error)
2792 				goto done;
2793 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2794 				xfs_btree_mark_sick(cur);
2795 				error = -EFSCORRUPTED;
2796 				goto done;
2797 			}
2798 			error = xfs_btree_decrement(cur, 0, &i);
2799 			if (error)
2800 				goto done;
2801 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2802 				xfs_btree_mark_sick(cur);
2803 				error = -EFSCORRUPTED;
2804 				goto done;
2805 			}
2806 			error = xfs_bmbt_update(cur, &left);
2807 			if (error)
2808 				goto done;
2809 		}
2810 		break;
2811 
2812 	case BMAP_LEFT_CONTIG:
2813 		/*
2814 		 * New allocation is contiguous with a real allocation
2815 		 * on the left.
2816 		 * Merge the new allocation with the left neighbor.
2817 		 */
2818 		old = left;
2819 		left.br_blockcount += new->br_blockcount;
2820 
2821 		xfs_iext_prev(ifp, icur);
2822 		xfs_iext_update_extent(ip, state, icur, &left);
2823 
2824 		if (cur == NULL) {
2825 			rval = xfs_ilog_fext(whichfork);
2826 		} else {
2827 			rval = 0;
2828 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2829 			if (error)
2830 				goto done;
2831 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2832 				xfs_btree_mark_sick(cur);
2833 				error = -EFSCORRUPTED;
2834 				goto done;
2835 			}
2836 			error = xfs_bmbt_update(cur, &left);
2837 			if (error)
2838 				goto done;
2839 		}
2840 		break;
2841 
2842 	case BMAP_RIGHT_CONTIG:
2843 		/*
2844 		 * New allocation is contiguous with a real allocation
2845 		 * on the right.
2846 		 * Merge the new allocation with the right neighbor.
2847 		 */
2848 		old = right;
2849 
2850 		right.br_startoff = new->br_startoff;
2851 		right.br_startblock = new->br_startblock;
2852 		right.br_blockcount += new->br_blockcount;
2853 		xfs_iext_update_extent(ip, state, icur, &right);
2854 
2855 		if (cur == NULL) {
2856 			rval = xfs_ilog_fext(whichfork);
2857 		} else {
2858 			rval = 0;
2859 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2860 			if (error)
2861 				goto done;
2862 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2863 				xfs_btree_mark_sick(cur);
2864 				error = -EFSCORRUPTED;
2865 				goto done;
2866 			}
2867 			error = xfs_bmbt_update(cur, &right);
2868 			if (error)
2869 				goto done;
2870 		}
2871 		break;
2872 
2873 	case 0:
2874 		/*
2875 		 * New allocation is not contiguous with another
2876 		 * real allocation.
2877 		 * Insert a new entry.
2878 		 */
2879 		xfs_iext_insert(ip, icur, new, state);
2880 		ifp->if_nextents++;
2881 
2882 		if (cur == NULL) {
2883 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2884 		} else {
2885 			rval = XFS_ILOG_CORE;
2886 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2887 			if (error)
2888 				goto done;
2889 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2890 				xfs_btree_mark_sick(cur);
2891 				error = -EFSCORRUPTED;
2892 				goto done;
2893 			}
2894 			error = xfs_btree_insert(cur, &i);
2895 			if (error)
2896 				goto done;
2897 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2898 				xfs_btree_mark_sick(cur);
2899 				error = -EFSCORRUPTED;
2900 				goto done;
2901 			}
2902 		}
2903 		break;
2904 	}
2905 
2906 	/* add reverse mapping unless caller opted out */
2907 	if (!(flags & XFS_BMAPI_NORMAP))
2908 		xfs_rmap_map_extent(tp, ip, whichfork, new);
2909 
2910 	/* convert to a btree if necessary */
2911 	if (xfs_bmap_needs_btree(ip, whichfork)) {
2912 		int	tmp_logflags;	/* partial log flag return val */
2913 
2914 		ASSERT(cur == NULL);
2915 		error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2916 				&tmp_logflags, whichfork);
2917 		*logflagsp |= tmp_logflags;
2918 		cur = *curp;
2919 		if (error)
2920 			goto done;
2921 	}
2922 
2923 	/* clear out the allocated field, done with it now in any case. */
2924 	if (cur)
2925 		cur->bc_bmap.allocated = 0;
2926 
2927 	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2928 done:
2929 	*logflagsp |= rval;
2930 	return error;
2931 }
2932 
2933 /*
2934  * Functions used in the extent read, allocate and remove paths
2935  */
2936 
2937 /*
2938  * Adjust the size of the new extent based on i_extsize and rt extsize.
2939  */
2940 int
2941 xfs_bmap_extsize_align(
2942 	xfs_mount_t	*mp,
2943 	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
2944 	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
2945 	xfs_extlen_t	extsz,		/* align to this extent size */
2946 	int		rt,		/* is this a realtime inode? */
2947 	int		eof,		/* is extent at end-of-file? */
2948 	int		delay,		/* creating delalloc extent? */
2949 	int		convert,	/* overwriting unwritten extent? */
2950 	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
2951 	xfs_extlen_t	*lenp)		/* in/out: aligned length */
2952 {
2953 	xfs_fileoff_t	orig_off;	/* original offset */
2954 	xfs_extlen_t	orig_alen;	/* original length */
2955 	xfs_fileoff_t	orig_end;	/* original off+len */
2956 	xfs_fileoff_t	nexto;		/* next file offset */
2957 	xfs_fileoff_t	prevo;		/* previous file offset */
2958 	xfs_fileoff_t	align_off;	/* temp for offset */
2959 	xfs_extlen_t	align_alen;	/* temp for length */
2960 	xfs_extlen_t	temp;		/* temp for calculations */
2961 
2962 	if (convert)
2963 		return 0;
2964 
2965 	orig_off = align_off = *offp;
2966 	orig_alen = align_alen = *lenp;
2967 	orig_end = orig_off + orig_alen;
2968 
2969 	/*
2970 	 * If this request overlaps an existing extent, then don't
2971 	 * attempt to perform any additional alignment.
2972 	 */
2973 	if (!delay && !eof &&
2974 	    (orig_off >= gotp->br_startoff) &&
2975 	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2976 		return 0;
2977 	}
2978 
2979 	/*
2980 	 * If the file offset is unaligned vs. the extent size
2981 	 * we need to align it.  This will be possible unless
2982 	 * the file was previously written with a kernel that didn't
2983 	 * perform this alignment, or if a truncate shot us in the
2984 	 * foot.
2985 	 */
2986 	div_u64_rem(orig_off, extsz, &temp);
2987 	if (temp) {
2988 		align_alen += temp;
2989 		align_off -= temp;
2990 	}
2991 
2992 	/* Same adjustment for the end of the requested area. */
2993 	temp = (align_alen % extsz);
2994 	if (temp)
2995 		align_alen += extsz - temp;
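	/*
	 * Worked example (hypothetical numbers): with extsz = 16, a request
	 * for offset 21, length 10 first becomes offset 16, length 15, and
	 * rounding the end up then yields offset 16, length 16 - the aligned
	 * range [16, 32) covering the original [21, 31).
	 */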
2996 
2997 	/*
2998 	 * For large extent hint sizes, the aligned extent might be larger than
2999 	 * XFS_MAX_BMBT_EXTLEN. In that case, reduce the size by an extsz so
3000 	 * that it pulls the length back under XFS_MAX_BMBT_EXTLEN. The outer
3001 	 * allocation loops handle short allocation just fine, so it is safe to
3002 	 * do this. We only want to do it when we are forced to, though, because
3003 	 * it means more allocation operations are required.
3004 	 */
3005 	while (align_alen > XFS_MAX_BMBT_EXTLEN)
3006 		align_alen -= extsz;
3007 	ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);
3008 
3009 	/*
3010 	 * If the previous block overlaps with this proposed allocation
3011 	 * then move the start forward without adjusting the length.
3012 	 */
3013 	if (prevp->br_startoff != NULLFILEOFF) {
3014 		if (prevp->br_startblock == HOLESTARTBLOCK)
3015 			prevo = prevp->br_startoff;
3016 		else
3017 			prevo = prevp->br_startoff + prevp->br_blockcount;
3018 	} else
3019 		prevo = 0;
3020 	if (align_off != orig_off && align_off < prevo)
3021 		align_off = prevo;
3022 	/*
3023 	 * If the next block overlaps with this proposed allocation
3024 	 * then move the start back without adjusting the length,
3025 	 * but not before offset 0.
3026 	 * This may of course make the start overlap previous block,
3027 	 * and if we hit the offset 0 limit then the next block
3028 	 * can still overlap too.
3029 	 */
3030 	if (!eof && gotp->br_startoff != NULLFILEOFF) {
3031 		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3032 		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3033 			nexto = gotp->br_startoff + gotp->br_blockcount;
3034 		else
3035 			nexto = gotp->br_startoff;
3036 	} else
3037 		nexto = NULLFILEOFF;
3038 	if (!eof &&
3039 	    align_off + align_alen != orig_end &&
3040 	    align_off + align_alen > nexto)
3041 		align_off = nexto > align_alen ? nexto - align_alen : 0;
3042 	/*
3043 	 * If we're now overlapping the next or previous extent that
3044 	 * means we can't fit an extsz piece in this hole.  Just move
3045 	 * the start forward to the first valid spot and set
3046 	 * the length so we hit the end.
3047 	 */
3048 	if (align_off != orig_off && align_off < prevo)
3049 		align_off = prevo;
3050 	if (align_off + align_alen != orig_end &&
3051 	    align_off + align_alen > nexto &&
3052 	    nexto != NULLFILEOFF) {
3053 		ASSERT(nexto > prevo);
3054 		align_alen = nexto - align_off;
3055 	}
3056 
3057 	/*
3058 	 * If realtime, and the result isn't a multiple of the realtime
3059 	 * extent size we need to remove blocks until it is.
3060 	 */
3061 	if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) {
3062 		/*
3063 		 * We're not covering the original request, or
3064 		 * we won't be able to once we fix the length.
3065 		 */
3066 		if (orig_off < align_off ||
3067 		    orig_end > align_off + align_alen ||
3068 		    align_alen - temp < orig_alen)
3069 			return -EINVAL;
3070 		/*
3071 		 * Try to fix it by moving the start up.
3072 		 */
3073 		if (align_off + temp <= orig_off) {
3074 			align_alen -= temp;
3075 			align_off += temp;
3076 		}
3077 		/*
3078 		 * Try to fix it by moving the end in.
3079 		 */
3080 		else if (align_off + align_alen - temp >= orig_end)
3081 			align_alen -= temp;
3082 		/*
3083 		 * Set the start to the minimum then trim the length.
3084 		 */
3085 		else {
3086 			align_alen -= orig_off - align_off;
3087 			align_off = orig_off;
3088 			align_alen -= xfs_extlen_to_rtxmod(mp, align_alen);
3089 		}
3090 		/*
3091 		 * Result doesn't cover the request, fail it.
3092 		 */
3093 		if (orig_off < align_off || orig_end > align_off + align_alen)
3094 			return -EINVAL;
3095 	} else {
3096 		ASSERT(orig_off >= align_off);
3097 		/* see XFS_MAX_BMBT_EXTLEN handling above */
3098 		ASSERT(orig_end <= align_off + align_alen ||
3099 		       align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
3100 	}
3101 
3102 #ifdef DEBUG
3103 	if (!eof && gotp->br_startoff != NULLFILEOFF)
3104 		ASSERT(align_off + align_alen <= gotp->br_startoff);
3105 	if (prevp->br_startoff != NULLFILEOFF)
3106 		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3107 #endif
3108 
3109 	*lenp = align_alen;
3110 	*offp = align_off;
3111 	return 0;
3112 }
3113 
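/*
 * Upper bound, in units of the allocation length, on how far a candidate
 * start block may sit from a neighbouring extent before the locality
 * heuristics in xfs_bmap_adjacent() stop chasing it.
 */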
3114 #define XFS_ALLOC_GAP_UNITS	4
3115 
3116 /* returns true if ap->blkno was modified */
3117 bool
3118 xfs_bmap_adjacent(
3119 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
3120 {
3121 	xfs_fsblock_t	adjust;		/* adjustment to block numbers */
3122 	xfs_mount_t	*mp;		/* mount point structure */
3123 	int		rt;		/* true if inode is realtime */
3124 
3125 #define	ISVALID(x,y)	\
3126 	(rt ? \
3127 		(x) < mp->m_sb.sb_rblocks : \
3128 		XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3129 		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3130 		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3131 
3132 	mp = ap->ip->i_mount;
3133 	rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3134 		(ap->datatype & XFS_ALLOC_USERDATA);
3135 	/*
3136 	 * If allocating at eof, and there's a previous real block,
3137 	 * try to use its last block as our starting point.
3138 	 */
3139 	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3140 	    !isnullstartblock(ap->prev.br_startblock) &&
3141 	    ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3142 		    ap->prev.br_startblock)) {
3143 		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3144 		/*
3145 		 * Adjust for the gap between prevp and us.
3146 		 */
3147 		adjust = ap->offset -
3148 			(ap->prev.br_startoff + ap->prev.br_blockcount);
3149 		if (adjust &&
3150 		    ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3151 			ap->blkno += adjust;
3152 		return true;
3153 	}
3154 	/*
3155 	 * If not at eof, then compare the two neighbor blocks.
3156 	 * Figure out whether either one gives us a good starting point,
3157 	 * and pick the better one.
3158 	 */
3159 	if (!ap->eof) {
3160 		xfs_fsblock_t	gotbno;		/* right side block number */
3161 		xfs_fsblock_t	gotdiff = 0;	/* right side difference */
3162 		xfs_fsblock_t	prevbno;	/* left side block number */
3163 		xfs_fsblock_t	prevdiff = 0;	/* left side difference */
3164 
3165 		/*
3166 		 * If there's a previous (left) block, select a requested
3167 		 * start block based on it.
3168 		 */
3169 		if (ap->prev.br_startoff != NULLFILEOFF &&
3170 		    !isnullstartblock(ap->prev.br_startblock) &&
3171 		    (prevbno = ap->prev.br_startblock +
3172 			       ap->prev.br_blockcount) &&
3173 		    ISVALID(prevbno, ap->prev.br_startblock)) {
3174 			/*
3175 			 * Calculate gap to end of previous block.
3176 			 */
3177 			adjust = prevdiff = ap->offset -
3178 				(ap->prev.br_startoff +
3179 				 ap->prev.br_blockcount);
3180 			/*
3181 			 * Figure the startblock based on the previous block's
3182 			 * end and the gap size.
3183 			 * Heuristic!
3184 			 * If the gap is large relative to the piece we're
3185 			 * allocating, or using it gives us an invalid block
3186 			 * number, then just use the end of the previous block.
3187 			 */
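			/*
			 * For instance (hypothetical numbers): if the
			 * previous mapping ends at block 5000 and we are
			 * allocating 4 blocks 10 file blocks past its end,
			 * the gap (10) is within XFS_ALLOC_GAP_UNITS * 4 =
			 * 16, so, assuming the result is a valid block, the
			 * requested start becomes block 5010.
			 */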
3188 			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3189 			    ISVALID(prevbno + prevdiff,
3190 				    ap->prev.br_startblock))
3191 				prevbno += adjust;
3192 			else
3193 				prevdiff += adjust;
3194 		}
3195 		/*
3196 		 * No previous block or can't follow it, just default.
3197 		 */
3198 		else
3199 			prevbno = NULLFSBLOCK;
3200 		/*
3201 		 * If there's a following (right) block, select a requested
3202 		 * start block based on it.
3203 		 */
3204 		if (!isnullstartblock(ap->got.br_startblock)) {
3205 			/*
3206 			 * Calculate gap to start of next block.
3207 			 */
3208 			adjust = gotdiff = ap->got.br_startoff - ap->offset;
3209 			/*
3210 			 * Figure the startblock based on the next block's
3211 			 * start and the gap size.
3212 			 */
3213 			gotbno = ap->got.br_startblock;
3214 			/*
3215 			 * Heuristic!
3216 			 * If the gap is large relative to the piece we're
3217 			 * allocating, or using it gives us an invalid block
3218 			 * number, then just use the start of the next block
3219 			 * offset by our length.
3220 			 */
3221 			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3222 			    ISVALID(gotbno - gotdiff, gotbno))
3223 				gotbno -= adjust;
3224 			else if (ISVALID(gotbno - ap->length, gotbno)) {
3225 				gotbno -= ap->length;
3226 				gotdiff += adjust - ap->length;
3227 			} else
3228 				gotdiff += adjust;
3229 		}
3230 		/*
3231 		 * No next block, just default.
3232 		 */
3233 		else
3234 			gotbno = NULLFSBLOCK;
3235 		/*
3236 		 * If both valid, pick the better one, else the only good
3237 		 * one, else ap->blkno is already set (to 0 or the inode block).
3238 		 */
3239 		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) {
3240 			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3241 			return true;
3242 		}
3243 		if (prevbno != NULLFSBLOCK) {
3244 			ap->blkno = prevbno;
3245 			return true;
3246 		}
3247 		if (gotbno != NULLFSBLOCK) {
3248 			ap->blkno = gotbno;
3249 			return true;
3250 		}
3251 	}
3252 #undef ISVALID
3253 	return false;
3254 }
3255 
3256 int
3257 xfs_bmap_longest_free_extent(
3258 	struct xfs_perag	*pag,
3259 	struct xfs_trans	*tp,
3260 	xfs_extlen_t		*blen)
3261 {
3262 	xfs_extlen_t		longest;
3263 	int			error = 0;
3264 
3265 	if (!xfs_perag_initialised_agf(pag)) {
3266 		error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
3267 				NULL);
3268 		if (error)
3269 			return error;
3270 	}
3271 
3272 	longest = xfs_alloc_longest_free_extent(pag,
3273 				xfs_alloc_min_freelist(pag->pag_mount, pag),
3274 				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3275 	if (*blen < longest)
3276 		*blen = longest;
3277 
3278 	return 0;
3279 }
3280 
3281 static xfs_extlen_t
3282 xfs_bmap_select_minlen(
3283 	struct xfs_bmalloca	*ap,
3284 	struct xfs_alloc_arg	*args,
3285 	xfs_extlen_t		blen)
3286 {
3287 
3288 	/*
3289 	 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), blen
3290 	 * may underestimate; there may still be enough contiguous free space.
3291 	 */
3292 	if (blen < ap->minlen)
3293 		return ap->minlen;
3294 
3295 	/*
3296 	 * If the best seen length is less than the request length,
3297 	 * use the best as the minimum, otherwise we've got the maxlen we
3298 	 * were asked for.
3299 	 */
3300 	if (blen < args->maxlen)
3301 		return blen;
3302 	return args->maxlen;
3303 }
3304 
3305 static int
3306 xfs_bmap_btalloc_select_lengths(
3307 	struct xfs_bmalloca	*ap,
3308 	struct xfs_alloc_arg	*args,
3309 	xfs_extlen_t		*blen)
3310 {
3311 	struct xfs_mount	*mp = args->mp;
3312 	struct xfs_perag	*pag;
3313 	xfs_agnumber_t		agno, startag;
3314 	int			error = 0;
3315 
3316 	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3317 		args->total = ap->minlen;
3318 		args->minlen = ap->minlen;
3319 		return 0;
3320 	}
3321 
3322 	args->total = ap->total;
3323 	startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
3324 	if (startag == NULLAGNUMBER)
3325 		startag = 0;
3326 
3327 	*blen = 0;
3328 	for_each_perag_wrap(mp, startag, agno, pag) {
3329 		error = xfs_bmap_longest_free_extent(pag, args->tp, blen);
3330 		if (error && error != -EAGAIN)
3331 			break;
3332 		error = 0;
3333 		if (*blen >= args->maxlen)
3334 			break;
3335 	}
3336 	if (pag)
3337 		xfs_perag_rele(pag);
3338 
3339 	args->minlen = xfs_bmap_select_minlen(ap, args, *blen);
3340 	return error;
3341 }
3342 
3343 /* Update all inode and quota accounting for the allocation we just did. */
3344 void
3345 xfs_bmap_alloc_account(
3346 	struct xfs_bmalloca	*ap)
3347 {
3348 	bool			isrt = XFS_IS_REALTIME_INODE(ap->ip) &&
3349 					!(ap->flags & XFS_BMAPI_ATTRFORK);
3350 	uint			fld;
3351 
3352 	if (ap->flags & XFS_BMAPI_COWFORK) {
3353 		/*
3354 		 * COW fork blocks are in-core only and thus are treated as
3355 		 * in-core quota reservation (like delalloc blocks) even when
3356 		 * converted to real blocks. The quota reservation is not
3357 		 * accounted to disk until blocks are remapped to the data
3358 		 * fork. So if these blocks were previously delalloc, we
3359 		 * already have quota reservation and there's nothing to do
3360 		 * yet.
3361 		 */
3362 		if (ap->wasdel) {
3363 			xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
3364 			return;
3365 		}
3366 
3367 		/*
3368 		 * Otherwise, we've allocated blocks in a hole. The transaction
3369 		 * has acquired in-core quota reservation for this extent.
3370 		 * Rather than account these as real blocks, however, we reduce
3371 		 * the transaction quota reservation based on the allocation.
3372 		 * This essentially transfers the transaction quota reservation
3373 		 * to that of a delalloc extent.
3374 		 */
3375 		ap->ip->i_delayed_blks += ap->length;
3376 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip, isrt ?
3377 				XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
3378 				-(long)ap->length);
3379 		return;
3380 	}
3381 
3382 	/* data/attr fork only */
3383 	ap->ip->i_nblocks += ap->length;
3384 	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3385 	if (ap->wasdel) {
3386 		ap->ip->i_delayed_blks -= ap->length;
3387 		xfs_mod_delalloc(ap->ip, -(int64_t)ap->length, 0);
3388 		fld = isrt ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_DELBCOUNT;
3389 	} else {
3390 		fld = isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
3391 	}
3392 
3393 	xfs_trans_mod_dquot_byino(ap->tp, ap->ip, fld, ap->length);
3394 }
3395 
3396 static int
3397 xfs_bmap_compute_alignments(
3398 	struct xfs_bmalloca	*ap,
3399 	struct xfs_alloc_arg	*args)
3400 {
3401 	struct xfs_mount	*mp = args->mp;
3402 	xfs_extlen_t		align = 0; /* minimum allocation alignment */
3403 	int			stripe_align = 0;
3404 
3405 	/* stripe alignment for allocation is determined by mount parameters */
3406 	if (mp->m_swidth && xfs_has_swalloc(mp))
3407 		stripe_align = mp->m_swidth;
3408 	else if (mp->m_dalign)
3409 		stripe_align = mp->m_dalign;
3410 
3411 	if (ap->flags & XFS_BMAPI_COWFORK)
3412 		align = xfs_get_cowextsz_hint(ap->ip);
3413 	else if (ap->datatype & XFS_ALLOC_USERDATA)
3414 		align = xfs_get_extsz_hint(ap->ip);
3415 	if (align) {
3416 		if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
3417 					ap->eof, 0, ap->conv, &ap->offset,
3418 					&ap->length))
3419 			ASSERT(0);
3420 		ASSERT(ap->length);
3421 	}
3422 
3423 	/* apply extent size hints if obtained earlier */
3424 	if (align) {
3425 		args->prod = align;
3426 		div_u64_rem(ap->offset, args->prod, &args->mod);
3427 		if (args->mod)
3428 			args->mod = args->prod - args->mod;
3429 	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3430 		args->prod = 1;
3431 		args->mod = 0;
3432 	} else {
3433 		args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3434 		div_u64_rem(ap->offset, args->prod, &args->mod);
3435 		if (args->mod)
3436 			args->mod = args->prod - args->mod;
3437 	}
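	/*
	 * Sketch of the prod/mod convention (illustrative numbers): with
	 * align = 4 and ap->offset = 10, args->mod becomes 4 - (10 % 4) = 2,
	 * telling the allocator to prefer a length with len % 4 == 2 so the
	 * mapping ends on an extent size boundary.
	 */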
3438 
3439 	return stripe_align;
3440 }
3441 
3442 static void
3443 xfs_bmap_process_allocated_extent(
3444 	struct xfs_bmalloca	*ap,
3445 	struct xfs_alloc_arg	*args,
3446 	xfs_fileoff_t		orig_offset,
3447 	xfs_extlen_t		orig_length)
3448 {
3449 	ap->blkno = args->fsbno;
3450 	ap->length = args->len;
3451 	/*
3452 	 * If the extent size hint is active, we tried to round the
3453 	 * caller's allocation request offset down to extsz and the
3454 	 * length up to another extsz boundary.  If we found a free
3455 	 * extent we mapped it in starting at this new offset.  If the
3456 	 * newly mapped space isn't long enough to cover any of the
3457 	 * range of offsets that was originally requested, move the
3458 	 * mapping up so that we can fill as much of the caller's
3459 	 * original request as possible.  Free space is apparently
3460 	 * very fragmented so we're unlikely to be able to satisfy the
3461 	 * hints anyway.
3462 	 */
3463 	if (ap->length <= orig_length)
3464 		ap->offset = orig_offset;
3465 	else if (ap->offset + ap->length < orig_offset + orig_length)
3466 		ap->offset = orig_offset + orig_length - ap->length;
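	/*
	 * E.g. (hypothetical numbers): a request for [21, 31) that was
	 * aligned out to [16, 32) but only got 8 blocks back is moved up to
	 * start at offset 21, covering as much of the caller's original
	 * range as possible.
	 */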
3467 	xfs_bmap_alloc_account(ap);
3468 }
3469 
3470 #ifdef DEBUG
3471 static int
3472 xfs_bmap_exact_minlen_extent_alloc(
3473 	struct xfs_bmalloca	*ap)
3474 {
3475 	struct xfs_mount	*mp = ap->ip->i_mount;
3476 	struct xfs_alloc_arg	args = { .tp = ap->tp, .mp = mp };
3477 	xfs_fileoff_t		orig_offset;
3478 	xfs_extlen_t		orig_length;
3479 	int			error;
3480 
3481 	ASSERT(ap->length);
3482 
3483 	if (ap->minlen != 1) {
3484 		ap->blkno = NULLFSBLOCK;
3485 		ap->length = 0;
3486 		return 0;
3487 	}
3488 
3489 	orig_offset = ap->offset;
3490 	orig_length = ap->length;
3491 
3492 	args.alloc_minlen_only = 1;
3493 
3494 	xfs_bmap_compute_alignments(ap, &args);
3495 
3496 	/*
3497 	 * Unlike the longest extent available in an AG, we don't track
3498 	 * the length of an AG's shortest extent.
3499 	 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
3500 	 * hence we can afford to start traversing from the 0th AG since
3501 	 * we need not be concerned about a drop in performance in
3502 	 * "debug only" code paths.
3503 	 */
3504 	ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0);
3505 
3506 	args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
3507 	args.minlen = args.maxlen = ap->minlen;
3508 	args.total = ap->total;
3509 
3510 	args.alignment = 1;
3511 	args.minalignslop = 0;
3512 
3513 	args.minleft = ap->minleft;
3514 	args.wasdel = ap->wasdel;
3515 	args.resv = XFS_AG_RESV_NONE;
3516 	args.datatype = ap->datatype;
3517 
3518 	error = xfs_alloc_vextent_first_ag(&args, ap->blkno);
3519 	if (error)
3520 		return error;
3521 
3522 	if (args.fsbno != NULLFSBLOCK) {
3523 		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3524 			orig_length);
3525 	} else {
3526 		ap->blkno = NULLFSBLOCK;
3527 		ap->length = 0;
3528 	}
3529 
3530 	return 0;
3531 }
3532 #else
3533 
3534 #define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED)
3535 
3536 #endif
3537 
3538 /*
3539  * If we are not low on available data blocks and we are allocating at
3540  * EOF, optimise allocation for contiguous file extension and/or stripe
3541  * alignment of the new extent.
3542  *
3543  * NOTE: ap->aeof is only set if the allocation length is >= the
3544  * stripe unit and the allocation offset is at the end of file.
3545  */
3546 static int
3547 xfs_bmap_btalloc_at_eof(
3548 	struct xfs_bmalloca	*ap,
3549 	struct xfs_alloc_arg	*args,
3550 	xfs_extlen_t		blen,
3551 	int			stripe_align,
3552 	bool			ag_only)
3553 {
3554 	struct xfs_mount	*mp = args->mp;
3555 	struct xfs_perag	*caller_pag = args->pag;
3556 	int			error;
3557 
3558 	/*
3559 	 * If there are already extents in the file, try an exact EOF block
3560 	 * allocation to extend the file as a contiguous extent. If that fails,
3561 	 * or it's the first allocation in a file, just try for a stripe aligned
3562 	 * allocation.
3563 	 */
3564 	if (ap->offset) {
3565 		xfs_extlen_t	nextminlen = 0;
3566 
3567 		/*
3568 		 * Compute the minlen+alignment for the next case.  Set slop so
3569 		 * that the value of minlen+alignment+slop doesn't go up between
3570 		 * the calls.
3571 		 */
3572 		args->alignment = 1;
3573 		if (blen > stripe_align && blen <= args->maxlen)
3574 			nextminlen = blen - stripe_align;
3575 		else
3576 			nextminlen = args->minlen;
3577 		if (nextminlen + stripe_align > args->minlen + 1)
3578 			args->minalignslop = nextminlen + stripe_align -
3579 					args->minlen - 1;
3580 		else
3581 			args->minalignslop = 0;
3582 
3583 		if (!caller_pag)
3584 			args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno));
3585 		error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
3586 		if (!caller_pag) {
3587 			xfs_perag_put(args->pag);
3588 			args->pag = NULL;
3589 		}
3590 		if (error)
3591 			return error;
3592 
3593 		if (args->fsbno != NULLFSBLOCK)
3594 			return 0;
3595 		/*
3596 		 * Exact allocation failed. Reset to try an aligned allocation
3597 		 * according to the original allocation specification.
3598 		 */
3599 		args->alignment = stripe_align;
3600 		args->minlen = nextminlen;
3601 		args->minalignslop = 0;
3602 	} else {
3603 		/*
3604 		 * Adjust minlen to try and preserve alignment if we
3605 		 * can't guarantee an aligned maxlen extent.
3606 		 */
3607 		args->alignment = stripe_align;
3608 		if (blen > args->alignment &&
3609 		    blen <= args->maxlen + args->alignment)
3610 			args->minlen = blen - args->alignment;
3611 		args->minalignslop = 0;
3612 	}
3613 
3614 	if (ag_only) {
3615 		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3616 	} else {
3617 		args->pag = NULL;
3618 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3619 		ASSERT(args->pag == NULL);
3620 		args->pag = caller_pag;
3621 	}
3622 	if (error)
3623 		return error;
3624 
3625 	if (args->fsbno != NULLFSBLOCK)
3626 		return 0;
3627 
3628 	/*
3629 	 * Allocation failed, so return the allocation args to their
3630 	 * original non-aligned state so the caller can proceed on allocation
3631 	 * failure as if this function was never called.
3632 	 */
3633 	args->alignment = 1;
3634 	return 0;
3635 }
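
/*
 * Worked example (hypothetical numbers, not compiled): how the
 * minalignslop computation above keeps minlen + alignment + slop from
 * growing between the exact-bno attempt and the stripe-aligned attempt.
 */
#if 0
static void example_minalignslop(void)
{
	xfs_extlen_t	stripe_align = 8, blen = 20, maxlen = 32, minlen = 4;
	xfs_extlen_t	nextminlen, minalignslop;

	if (blen > stripe_align && blen <= maxlen)
		nextminlen = blen - stripe_align;	/* 12 */
	else
		nextminlen = minlen;
	if (nextminlen + stripe_align > minlen + 1)
		minalignslop = nextminlen + stripe_align - minlen - 1; /* 15 */
	else
		minalignslop = 0;

	/*
	 * Exact attempt:   minlen(4)  + alignment(1) + slop(15) = 20
	 * Aligned attempt: minlen(12) + alignment(8) + slop(0)  = 20
	 * so the space requirement never goes up between the two calls.
	 */
}
#endif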
3636 
3637 /*
3638  * We have failed multiple allocation attempts, so we are now in a low
3639  * space allocation situation. Try a locality-first, full-filesystem,
3640  * minimum-length allocation whilst still maintaining the necessary total
3641  * block reservation requirements.
3642  *
3643  * If that fails, we are now critically low on space, so perform a last resort
3644  * allocation attempt: no reserve, no locality, blocking, minimum length, full
3645  * filesystem free space scan. We also indicate to future allocations in this
3646  * transaction that we are critically low on space so they don't waste time on
3647  * allocation modes that are unlikely to succeed.
3648  */
3649 int
3650 xfs_bmap_btalloc_low_space(
3651 	struct xfs_bmalloca	*ap,
3652 	struct xfs_alloc_arg	*args)
3653 {
3654 	int			error;
3655 
3656 	if (args->minlen > ap->minlen) {
3657 		args->minlen = ap->minlen;
3658 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3659 		if (error || args->fsbno != NULLFSBLOCK)
3660 			return error;
3661 	}
3662 
3663 	/* Last ditch attempt before failure is declared. */
3664 	args->total = ap->minlen;
3665 	error = xfs_alloc_vextent_first_ag(args, 0);
3666 	if (error)
3667 		return error;
3668 	ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3669 	return 0;
3670 }
3671 
3672 static int
3673 xfs_bmap_btalloc_filestreams(
3674 	struct xfs_bmalloca	*ap,
3675 	struct xfs_alloc_arg	*args,
3676 	int			stripe_align)
3677 {
3678 	xfs_extlen_t		blen = 0;
3679 	int			error = 0;
3680 
3682 	error = xfs_filestream_select_ag(ap, args, &blen);
3683 	if (error)
3684 		return error;
3685 	ASSERT(args->pag);
3686 
3687 	/*
3688 	 * If we are in low space mode, then optimal allocation will fail so
3689 	 * prepare for minimal allocation and jump to the low space algorithm
3690 	 * immediately.
3691 	 */
3692 	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3693 		args->minlen = ap->minlen;
3694 		ASSERT(args->fsbno == NULLFSBLOCK);
3695 		goto out_low_space;
3696 	}
3697 
3698 	args->minlen = xfs_bmap_select_minlen(ap, args, blen);
3699 	if (ap->aeof)
3700 		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3701 				true);
3702 
3703 	if (!error && args->fsbno == NULLFSBLOCK)
3704 		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3705 
3706 out_low_space:
3707 	/*
3708 	 * We are now done with the perag reference for the filestreams
3709 	 * association provided by xfs_filestream_select_ag(). Release it now as
3710 	 * we've either succeeded, had a fatal error, or we are out of space and
3711 	 * need to do a full filesystem scan for free space which will take its
3712 	 * own references.
3713 	 */
3714 	xfs_perag_rele(args->pag);
3715 	args->pag = NULL;
3716 	if (error || args->fsbno != NULLFSBLOCK)
3717 		return error;
3718 
3719 	return xfs_bmap_btalloc_low_space(ap, args);
3720 }
3721 
3722 static int
3723 xfs_bmap_btalloc_best_length(
3724 	struct xfs_bmalloca	*ap,
3725 	struct xfs_alloc_arg	*args,
3726 	int			stripe_align)
3727 {
3728 	xfs_extlen_t		blen = 0;
3729 	int			error;
3730 
3731 	ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
3732 	xfs_bmap_adjacent(ap);
3733 
3734 	/*
3735 	 * Search for an allocation group with a single extent large enough for
3736 	 * the request.  If one isn't found, then adjust the minimum allocation
3737 	 * size to the largest space found.
3738 	 */
3739 	error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
3740 	if (error)
3741 		return error;
3742 
3743 	/*
3744 	 * Don't attempt optimal EOF allocation if previous allocations barely
3745 	 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
3746 	 * optimal or even aligned allocations in this case, so don't waste time
3747 	 * trying.
3748 	 */
3749 	if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
3750 		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3751 				false);
3752 		if (error || args->fsbno != NULLFSBLOCK)
3753 			return error;
3754 	}
3755 
3756 	error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3757 	if (error || args->fsbno != NULLFSBLOCK)
3758 		return error;
3759 
3760 	return xfs_bmap_btalloc_low_space(ap, args);
3761 }
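
/*
 * Taken together, the best-length path above falls back through these
 * allocation attempts in order:
 *   1. exact-bno file extension at EOF (xfs_bmap_btalloc_at_eof)
 *   2. stripe-aligned allocation near the target bno
 *   3. best-length allocation starting at the target AG
 *   4. low space: minlen allocation across the whole filesystem
 *   5. last resort: first-AG scan with total = minlen, which also sets
 *      XFS_TRANS_LOWMODE for the rest of the transaction
 */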
3762 
3763 static int
3764 xfs_bmap_btalloc(
3765 	struct xfs_bmalloca	*ap)
3766 {
3767 	struct xfs_mount	*mp = ap->ip->i_mount;
3768 	struct xfs_alloc_arg	args = {
3769 		.tp		= ap->tp,
3770 		.mp		= mp,
3771 		.fsbno		= NULLFSBLOCK,
3772 		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
3773 		.minleft	= ap->minleft,
3774 		.wasdel		= ap->wasdel,
3775 		.resv		= XFS_AG_RESV_NONE,
3776 		.datatype	= ap->datatype,
3777 		.alignment	= 1,
3778 		.minalignslop	= 0,
3779 	};
3780 	xfs_fileoff_t		orig_offset;
3781 	xfs_extlen_t		orig_length;
3782 	int			error;
3783 	int			stripe_align;
3784 
3785 	ASSERT(ap->length);
3786 	orig_offset = ap->offset;
3787 	orig_length = ap->length;
3788 
3789 	stripe_align = xfs_bmap_compute_alignments(ap, &args);
3790 
3791 	/* Trim the allocation back to the maximum an AG can fit. */
3792 	args.maxlen = min(ap->length, mp->m_ag_max_usable);
3793 
3794 	if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3795 	    xfs_inode_is_filestream(ap->ip))
3796 		error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align);
3797 	else
3798 		error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
3799 	if (error)
3800 		return error;
3801 
3802 	if (args.fsbno != NULLFSBLOCK) {
3803 		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3804 			orig_length);
3805 	} else {
3806 		ap->blkno = NULLFSBLOCK;
3807 		ap->length = 0;
3808 	}
3809 	return 0;
3810 }
3811 
3812 /* Trim extent to fit a logical block range. */
3813 void
3814 xfs_trim_extent(
3815 	struct xfs_bmbt_irec	*irec,
3816 	xfs_fileoff_t		bno,
3817 	xfs_filblks_t		len)
3818 {
3819 	xfs_fileoff_t		distance;
3820 	xfs_fileoff_t		end = bno + len;
3821 
3822 	if (irec->br_startoff + irec->br_blockcount <= bno ||
3823 	    irec->br_startoff >= end) {
3824 		irec->br_blockcount = 0;
3825 		return;
3826 	}
3827 
3828 	if (irec->br_startoff < bno) {
3829 		distance = bno - irec->br_startoff;
3830 		if (isnullstartblock(irec->br_startblock))
3831 			irec->br_startblock = DELAYSTARTBLOCK;
3832 		if (irec->br_startblock != DELAYSTARTBLOCK &&
3833 		    irec->br_startblock != HOLESTARTBLOCK)
3834 			irec->br_startblock += distance;
3835 		irec->br_startoff += distance;
3836 		irec->br_blockcount -= distance;
3837 	}
3838 
3839 	if (end < irec->br_startoff + irec->br_blockcount) {
3840 		distance = irec->br_startoff + irec->br_blockcount - end;
3841 		irec->br_blockcount -= distance;
3842 	}
3843 }
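
/*
 * Illustrative sketch (hypothetical values, not compiled): trimming a
 * mapped extent to a sub-range moves the start block in lockstep with
 * the start offset.
 */
#if 0
static void example_trim_extent(void)
{
	struct xfs_bmbt_irec	irec = {
		.br_startoff	= 10,		/* file blocks 10..29 ... */
		.br_startblock	= 1000,		/* ... at fsblocks 1000..1019 */
		.br_blockcount	= 20,
		.br_state	= XFS_EXT_NORM,
	};

	xfs_trim_extent(&irec, 15, 10);		/* keep file blocks 15..24 */

	/* now br_startoff == 15, br_startblock == 1005, br_blockcount == 10 */
}
#endif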
3844 
3845 /*
3846  * Trim the returned map to the required bounds
3847  */
3848 STATIC void
3849 xfs_bmapi_trim_map(
3850 	struct xfs_bmbt_irec	*mval,
3851 	struct xfs_bmbt_irec	*got,
3852 	xfs_fileoff_t		*bno,
3853 	xfs_filblks_t		len,
3854 	xfs_fileoff_t		obno,
3855 	xfs_fileoff_t		end,
3856 	int			n,
3857 	uint32_t		flags)
3858 {
3859 	if ((flags & XFS_BMAPI_ENTIRE) ||
3860 	    got->br_startoff + got->br_blockcount <= obno) {
3861 		*mval = *got;
3862 		if (isnullstartblock(got->br_startblock))
3863 			mval->br_startblock = DELAYSTARTBLOCK;
3864 		return;
3865 	}
3866 
3867 	if (obno > *bno)
3868 		*bno = obno;
3869 	ASSERT((*bno >= obno) || (n == 0));
3870 	ASSERT(*bno < end);
3871 	mval->br_startoff = *bno;
3872 	if (isnullstartblock(got->br_startblock))
3873 		mval->br_startblock = DELAYSTARTBLOCK;
3874 	else
3875 		mval->br_startblock = got->br_startblock +
3876 					(*bno - got->br_startoff);
3877 	/*
3878 	 * For the length, return the minimum of what we got and what we
3879 	 * asked for.  We can use the len variable here because it is
3880 	 * modified below and we could have been there before coming
3881 	 * here if the first part of the allocation didn't overlap what
3882 	 * was asked for.
3883 	 */
3884 	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3885 			got->br_blockcount - (*bno - got->br_startoff));
3886 	mval->br_state = got->br_state;
3887 	ASSERT(mval->br_blockcount <= len);
3888 	return;
3889 }
3890 
3891 /*
3892  * Update and validate the extent map to return
3893  */
3894 STATIC void
3895 xfs_bmapi_update_map(
3896 	struct xfs_bmbt_irec	**map,
3897 	xfs_fileoff_t		*bno,
3898 	xfs_filblks_t		*len,
3899 	xfs_fileoff_t		obno,
3900 	xfs_fileoff_t		end,
3901 	int			*n,
3902 	uint32_t		flags)
3903 {
3904 	xfs_bmbt_irec_t	*mval = *map;
3905 
3906 	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3907 	       ((mval->br_startoff + mval->br_blockcount) <= end));
3908 	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3909 	       (mval->br_startoff < obno));
3910 
3911 	*bno = mval->br_startoff + mval->br_blockcount;
3912 	*len = end - *bno;
3913 	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3914 		/* update previous map with new information */
3915 		ASSERT(mval->br_startblock == mval[-1].br_startblock);
3916 		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3917 		ASSERT(mval->br_state == mval[-1].br_state);
3918 		mval[-1].br_blockcount = mval->br_blockcount;
3919 		mval[-1].br_state = mval->br_state;
3920 	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3921 		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
3922 		   mval[-1].br_startblock != HOLESTARTBLOCK &&
3923 		   mval->br_startblock == mval[-1].br_startblock +
3924 					  mval[-1].br_blockcount &&
3925 		   mval[-1].br_state == mval->br_state) {
3926 		ASSERT(mval->br_startoff ==
3927 		       mval[-1].br_startoff + mval[-1].br_blockcount);
3928 		mval[-1].br_blockcount += mval->br_blockcount;
3929 	} else if (*n > 0 &&
3930 		   mval->br_startblock == DELAYSTARTBLOCK &&
3931 		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
3932 		   mval->br_startoff ==
3933 		   mval[-1].br_startoff + mval[-1].br_blockcount) {
3934 		mval[-1].br_blockcount += mval->br_blockcount;
3935 		mval[-1].br_state = mval->br_state;
3936 	} else if (!((*n == 0) &&
3937 		     ((mval->br_startoff + mval->br_blockcount) <=
3938 		      obno))) {
3939 		mval++;
3940 		(*n)++;
3941 	}
3942 	*map = mval;
3943 }
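
/*
 * Illustrative sketch (made-up mappings, not compiled): two physically
 * contiguous real mappings are coalesced by the second branch above.
 */
#if 0
static void example_update_map_merge(void)
{
	struct xfs_bmbt_irec	mval[2] = {
		{ .br_startoff = 10, .br_startblock = 100,
		  .br_blockcount = 5, .br_state = XFS_EXT_NORM },
		{ .br_startoff = 15, .br_startblock = 105,
		  .br_blockcount = 3, .br_state = XFS_EXT_NORM },
	};
	struct xfs_bmbt_irec	*map = &mval[1];
	xfs_fileoff_t		bno = 15;
	xfs_filblks_t		len = 5;
	int			n = 1;

	xfs_bmapi_update_map(&map, &bno, &len, 10, 20, &n, 0);

	/*
	 * mval[1] starts right where mval[0] ends, both logically and
	 * physically, so it is folded in: mval[0].br_blockcount == 8 and
	 * n stays at 1, while bno advances to 18 and len shrinks to 2.
	 */
}
#endif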
3944 
3945 /*
3946  * Map file blocks to filesystem blocks without allocation.
3947  */
3948 int
3949 xfs_bmapi_read(
3950 	struct xfs_inode	*ip,
3951 	xfs_fileoff_t		bno,
3952 	xfs_filblks_t		len,
3953 	struct xfs_bmbt_irec	*mval,
3954 	int			*nmap,
3955 	uint32_t		flags)
3956 {
3957 	struct xfs_mount	*mp = ip->i_mount;
3958 	int			whichfork = xfs_bmapi_whichfork(flags);
3959 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
3960 	struct xfs_bmbt_irec	got;
3961 	xfs_fileoff_t		obno;
3962 	xfs_fileoff_t		end;
3963 	struct xfs_iext_cursor	icur;
3964 	int			error;
3965 	bool			eof = false;
3966 	int			n = 0;
3967 
3968 	ASSERT(*nmap >= 1);
3969 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
3970 	xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
3971 
3972 	if (WARN_ON_ONCE(!ifp)) {
3973 		xfs_bmap_mark_sick(ip, whichfork);
3974 		return -EFSCORRUPTED;
3975 	}
3976 
3977 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
3978 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
3979 		xfs_bmap_mark_sick(ip, whichfork);
3980 		return -EFSCORRUPTED;
3981 	}
3982 
3983 	if (xfs_is_shutdown(mp))
3984 		return -EIO;
3985 
3986 	XFS_STATS_INC(mp, xs_blk_mapr);
3987 
3988 	error = xfs_iread_extents(NULL, ip, whichfork);
3989 	if (error)
3990 		return error;
3991 
3992 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3993 		eof = true;
3994 	end = bno + len;
3995 	obno = bno;
3996 
3997 	while (bno < end && n < *nmap) {
3998 		/* Reading past eof, act as though there's a hole up to end. */
3999 		if (eof)
4000 			got.br_startoff = end;
4001 		if (got.br_startoff > bno) {
4002 			/* Reading in a hole.  */
4003 			mval->br_startoff = bno;
4004 			mval->br_startblock = HOLESTARTBLOCK;
4005 			mval->br_blockcount =
4006 				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4007 			mval->br_state = XFS_EXT_NORM;
4008 			bno += mval->br_blockcount;
4009 			len -= mval->br_blockcount;
4010 			mval++;
4011 			n++;
4012 			continue;
4013 		}
4014 
4015 		/* set up the extent map to return. */
4016 		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4017 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4018 
4019 		/* If we're done, stop now. */
4020 		if (bno >= end || n >= *nmap)
4021 			break;
4022 
4023 		/* Else go on to the next record. */
4024 		if (!xfs_iext_next_extent(ifp, &icur, &got))
4025 			eof = true;
4026 	}
4027 	*nmap = n;
4028 	return 0;
4029 }
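
/*
 * Sketch of a typical xfs_bmapi_read() call (hypothetical helper, not
 * compiled): look up the mapping for a single file block while holding
 * the inode shared.
 */
#if 0
static int example_read_one_mapping(struct xfs_inode *ip, xfs_fileoff_t bno)
{
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	int			error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, bno, 1, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (error)
		return error;

	/* a hole is reported with br_startblock == HOLESTARTBLOCK */
	return 0;
}
#endif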
4030 
4031 /*
4032  * Add a delayed allocation extent to an inode. Blocks are reserved from the
4033  * global pool and the extent inserted into the inode in-core extent tree.
4034  *
4035  * On entry, got refers to the first extent beyond the offset of the extent to
4036  * allocate or eof is specified if no such extent exists. On return, got refers
4037  * to the extent record that was inserted to the inode fork.
4038  *
4039  * Note that the allocated extent may have been merged with contiguous extents
4040  * during insertion into the inode fork. Thus, got does not reflect the current
4041  * state of the inode fork on return. If necessary, the caller can use icur to
4042  * look up the updated record in the inode fork.
4043  */
4044 int
4045 xfs_bmapi_reserve_delalloc(
4046 	struct xfs_inode	*ip,
4047 	int			whichfork,
4048 	xfs_fileoff_t		off,
4049 	xfs_filblks_t		len,
4050 	xfs_filblks_t		prealloc,
4051 	struct xfs_bmbt_irec	*got,
4052 	struct xfs_iext_cursor	*icur,
4053 	int			eof)
4054 {
4055 	struct xfs_mount	*mp = ip->i_mount;
4056 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4057 	xfs_extlen_t		alen;
4058 	xfs_extlen_t		indlen;
4059 	uint64_t		fdblocks;
4060 	int			error;
4061 	xfs_fileoff_t		aoff;
4062 	bool			use_cowextszhint =
4063 					whichfork == XFS_COW_FORK && !prealloc;
4064 
4065 retry:
4066 	/*
4067 	 * Cap the alloc length. Keep track of prealloc so we know whether to
4068 	 * tag the inode before we return.
4069 	 */
4070 	aoff = off;
4071 	alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
4072 	if (!eof)
4073 		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4074 	if (prealloc && alen >= len)
4075 		prealloc = alen - len;
4076 
4077 	/*
4078 	 * If we're targeting the COW fork but aren't creating a speculative
4079 	 * posteof preallocation, try to expand the reservation to align with
4080 	 * the COW extent size hint if there's sufficient free space.
4081 	 *
4082 	 * Unlike the data fork, the CoW cancellation functions will free all
4083 	 * the reservations at inactivation, so we don't require that every
4084 	 * delalloc reservation have a dirty pagecache.
4085 	 */
4086 	if (use_cowextszhint) {
4087 		struct xfs_bmbt_irec	prev;
4088 		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);
4089 
4090 		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
4091 			prev.br_startoff = NULLFILEOFF;
4092 
4093 		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
4094 					       1, 0, &aoff, &alen);
4095 		ASSERT(!error);
4096 	}
4097 
4098 	/*
4099 	 * Make a transaction-less quota reservation for delayed allocation
4100 	 * blocks.  This number gets adjusted later.  On failure we can bail
4101 	 * out, as we haven't allocated any blocks yet inside this loop.
4102 	 */
4103 	error = xfs_quota_reserve_blkres(ip, alen);
4104 	if (error)
4105 		goto out;
4106 
4107 	/*
4108 	 * Split the superblock counter updates for alen and indlen, since
4109 	 * they could be coming from different pools.
4110 	 */
4111 	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4112 	ASSERT(indlen > 0);
4113 
4114 	fdblocks = indlen;
4115 	if (XFS_IS_REALTIME_INODE(ip)) {
4116 		error = xfs_dec_frextents(mp, xfs_rtb_to_rtx(mp, alen));
4117 		if (error)
4118 			goto out_unreserve_quota;
4119 	} else {
4120 		fdblocks += alen;
4121 	}
4122 
4123 	error = xfs_dec_fdblocks(mp, fdblocks, false);
4124 	if (error)
4125 		goto out_unreserve_frextents;
4126 
4127 	ip->i_delayed_blks += alen;
4128 	xfs_mod_delalloc(ip, alen, indlen);
4129 
4130 	got->br_startoff = aoff;
4131 	got->br_startblock = nullstartblock(indlen);
4132 	got->br_blockcount = alen;
4133 	got->br_state = XFS_EXT_NORM;
4134 
4135 	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
4136 
4137 	/*
4138 	 * Tag the inode if blocks were preallocated. Note that COW fork
4139 	 * preallocation can occur at the start or end of the extent, even when
4140 	 * prealloc == 0, so we must also check the aligned offset and length.
4141 	 */
4142 	if (whichfork == XFS_DATA_FORK && prealloc)
4143 		xfs_inode_set_eofblocks_tag(ip);
4144 	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4145 		xfs_inode_set_cowblocks_tag(ip);
4146 
4147 	return 0;
4148 
4149 out_unreserve_frextents:
4150 	if (XFS_IS_REALTIME_INODE(ip))
4151 		xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, alen));
4152 out_unreserve_quota:
4153 	if (XFS_IS_QUOTA_ON(mp))
4154 		xfs_quota_unreserve_blkres(ip, alen);
4155 out:
4156 	if (error == -ENOSPC || error == -EDQUOT) {
4157 		trace_xfs_delalloc_enospc(ip, off, len);
4158 
4159 		if (prealloc || use_cowextszhint) {
4160 			/* retry without any preallocation */
4161 			use_cowextszhint = false;
4162 			prealloc = 0;
4163 			goto retry;
4164 		}
4165 	}
4166 	return error;
4167 }
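
/*
 * Sketch (hypothetical helper, not compiled) of the reservation split
 * above: indirect blocks always come from fdblocks, while the data blocks
 * come from frextents instead on realtime inodes.
 */
#if 0
static uint64_t example_fdblocks_charge(bool realtime, xfs_extlen_t alen,
		xfs_extlen_t indlen)
{
	uint64_t	fdblocks = indlen;	/* bmbt blocks: main pool */

	if (!realtime)
		fdblocks += alen;		/* data blocks: main pool too */
	return fdblocks;
}
#endif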
4168 
4169 static int
4170 xfs_bmap_alloc_userdata(
4171 	struct xfs_bmalloca	*bma)
4172 {
4173 	struct xfs_mount	*mp = bma->ip->i_mount;
4174 	int			whichfork = xfs_bmapi_whichfork(bma->flags);
4175 	int			error;
4176 
4177 	/*
4178 	 * Set the data type being allocated. For the data fork, the first data
4179 	 * in the file is treated differently to all other allocations. For the
4180 	 * attribute fork, we only need to ensure the allocated range is not on
4181 	 * the busy list.
4182 	 */
4183 	bma->datatype = XFS_ALLOC_NOBUSY;
4184 	if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
4185 		bma->datatype |= XFS_ALLOC_USERDATA;
4186 		if (bma->offset == 0)
4187 			bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4188 
4189 		if (mp->m_dalign && bma->length >= mp->m_dalign) {
4190 			error = xfs_bmap_isaeof(bma, whichfork);
4191 			if (error)
4192 				return error;
4193 		}
4194 
4195 		if (XFS_IS_REALTIME_INODE(bma->ip))
4196 			return xfs_bmap_rtalloc(bma);
4197 	}
4198 
4199 	if (unlikely(XFS_TEST_ERROR(false, mp,
4200 			XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
4201 		return xfs_bmap_exact_minlen_extent_alloc(bma);
4202 
4203 	return xfs_bmap_btalloc(bma);
4204 }
4205 
4206 static int
4207 xfs_bmapi_allocate(
4208 	struct xfs_bmalloca	*bma)
4209 {
4210 	struct xfs_mount	*mp = bma->ip->i_mount;
4211 	int			whichfork = xfs_bmapi_whichfork(bma->flags);
4212 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4213 	int			error;
4214 
4215 	ASSERT(bma->length > 0);
4216 	ASSERT(bma->length <= XFS_MAX_BMBT_EXTLEN);
4217 
4218 	if (bma->flags & XFS_BMAPI_CONTIG)
4219 		bma->minlen = bma->length;
4220 	else
4221 		bma->minlen = 1;
4222 
4223 	if (bma->flags & XFS_BMAPI_METADATA) {
4224 		if (unlikely(XFS_TEST_ERROR(false, mp,
4225 				XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
4226 			error = xfs_bmap_exact_minlen_extent_alloc(bma);
4227 		else
4228 			error = xfs_bmap_btalloc(bma);
4229 	} else {
4230 		error = xfs_bmap_alloc_userdata(bma);
4231 	}
4232 	if (error)
4233 		return error;
4234 	if (bma->blkno == NULLFSBLOCK)
4235 		return -ENOSPC;
4236 
4237 	if (WARN_ON_ONCE(!xfs_valid_startblock(bma->ip, bma->blkno))) {
4238 		xfs_bmap_mark_sick(bma->ip, whichfork);
4239 		return -EFSCORRUPTED;
4240 	}
4241 
4242 	if (bma->flags & XFS_BMAPI_ZERO) {
4243 		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
4244 		if (error)
4245 			return error;
4246 	}
4247 
4248 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
4249 		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4250 	/*
4251 	 * Bump the number of extents we've allocated
4252 	 * in this call.
4253 	 */
4254 	bma->nallocs++;
4255 
4256 	if (bma->cur && bma->wasdel)
4257 		bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
4258 
4259 	bma->got.br_startoff = bma->offset;
4260 	bma->got.br_startblock = bma->blkno;
4261 	bma->got.br_blockcount = bma->length;
4262 	bma->got.br_state = XFS_EXT_NORM;
4263 
4264 	if (bma->flags & XFS_BMAPI_PREALLOC)
4265 		bma->got.br_state = XFS_EXT_UNWRITTEN;
4266 
4267 	if (bma->wasdel)
4268 		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4269 	else
4270 		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4271 				whichfork, &bma->icur, &bma->cur, &bma->got,
4272 				&bma->logflags, bma->flags);
4273 	if (error)
4274 		return error;
4275 
4276 	/*
4277 	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4278 	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4279 	 * the neighbouring ones.
4280 	 */
4281 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4282 
4283 	ASSERT(bma->got.br_startoff <= bma->offset);
4284 	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4285 	       bma->offset + bma->length);
4286 	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4287 	       bma->got.br_state == XFS_EXT_UNWRITTEN);
4288 	return 0;
4289 }
4290 
4291 STATIC int
4292 xfs_bmapi_convert_unwritten(
4293 	struct xfs_bmalloca	*bma,
4294 	struct xfs_bmbt_irec	*mval,
4295 	xfs_filblks_t		len,
4296 	uint32_t		flags)
4297 {
4298 	int			whichfork = xfs_bmapi_whichfork(flags);
4299 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4300 	int			tmp_logflags = 0;
4301 	int			error;
4302 
4303 	/* check if we need to do unwritten->real conversion */
4304 	if (mval->br_state == XFS_EXT_UNWRITTEN &&
4305 	    (flags & XFS_BMAPI_PREALLOC))
4306 		return 0;
4307 
4308 	/* check if we need to do real->unwritten conversion */
4309 	if (mval->br_state == XFS_EXT_NORM &&
4310 	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4311 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4312 		return 0;
4313 
4314 	/*
4315 	 * Modify (by adding) the state flag, if writing.
4316 	 */
4317 	ASSERT(mval->br_blockcount <= len);
4318 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
4319 		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4320 					bma->ip, whichfork);
4321 	}
4322 	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4323 				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4324 
4325 	/*
4326 	 * Before insertion into the bmbt, zero the range being converted
4327 	 * if required.
4328 	 */
4329 	if (flags & XFS_BMAPI_ZERO) {
4330 		error = xfs_zero_extent(bma->ip, mval->br_startblock,
4331 					mval->br_blockcount);
4332 		if (error)
4333 			return error;
4334 	}
4335 
4336 	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4337 			&bma->icur, &bma->cur, mval, &tmp_logflags);
4338 	/*
4339 	 * Log the inode core unconditionally in the unwritten extent conversion
4340 	 * path because the conversion might not have done so (e.g., if the
4341 	 * extent count hasn't changed). We need to make sure the inode is dirty
4342 	 * in the transaction for the sake of fsync(), even if nothing has
4343 	 * changed, because fsync() will not force the log for this transaction
4344 	 * unless it sees the inode pinned.
4345 	 *
4346 	 * Note: If we're only converting cow fork extents, there aren't
4347 	 * any on-disk updates to make, so we don't need to log anything.
4348 	 */
4349 	if (whichfork != XFS_COW_FORK)
4350 		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4351 	if (error)
4352 		return error;
4353 
4354 	/*
4355 	 * Update our extent pointer, given that
4356 	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4357 	 * of the neighbouring ones.
4358 	 */
4359 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4360 
4361 	/*
4362 	 * We may have combined previously unwritten space with written space,
4363 	 * so generate another request.
4364 	 */
4365 	if (mval->br_blockcount < len)
4366 		return -EAGAIN;
4367 	return 0;
4368 }
4369 
4370 xfs_extlen_t
4371 xfs_bmapi_minleft(
4372 	struct xfs_trans	*tp,
4373 	struct xfs_inode	*ip,
4374 	int			fork)
4375 {
4376 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, fork);
4377 
4378 	if (tp && tp->t_highest_agno != NULLAGNUMBER)
4379 		return 0;
4380 	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4381 		return 1;
4382 	return be16_to_cpu(ifp->if_broot->bb_level) + 1;
4383 }
4384 
4385 /*
4386  * Log whatever the flags say, even if error.  Otherwise we might miss detecting
4387  * a case where the data is changed, there's an error, and it's not logged so we
4388  * don't shutdown when we should.  Don't bother logging extents/btree changes if
4389  * we converted to the other format.
4390  */
4391 static void
4392 xfs_bmapi_finish(
4393 	struct xfs_bmalloca	*bma,
4394 	int			whichfork,
4395 	int			error)
4396 {
4397 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4398 
4399 	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4400 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4401 		bma->logflags &= ~xfs_ilog_fext(whichfork);
4402 	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4403 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
4404 		bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4405 
4406 	if (bma->logflags)
4407 		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4408 	if (bma->cur)
4409 		xfs_btree_del_cursor(bma->cur, error);
4410 }
4411 
4412 /*
4413  * Map file blocks to filesystem blocks, and allocate blocks or convert the
4414  * extent state if necessary.  Detailed behaviour is controlled by the flags
4415  * parameter.  Only allocates blocks from a single allocation group, to avoid
4416  * locking problems.
4417  *
4418  * Returns 0 on success and places the extent mappings in mval.  nmaps is used
4419  * Returns 0 on success and places the extent mappings in mval.  *nmap is used
4420  * of mappings that may be returned and xfs_bmapi_write passes back the number
4421  * of mappings (including existing mappings) it found.
4422  *
4423  * Returns a negative error code on failure, including -ENOSPC when it could not
4424  * allocate any blocks and -ENOSR when it did allocate blocks to convert a
4425  * delalloc range, but those blocks were before the passed in range.
4426  */
4427 int
4428 xfs_bmapi_write(
4429 	struct xfs_trans	*tp,		/* transaction pointer */
4430 	struct xfs_inode	*ip,		/* incore inode */
4431 	xfs_fileoff_t		bno,		/* starting file offs. mapped */
4432 	xfs_filblks_t		len,		/* length to map in file */
4433 	uint32_t		flags,		/* XFS_BMAPI_... */
4434 	xfs_extlen_t		total,		/* total blocks needed */
4435 	struct xfs_bmbt_irec	*mval,		/* output: map values */
4436 	int			*nmap)		/* i/o: mval size/count */
4437 {
4438 	struct xfs_bmalloca	bma = {
4439 		.tp		= tp,
4440 		.ip		= ip,
4441 		.total		= total,
4442 	};
4443 	struct xfs_mount	*mp = ip->i_mount;
4444 	int			whichfork = xfs_bmapi_whichfork(flags);
4445 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4446 	xfs_fileoff_t		end;		/* end of mapped file region */
4447 	bool			eof = false;	/* after the end of extents */
4448 	int			error;		/* error return */
4449 	int			n;		/* current extent index */
4450 	xfs_fileoff_t		obno;		/* old block number (offset) */
4451 
4452 #ifdef DEBUG
4453 	xfs_fileoff_t		orig_bno;	/* original block number value */
4454 	int			orig_flags;	/* original flags arg value */
4455 	xfs_filblks_t		orig_len;	/* original value of len arg */
4456 	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
4457 	int			orig_nmap;	/* original value of *nmap */
4458 
4459 	orig_bno = bno;
4460 	orig_len = len;
4461 	orig_flags = flags;
4462 	orig_mval = mval;
4463 	orig_nmap = *nmap;
4464 #endif
4465 
4466 	ASSERT(*nmap >= 1);
4467 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4468 	ASSERT(tp != NULL);
4469 	ASSERT(len > 0);
4470 	ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
4471 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4472 	ASSERT(!(flags & XFS_BMAPI_REMAP));
4473 
4474 	/* zeroing is currently only for data extents, not metadata */
4475 	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4476 			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4477 	/*
4478 	 * we can allocate unwritten extents or pre-zero allocated blocks,
4479 	 * but it makes no sense to do both at once. This would result in
4480 	 * zeroing the unwritten extent twice, while it would still remain
4481 	 * an unwritten extent....
4482 	 */
4483 	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4484 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4485 
4486 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4487 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4488 		xfs_bmap_mark_sick(ip, whichfork);
4489 		return -EFSCORRUPTED;
4490 	}
4491 
4492 	if (xfs_is_shutdown(mp))
4493 		return -EIO;
4494 
4495 	XFS_STATS_INC(mp, xs_blk_mapw);
4496 
4497 	error = xfs_iread_extents(tp, ip, whichfork);
4498 	if (error)
4499 		goto error0;
4500 
4501 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4502 		eof = true;
4503 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4504 		bma.prev.br_startoff = NULLFILEOFF;
4505 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4506 
4507 	n = 0;
4508 	end = bno + len;
4509 	obno = bno;
4510 	while (bno < end && n < *nmap) {
4511 		bool			need_alloc = false, wasdelay = false;
4512 
4513 		/* in hole or beyond EOF? */
4514 		if (eof || bma.got.br_startoff > bno) {
4515 			/*
4516 			 * CoW fork conversions should /never/ hit EOF or
4517 			 * holes.  There should always be something for us
4518 			 * to work on.
4519 			 */
4520 			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4521 			         (flags & XFS_BMAPI_COWFORK)));
4522 
4523 			need_alloc = true;
4524 		} else if (isnullstartblock(bma.got.br_startblock)) {
4525 			wasdelay = true;
4526 		}
4527 
4528 		/*
4529 		 * First, deal with the hole before the allocated space
4530 		 * that we found, if any.
4531 		 */
4532 		if (need_alloc || wasdelay) {
4533 			bma.eof = eof;
4534 			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4535 			bma.wasdel = wasdelay;
4536 			bma.offset = bno;
4537 			bma.flags = flags;
4538 
4539 			/*
4540 			 * There's a 32/64 bit type mismatch between the
4541 			 * allocation length request (which can be 64 bits in
4542 			 * length) and the bma length request, which is
4543 			 * xfs_extlen_t and therefore 32 bits. Hence we have to
4544 			 * be careful and do the min() using the larger type to
4545 			 * avoid overflows.
4546 			 */
4547 			bma.length = XFS_FILBLKS_MIN(len, XFS_MAX_BMBT_EXTLEN);
4548 
4549 			if (wasdelay) {
4550 				bma.length = XFS_FILBLKS_MIN(bma.length,
4551 					bma.got.br_blockcount -
4552 					(bno - bma.got.br_startoff));
4553 			} else {
4554 				if (!eof)
4555 					bma.length = XFS_FILBLKS_MIN(bma.length,
4556 						bma.got.br_startoff - bno);
4557 			}
4558 
4559 			ASSERT(bma.length > 0);
4560 			error = xfs_bmapi_allocate(&bma);
4561 			if (error) {
4562 				/*
4563 				 * If we already allocated space in a previous
4564 				 * iteration, return what we got so far when
4565 				 * running out of space.
4566 				 */
4567 				if (error == -ENOSPC && bma.nallocs)
4568 					break;
4569 				goto error0;
4570 			}
4571 
4572 			/*
4573 			 * If this is a CoW allocation, record the data in
4574 			 * the refcount btree for orphan recovery.
4575 			 */
4576 			if (whichfork == XFS_COW_FORK)
4577 				xfs_refcount_alloc_cow_extent(tp, bma.blkno,
4578 						bma.length);
4579 		}
4580 
4581 		/* Deal with the allocated space we found.  */
4582 		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4583 							end, n, flags);
4584 
4585 		/* Execute unwritten extent conversion if necessary */
4586 		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4587 		if (error == -EAGAIN)
4588 			continue;
4589 		if (error)
4590 			goto error0;
4591 
4592 		/* update the extent map to return */
4593 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4594 
4595 		/*
4596 		 * If we're done, stop now.  Stop when we've allocated
4597 		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
4598 		 * the transaction may get too big.
4599 		 */
4600 		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4601 			break;
4602 
4603 		/* Else go on to the next record. */
4604 		bma.prev = bma.got;
4605 		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4606 			eof = true;
4607 	}
4608 
4609 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4610 			whichfork);
4611 	if (error)
4612 		goto error0;
4613 
4614 	ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4615 	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4616 	xfs_bmapi_finish(&bma, whichfork, 0);
4617 	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4618 		orig_nmap, n);
4619 
4620 	/*
4621 	 * When converting delayed allocations, xfs_bmapi_allocate ignores
4622 	 * the passed in bno and always converts from the start of the found
4623 	 * delalloc extent.
4624 	 *
4625 	 * To avoid a successful return with *nmap set to 0, return the magic
4626 	 * -ENOSR error code for this particular case so that the caller can
4627 	 * handle it.
4628 	 */
4629 	if (!n) {
4630 		ASSERT(bma.nallocs >= *nmap);
4631 		return -ENOSR;
4632 	}
4633 	*nmap = n;
4634 	return 0;
4635 error0:
4636 	xfs_bmapi_finish(&bma, whichfork, error);
4637 	return error;
4638 }
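
/*
 * Sketch of a minimal xfs_bmapi_write() call (hypothetical helper and
 * reservation, not compiled): allocate one metadata block inside an
 * already-running transaction that has joined and exclusively locked ip.
 */
#if 0
static int example_write_one_block(struct xfs_trans *tp, struct xfs_inode *ip,
		xfs_fileoff_t bno, xfs_extlen_t resblks)
{
	struct xfs_bmbt_irec	map;
	int			nmap = 1;
	int			error;

	error = xfs_bmapi_write(tp, ip, bno, 1, XFS_BMAPI_METADATA,
			resblks, &map, &nmap);
	if (error)
		return error;

	/* on success, nmap == 1 and map describes the new allocation */
	return 0;
}
#endif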
4639 
4640 /*
4641  * Convert an existing delalloc extent to real blocks based on file offset. This
4642  * attempts to allocate the entire delalloc extent and may require multiple
4643  * invocations to allocate the target offset if a large enough physical extent
4644  * is not available.
4645  */
4646 static int
4647 xfs_bmapi_convert_one_delalloc(
4648 	struct xfs_inode	*ip,
4649 	int			whichfork,
4650 	xfs_off_t		offset,
4651 	struct iomap		*iomap,
4652 	unsigned int		*seq)
4653 {
4654 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4655 	struct xfs_mount	*mp = ip->i_mount;
4656 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
4657 	struct xfs_bmalloca	bma = { NULL };
4658 	uint16_t		flags = 0;
4659 	struct xfs_trans	*tp;
4660 	int			error;
4661 
4662 	if (whichfork == XFS_COW_FORK)
4663 		flags |= IOMAP_F_SHARED;
4664 
4665 	/*
4666 	 * Space for the extent and indirect blocks was reserved when the
4667 	 * delalloc extent was created so there's no need to do so here.
4668 	 */
4669 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4670 				XFS_TRANS_RESERVE, &tp);
4671 	if (error)
4672 		return error;
4673 
4674 	xfs_ilock(ip, XFS_ILOCK_EXCL);
4675 	xfs_trans_ijoin(tp, ip, 0);
4676 
4677 	error = xfs_iext_count_extend(tp, ip, whichfork,
4678 			XFS_IEXT_ADD_NOSPLIT_CNT);
4679 	if (error)
4680 		goto out_trans_cancel;
4681 
4682 	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4683 	    bma.got.br_startoff > offset_fsb) {
4684 		/*
4685 		 * No extent found in the range we are trying to convert.  This
4686 		 * should only happen for the COW fork, where another thread
4687 		 * might have moved the extent to the data fork in the meantime.
4688 		 */
4689 		WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4690 		error = -EAGAIN;
4691 		goto out_trans_cancel;
4692 	}
4693 
4694 	/*
4695 	 * If we find a real extent here we raced with another thread converting
4696 	 * the extent.  Just return the real extent at this offset.
4697 	 */
4698 	if (!isnullstartblock(bma.got.br_startblock)) {
4699 		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4700 				xfs_iomap_inode_sequence(ip, flags));
4701 		if (seq)
4702 			*seq = READ_ONCE(ifp->if_seq);
4703 		goto out_trans_cancel;
4704 	}
4705 
4706 	bma.tp = tp;
4707 	bma.ip = ip;
4708 	bma.wasdel = true;
4709 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4710 
4711 	/*
4712 	 * Always convert from the start of the delalloc extent, even if
4713 	 * that is outside the passed in range to create large contiguous
4714 	 * extents on disk.
4715 	 */
4716 	bma.offset = bma.got.br_startoff;
4717 	bma.length = bma.got.br_blockcount;
4718 
4719 	/*
4720 	 * When we're converting the delalloc reservations backing dirty pages
4721 	 * in the page cache, we must be careful about how we create the new
4722 	 * extents:
4723 	 *
4724 	 * New CoW fork extents are created unwritten, turned into real extents
4725 	 * when we're about to write the data to disk, and mapped into the data
4726 	 * fork after the write finishes.  End of story.
4727 	 *
4728 	 * New data fork extents must be mapped in as unwritten and converted
4729 	 * to real extents after the write succeeds to avoid exposing stale
4730 	 * disk contents if we crash.
4731 	 */
4732 	bma.flags = XFS_BMAPI_PREALLOC;
4733 	if (whichfork == XFS_COW_FORK)
4734 		bma.flags |= XFS_BMAPI_COWFORK;
4735 
4736 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4737 		bma.prev.br_startoff = NULLFILEOFF;
4738 
4739 	error = xfs_bmapi_allocate(&bma);
4740 	if (error)
4741 		goto out_finish;
4742 
4743 	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4744 	XFS_STATS_INC(mp, xs_xstrat_quick);
4745 
4746 	ASSERT(!isnullstartblock(bma.got.br_startblock));
4747 	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4748 				xfs_iomap_inode_sequence(ip, flags));
4749 	if (seq)
4750 		*seq = READ_ONCE(ifp->if_seq);
4751 
4752 	if (whichfork == XFS_COW_FORK)
4753 		xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4754 
4755 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4756 			whichfork);
4757 	if (error)
4758 		goto out_finish;
4759 
4760 	xfs_bmapi_finish(&bma, whichfork, 0);
4761 	error = xfs_trans_commit(tp);
4762 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4763 	return error;
4764 
4765 out_finish:
4766 	xfs_bmapi_finish(&bma, whichfork, error);
4767 out_trans_cancel:
4768 	xfs_trans_cancel(tp);
4769 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4770 	return error;
4771 }
4772 
4773 /*
4774  * Pass in a delalloc extent and convert it to real extents, return the real
4775  * extent that maps offset_fsb in iomap.
4776  */
4777 int
4778 xfs_bmapi_convert_delalloc(
4779 	struct xfs_inode	*ip,
4780 	int			whichfork,
4781 	loff_t			offset,
4782 	struct iomap		*iomap,
4783 	unsigned int		*seq)
4784 {
4785 	int			error;
4786 
4787 	/*
4788 	 * Attempt to allocate whatever delalloc extent currently backs offset
4789 	 * and put the result into iomap.  Allocate in a loop because it may
4790 	 * take several attempts to allocate real blocks for a contiguous
4791 	 * delalloc extent if free space is sufficiently fragmented.
4792 	 */
4793 	do {
4794 		error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset,
4795 					iomap, seq);
4796 		if (error)
4797 			return error;
4798 	} while (iomap->offset + iomap->length <= offset);
4799 
4800 	return 0;
4801 }
4802 
4803 int
4804 xfs_bmapi_remap(
4805 	struct xfs_trans	*tp,
4806 	struct xfs_inode	*ip,
4807 	xfs_fileoff_t		bno,
4808 	xfs_filblks_t		len,
4809 	xfs_fsblock_t		startblock,
4810 	uint32_t		flags)
4811 {
4812 	struct xfs_mount	*mp = ip->i_mount;
4813 	struct xfs_ifork	*ifp;
4814 	struct xfs_btree_cur	*cur = NULL;
4815 	struct xfs_bmbt_irec	got;
4816 	struct xfs_iext_cursor	icur;
4817 	int			whichfork = xfs_bmapi_whichfork(flags);
4818 	int			logflags = 0, error;
4819 
4820 	ifp = xfs_ifork_ptr(ip, whichfork);
4821 	ASSERT(len > 0);
4822 	ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
4823 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4824 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4825 			   XFS_BMAPI_NORMAP)));
4826 	ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4827 			(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4828 
4829 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4830 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4831 		xfs_bmap_mark_sick(ip, whichfork);
4832 		return -EFSCORRUPTED;
4833 	}
4834 
4835 	if (xfs_is_shutdown(mp))
4836 		return -EIO;
4837 
4838 	error = xfs_iread_extents(tp, ip, whichfork);
4839 	if (error)
4840 		return error;
4841 
4842 	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4843 		/* make sure we only reflink into a hole. */
4844 		ASSERT(got.br_startoff > bno);
4845 		ASSERT(got.br_startoff - bno >= len);
4846 	}
4847 
4848 	ip->i_nblocks += len;
4849 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4850 
4851 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
4852 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4853 
4854 	got.br_startoff = bno;
4855 	got.br_startblock = startblock;
4856 	got.br_blockcount = len;
4857 	if (flags & XFS_BMAPI_PREALLOC)
4858 		got.br_state = XFS_EXT_UNWRITTEN;
4859 	else
4860 		got.br_state = XFS_EXT_NORM;
4861 
4862 	error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4863 			&cur, &got, &logflags, flags);
4864 	if (error)
4865 		goto error0;
4866 
4867 	error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
4868 
4869 error0:
4870 	if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
4871 		logflags &= ~XFS_ILOG_DEXT;
4872 	else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
4873 		logflags &= ~XFS_ILOG_DBROOT;
4874 
4875 	if (logflags)
4876 		xfs_trans_log_inode(tp, ip, logflags);
4877 	if (cur)
4878 		xfs_btree_del_cursor(cur, error);
4879 	return error;
4880 }
4881 
4882 /*
4883  * When a delalloc extent is split (e.g., due to a hole punch), the original
4884  * indlen reservation must be shared across the two new extents that are left
4885  * behind.
4886  *
4887  * Given the original reservation and the worst case indlen for the two new
4888  * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4889  * reservation fairly across the two new extents.  Stealing blocks from a
4890  * deleted extent to make up a reservation deficiency (e.g., if ores == 1)
4891  * is the responsibility of the caller and must happen before this function
4892  * is called; a worked example follows the function.
4893  */
4894 static void
4895 xfs_bmap_split_indlen(
4896 	xfs_filblks_t			ores,		/* original res. */
4897 	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
4898 	xfs_filblks_t			*indlen2)	/* ext2 worst indlen */
4899 {
4900 	xfs_filblks_t			len1 = *indlen1;
4901 	xfs_filblks_t			len2 = *indlen2;
4902 	xfs_filblks_t			nres = len1 + len2; /* new total res. */
4903 	xfs_filblks_t			resfactor;
4904 
4905 	/*
4906 	 * We can't meet the total required reservation for the two extents.
4907 	 * Calculate the percent of the overall shortage between both extents
4908 	 * and apply this percentage to each of the requested indlen values.
4909 	 * This distributes the shortage fairly and reduces the chances that one
4910 	 * of the two extents is left with nothing when extents are repeatedly
4911 	 * split.
4912 	 */
4913 	resfactor = (ores * 100);
4914 	do_div(resfactor, nres);
4915 	len1 *= resfactor;
4916 	do_div(len1, 100);
4917 	len2 *= resfactor;
4918 	do_div(len2, 100);
4919 	ASSERT(len1 + len2 <= ores);
4920 	ASSERT(len1 < *indlen1 && len2 < *indlen2);
4921 
4922 	/*
4923 	 * Hand out the remainder to each extent. If one of the two reservations
4924 	 * is zero, we want to make sure that one gets a block first. The loop
4925 	 * below starts with len1, so hand len2 a block right off the bat if it
4926 	 * is zero.
4927 	 */
4928 	ores -= (len1 + len2);
4929 	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4930 	if (ores && !len2 && *indlen2) {
4931 		len2++;
4932 		ores--;
4933 	}
4934 	while (ores) {
4935 		if (len1 < *indlen1) {
4936 			len1++;
4937 			ores--;
4938 		}
4939 		if (!ores)
4940 			break;
4941 		if (len2 < *indlen2) {
4942 			len2++;
4943 			ores--;
4944 		}
4945 	}
4946 
4947 	*indlen1 = len1;
4948 	*indlen2 = len2;
4949 }
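
/*
 * Worked example for xfs_bmap_split_indlen() (made-up numbers, not
 * compiled): 10 reserved blocks must cover worst-case indlens of 8 and 6.
 */
#if 0
static void example_split_indlen(void)
{
	xfs_filblks_t	ores = 10, indlen1 = 8, indlen2 = 6;

	xfs_bmap_split_indlen(ores, &indlen1, &indlen2);

	/*
	 * resfactor = 10 * 100 / 14 = 71, so len1 = 8 * 71 / 100 = 5 and
	 * len2 = 6 * 71 / 100 = 4; the one leftover block goes to len1,
	 * leaving indlen1 == 6 and indlen2 == 4.
	 */
}
#endif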
4950 
4951 void
4952 xfs_bmap_del_extent_delay(
4953 	struct xfs_inode	*ip,
4954 	int			whichfork,
4955 	struct xfs_iext_cursor	*icur,
4956 	struct xfs_bmbt_irec	*got,
4957 	struct xfs_bmbt_irec	*del)
4958 {
4959 	struct xfs_mount	*mp = ip->i_mount;
4960 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4961 	struct xfs_bmbt_irec	new;
4962 	int64_t			da_old, da_new, da_diff = 0;
4963 	xfs_fileoff_t		del_endoff, got_endoff;
4964 	xfs_filblks_t		got_indlen, new_indlen, stolen = 0;
4965 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
4966 	uint64_t		fdblocks;
4967 	bool			isrt;
4968 
4969 	XFS_STATS_INC(mp, xs_del_exlist);
4970 
4971 	isrt = xfs_ifork_is_realtime(ip, whichfork);
4972 	del_endoff = del->br_startoff + del->br_blockcount;
4973 	got_endoff = got->br_startoff + got->br_blockcount;
4974 	da_old = startblockval(got->br_startblock);
4975 	da_new = 0;
4976 
4977 	ASSERT(del->br_blockcount > 0);
4978 	ASSERT(got->br_startoff <= del->br_startoff);
4979 	ASSERT(got_endoff >= del_endoff);
4980 
4981 	/*
4982 	 * Update the inode delalloc counter now and wait to update the
4983 	 * sb counters as we might have to borrow some blocks for the
4984 	 * indirect block accounting.
4985 	 */
4986 	xfs_quota_unreserve_blkres(ip, del->br_blockcount);
4987 	ip->i_delayed_blks -= del->br_blockcount;
4988 
4989 	if (got->br_startoff == del->br_startoff)
4990 		state |= BMAP_LEFT_FILLING;
4991 	if (got_endoff == del_endoff)
4992 		state |= BMAP_RIGHT_FILLING;
4993 
4994 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4995 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4996 		/*
4997 		 * Matches the whole extent.  Delete the entry.
4998 		 */
4999 		xfs_iext_remove(ip, icur, state);
5000 		xfs_iext_prev(ifp, icur);
5001 		break;
5002 	case BMAP_LEFT_FILLING:
5003 		/*
5004 		 * Deleting the first part of the extent.
5005 		 */
5006 		got->br_startoff = del_endoff;
5007 		got->br_blockcount -= del->br_blockcount;
5008 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
5009 				got->br_blockcount), da_old);
5010 		got->br_startblock = nullstartblock((int)da_new);
5011 		xfs_iext_update_extent(ip, state, icur, got);
5012 		break;
5013 	case BMAP_RIGHT_FILLING:
5014 		/*
5015 		 * Deleting the last part of the extent.
5016 		 */
5017 		got->br_blockcount = got->br_blockcount - del->br_blockcount;
5018 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
5019 				got->br_blockcount), da_old);
5020 		got->br_startblock = nullstartblock((int)da_new);
5021 		xfs_iext_update_extent(ip, state, icur, got);
5022 		break;
5023 	case 0:
5024 		/*
5025 		 * Deleting the middle of the extent.
5026 		 *
5027 		 * Distribute the original indlen reservation across the two new
5028 		 * extents.  Steal blocks from the deleted extent if necessary.
5029 		 * Stealing blocks simply fudges the fdblocks accounting below.
5030 		 * Warn if either of the new indlen reservations is zero as this
5031 		 * can lead to delalloc problems.
5032 		 */
5033 		got->br_blockcount = del->br_startoff - got->br_startoff;
5034 		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
5035 
5036 		new.br_blockcount = got_endoff - del_endoff;
5037 		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5038 
5039 		WARN_ON_ONCE(!got_indlen || !new_indlen);
5040 		/*
5041 		 * Steal as many blocks as we can to try and satisfy the worst
5042 		 * case indlen for both new extents.
5043 		 *
5044 		 * However, we can't just steal reservations from the data
5045 		 * blocks if this is an RT inodes as the data and metadata
5046 		 * blocks if this is an RT inode, as the data and metadata
5047 		 * blocks come from different pools.  We'll have to live with
5048 		 * an under-filled indirect reservation in this case.
5049 		da_new = got_indlen + new_indlen;
5050 		if (da_new > da_old && !isrt) {
5051 			stolen = XFS_FILBLKS_MIN(da_new - da_old,
5052 						 del->br_blockcount);
5053 			da_old += stolen;
5054 		}
5055 		if (da_new > da_old)
5056 			xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen);
5057 		da_new = got_indlen + new_indlen;
5058 
5059 		got->br_startblock = nullstartblock((int)got_indlen);
5060 
5061 		new.br_startoff = del_endoff;
5062 		new.br_state = got->br_state;
5063 		new.br_startblock = nullstartblock((int)new_indlen);
5064 
5065 		xfs_iext_update_extent(ip, state, icur, got);
5066 		xfs_iext_next(ifp, icur);
5067 		xfs_iext_insert(ip, icur, &new, state);
5068 
5069 		del->br_blockcount -= stolen;
5070 		break;
5071 	}
5072 
5073 	ASSERT(da_old >= da_new);
5074 	da_diff = da_old - da_new;
5075 	fdblocks = da_diff;
5076 
5077 	if (isrt)
5078 		xfs_add_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
5079 	else
5080 		fdblocks += del->br_blockcount;
5081 
5082 	xfs_add_fdblocks(mp, fdblocks);
5083 	xfs_mod_delalloc(ip, -(int64_t)del->br_blockcount, -da_diff);
5084 }
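
/*
 * Worked example for the middle-deletion case above (hypothetical
 * numbers): with da_old = 3 and worst-case indlens of 2 and 2, da_new = 4
 * exceeds da_old, so one block is stolen from a 10-block deleted range.
 * da_old becomes 4, no proportional split is needed, and the deletion
 * returns only 9 data blocks (and da_diff = 0 indirect blocks) to
 * fdblocks.
 */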
5085 
5086 void
5087 xfs_bmap_del_extent_cow(
5088 	struct xfs_inode	*ip,
5089 	struct xfs_iext_cursor	*icur,
5090 	struct xfs_bmbt_irec	*got,
5091 	struct xfs_bmbt_irec	*del)
5092 {
5093 	struct xfs_mount	*mp = ip->i_mount;
5094 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
5095 	struct xfs_bmbt_irec	new;
5096 	xfs_fileoff_t		del_endoff, got_endoff;
5097 	uint32_t		state = BMAP_COWFORK;
5098 
5099 	XFS_STATS_INC(mp, xs_del_exlist);
5100 
5101 	del_endoff = del->br_startoff + del->br_blockcount;
5102 	got_endoff = got->br_startoff + got->br_blockcount;
5103 
5104 	ASSERT(del->br_blockcount > 0);
5105 	ASSERT(got->br_startoff <= del->br_startoff);
5106 	ASSERT(got_endoff >= del_endoff);
5107 	ASSERT(!isnullstartblock(got->br_startblock));
5108 
5109 	if (got->br_startoff == del->br_startoff)
5110 		state |= BMAP_LEFT_FILLING;
5111 	if (got_endoff == del_endoff)
5112 		state |= BMAP_RIGHT_FILLING;
5113 
5114 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5115 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5116 		/*
5117 		 * Matches the whole extent.  Delete the entry.
5118 		 */
5119 		xfs_iext_remove(ip, icur, state);
5120 		xfs_iext_prev(ifp, icur);
5121 		break;
5122 	case BMAP_LEFT_FILLING:
5123 		/*
5124 		 * Deleting the first part of the extent.
5125 		 */
5126 		got->br_startoff = del_endoff;
5127 		got->br_blockcount -= del->br_blockcount;
5128 		got->br_startblock = del->br_startblock + del->br_blockcount;
5129 		xfs_iext_update_extent(ip, state, icur, got);
5130 		break;
5131 	case BMAP_RIGHT_FILLING:
5132 		/*
5133 		 * Deleting the last part of the extent.
5134 		 */
5135 		got->br_blockcount -= del->br_blockcount;
5136 		xfs_iext_update_extent(ip, state, icur, got);
5137 		break;
5138 	case 0:
5139 		/*
5140 		 * Deleting the middle of the extent.
5141 		 */
5142 		got->br_blockcount = del->br_startoff - got->br_startoff;
5143 
5144 		new.br_startoff = del_endoff;
5145 		new.br_blockcount = got_endoff - del_endoff;
5146 		new.br_state = got->br_state;
5147 		new.br_startblock = del->br_startblock + del->br_blockcount;
5148 
5149 		xfs_iext_update_extent(ip, state, icur, got);
5150 		xfs_iext_next(ifp, icur);
5151 		xfs_iext_insert(ip, icur, &new, state);
5152 		break;
5153 	}
5154 	ip->i_delayed_blks -= del->br_blockcount;
5155 }
5156 
5157 /*
5158  * Called by xfs_bmapi to update file extent records and the btree
5159  * after removing space.
5160  */
5161 STATIC int				/* error */
5162 xfs_bmap_del_extent_real(
5163 	xfs_inode_t		*ip,	/* incore inode pointer */
5164 	xfs_trans_t		*tp,	/* current transaction pointer */
5165 	struct xfs_iext_cursor	*icur,
5166 	struct xfs_btree_cur	*cur,	/* if null, not a btree */
5167 	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
5168 	int			*logflagsp, /* inode logging flags */
5169 	int			whichfork, /* data or attr fork */
5170 	uint32_t		bflags)	/* bmapi flags */
5171 {
5172 	xfs_fsblock_t		del_endblock=0;	/* first block past del */
5173 	xfs_fileoff_t		del_endoff;	/* first offset past del */
5174 	int			error = 0;	/* error return value */
5175 	struct xfs_bmbt_irec	got;	/* current extent entry */
5176 	xfs_fileoff_t		got_endoff;	/* first offset past got */
5177 	int			i;	/* temp state */
5178 	struct xfs_ifork	*ifp;	/* inode fork pointer */
5179 	xfs_mount_t		*mp;	/* mount structure */
5180 	xfs_filblks_t		nblks;	/* quota/sb block count */
5181 	xfs_bmbt_irec_t		new;	/* new record to be inserted */
5182 	/* REFERENCED */
5183 	uint			qfield;	/* quota field to update */
5184 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
5185 	struct xfs_bmbt_irec	old;
5186 
5187 	*logflagsp = 0;
5188 
5189 	mp = ip->i_mount;
5190 	XFS_STATS_INC(mp, xs_del_exlist);
5191 
5192 	ifp = xfs_ifork_ptr(ip, whichfork);
5193 	ASSERT(del->br_blockcount > 0);
5194 	xfs_iext_get_extent(ifp, icur, &got);
5195 	ASSERT(got.br_startoff <= del->br_startoff);
5196 	del_endoff = del->br_startoff + del->br_blockcount;
5197 	got_endoff = got.br_startoff + got.br_blockcount;
5198 	ASSERT(got_endoff >= del_endoff);
5199 	ASSERT(!isnullstartblock(got.br_startblock));
5200 	qfield = 0;
5201 
5202 	/*
5203 	 * If the directory code is running with no block
5204 	 * reservation, and the deleted block is in the middle of its extent,
5205 	 * and the resulting insert of an extent would cause transformation to
5206 	 * btree format, then reject it.  The calling code will then swap blocks
5207 	 * around instead.  We have to do this now, rather than waiting for the
5208 	 * conversion to btree format, since the transaction will be dirty then.
5209 	 */
5210 	if (tp->t_blk_res == 0 &&
5211 	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
5212 	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
5213 	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
5214 		return -ENOSPC;
5215 
5216 	*logflagsp = XFS_ILOG_CORE;
5217 	if (xfs_ifork_is_realtime(ip, whichfork))
5218 		qfield = XFS_TRANS_DQ_RTBCOUNT;
5219 	else
5220 		qfield = XFS_TRANS_DQ_BCOUNT;
5221 	nblks = del->br_blockcount;
5222 
5223 	del_endblock = del->br_startblock + del->br_blockcount;
5224 	if (cur) {
5225 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
5226 		if (error)
5227 			return error;
5228 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5229 			xfs_btree_mark_sick(cur);
5230 			return -EFSCORRUPTED;
5231 		}
5232 	}
5233 
5234 	if (got.br_startoff == del->br_startoff)
5235 		state |= BMAP_LEFT_FILLING;
5236 	if (got_endoff == del_endoff)
5237 		state |= BMAP_RIGHT_FILLING;
5238 
5239 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5240 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5241 		/*
5242 		 * Matches the whole extent.  Delete the entry.
5243 		 */
5244 		xfs_iext_remove(ip, icur, state);
5245 		xfs_iext_prev(ifp, icur);
5246 		ifp->if_nextents--;
5247 
5248 		*logflagsp |= XFS_ILOG_CORE;
5249 		if (!cur) {
5250 			*logflagsp |= xfs_ilog_fext(whichfork);
5251 			break;
5252 		}
5253 		error = xfs_btree_delete(cur, &i);
5254 		if (error)
			return error;
5255 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5256 			xfs_btree_mark_sick(cur);
5257 			return -EFSCORRUPTED;
5258 		}
5259 		break;
5260 	case BMAP_LEFT_FILLING:
5261 		/*
5262 		 * Deleting the first part of the extent.
5263 		 */
5264 		got.br_startoff = del_endoff;
5265 		got.br_startblock = del_endblock;
5266 		got.br_blockcount -= del->br_blockcount;
5267 		xfs_iext_update_extent(ip, state, icur, &got);
5268 		if (!cur) {
5269 			*logflagsp |= xfs_ilog_fext(whichfork);
5270 			break;
5271 		}
5272 		error = xfs_bmbt_update(cur, &got);
5273 		if (error)
5274 			return error;
5275 		break;
5276 	case BMAP_RIGHT_FILLING:
5277 		/*
5278 		 * Deleting the last part of the extent.
5279 		 */
5280 		got.br_blockcount -= del->br_blockcount;
5281 		xfs_iext_update_extent(ip, state, icur, &got);
5282 		if (!cur) {
5283 			*logflagsp |= xfs_ilog_fext(whichfork);
5284 			break;
5285 		}
5286 		error = xfs_bmbt_update(cur, &got);
5287 		if (error)
5288 			return error;
5289 		break;
5290 	case 0:
5291 		/*
5292 		 * Deleting the middle of the extent.
5293 		 */
5294 
5295 		old = got;
5296 
5297 		got.br_blockcount = del->br_startoff - got.br_startoff;
5298 		xfs_iext_update_extent(ip, state, icur, &got);
5299 
5300 		new.br_startoff = del_endoff;
5301 		new.br_blockcount = got_endoff - del_endoff;
5302 		new.br_state = got.br_state;
5303 		new.br_startblock = del_endblock;
5304 
5305 		*logflagsp |= XFS_ILOG_CORE;
5306 		if (cur) {
5307 			error = xfs_bmbt_update(cur, &got);
5308 			if (error)
5309 				return error;
5310 			error = xfs_btree_increment(cur, 0, &i);
5311 			if (error)
5312 				return error;
5313 			cur->bc_rec.b = new;
5314 			error = xfs_btree_insert(cur, &i);
5315 			if (error && error != -ENOSPC)
5316 				return error;
5317 			/*
5318 			 * If we got ENOSPC back from the btree insert, it
5319 			 * tried a split and we have a zero block reservation.
5320 			 * Fix up our state and return the error.
5321 			 */
5322 			if (error == -ENOSPC) {
5323 				/*
5324 				 * Reset the cursor, don't trust it after any
5325 				 * insert operation.
5326 				 */
5327 				error = xfs_bmbt_lookup_eq(cur, &got, &i);
5328 				if (error)
5329 					return error;
5330 				if (XFS_IS_CORRUPT(mp, i != 1)) {
5331 					xfs_btree_mark_sick(cur);
5332 					return -EFSCORRUPTED;
5333 				}
5334 				/*
5335 				 * Update the btree record back
5336 				 * to the original value.
5337 				 */
5338 				error = xfs_bmbt_update(cur, &old);
5339 				if (error)
5340 					return error;
5341 				/*
5342 				 * Reset the extent record back
5343 				 * to the original value.
5344 				 */
5345 				xfs_iext_update_extent(ip, state, icur, &old);
5346 				*logflagsp = 0;
5347 				return -ENOSPC;
5348 			}
5349 			if (XFS_IS_CORRUPT(mp, i != 1)) {
5350 				xfs_btree_mark_sick(cur);
5351 				return -EFSCORRUPTED;
5352 			}
5353 		} else
5354 			*logflagsp |= xfs_ilog_fext(whichfork);
5355 
5356 		ifp->if_nextents++;
5357 		xfs_iext_next(ifp, icur);
5358 		xfs_iext_insert(ip, icur, &new, state);
5359 		break;
5360 	}
5361 
5362 	/* remove reverse mapping */
5363 	xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5364 
5365 	/*
5366 	 * If we need to, add the extent to the list of extents to be freed.
5367 	 */
5368 	if (!(bflags & XFS_BMAPI_REMAP)) {
5369 		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5370 			xfs_refcount_decrease_extent(tp, del);
5371 		} else if (xfs_ifork_is_realtime(ip, whichfork)) {
5372 			/*
5373 			 * Ensure the bitmap and summary inodes are locked
5374 			 * and joined to the transaction before modifying them.
5375 			 */
5376 			if (!(tp->t_flags & XFS_TRANS_RTBITMAP_LOCKED)) {
5377 				tp->t_flags |= XFS_TRANS_RTBITMAP_LOCKED;
5378 				xfs_rtbitmap_lock(tp, mp);
5379 			}
5380 			error = xfs_rtfree_blocks(tp, del->br_startblock,
5381 					del->br_blockcount);
5382 		} else {
5383 			error = xfs_free_extent_later(tp, del->br_startblock,
5384 					del->br_blockcount, NULL,
5385 					XFS_AG_RESV_NONE,
5386 					((bflags & XFS_BMAPI_NODISCARD) ||
5387 					del->br_state == XFS_EXT_UNWRITTEN));
5388 		}
5389 		if (error)
5390 			return error;
5391 	}
5392 
5393 	/*
5394 	 * Adjust inode # blocks in the file.
5395 	 */
5396 	if (nblks)
5397 		ip->i_nblocks -= nblks;
5398 	/*
5399 	 * Adjust quota data.
5400 	 */
5401 	if (qfield && !(bflags & XFS_BMAPI_REMAP))
5402 		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5403 
5404 	return 0;
5405 }
5406 
5407 /*
5408  * Unmap (remove) blocks from a file.
5409  * If nexts is nonzero then the number of extents to remove is limited to
5410  * that value.  If not all extents in the block range can be removed then
5411  * *rlen is updated to the length that could not be removed.
5412  */
5413 static int
5414 __xfs_bunmapi(
5415 	struct xfs_trans	*tp,		/* transaction pointer */
5416 	struct xfs_inode	*ip,		/* incore inode */
5417 	xfs_fileoff_t		start,		/* first file offset deleted */
5418 	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
5419 	uint32_t		flags,		/* misc flags */
5420 	xfs_extnum_t		nexts)		/* number of extents max */
5421 {
5422 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
5423 	struct xfs_bmbt_irec	del;		/* extent being deleted */
5424 	int			error;		/* error return value */
5425 	xfs_extnum_t		extno;		/* extent number in list */
5426 	struct xfs_bmbt_irec	got;		/* current extent record */
5427 	struct xfs_ifork	*ifp;		/* inode fork pointer */
5428 	int			isrt;		/* freeing in rt area */
5429 	int			logflags;	/* transaction logging flags */
5430 	xfs_extlen_t		mod;		/* rt extent offset */
5431 	struct xfs_mount	*mp = ip->i_mount;
5432 	int			tmp_logflags;	/* partial logging flags */
5433 	int			wasdel;		/* was a delayed alloc extent */
5434 	int			whichfork;	/* data or attribute fork */
5435 	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
5436 	xfs_fileoff_t		end;
5437 	struct xfs_iext_cursor	icur;
5438 	bool			done = false;
5439 
5440 	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5441 
5442 	whichfork = xfs_bmapi_whichfork(flags);
5443 	ASSERT(whichfork != XFS_COW_FORK);
5444 	ifp = xfs_ifork_ptr(ip, whichfork);
5445 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) {
5446 		xfs_bmap_mark_sick(ip, whichfork);
5447 		return -EFSCORRUPTED;
5448 	}
5449 	if (xfs_is_shutdown(mp))
5450 		return -EIO;
5451 
5452 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
5453 	ASSERT(len > 0);
5454 	ASSERT(nexts >= 0);
5455 
5456 	error = xfs_iread_extents(tp, ip, whichfork);
5457 	if (error)
5458 		return error;
5459 
5460 	if (xfs_iext_count(ifp) == 0) {
5461 		*rlen = 0;
5462 		return 0;
5463 	}
5464 	XFS_STATS_INC(mp, xs_blk_unmap);
5465 	isrt = xfs_ifork_is_realtime(ip, whichfork);
5466 	end = start + len;
5467 
5468 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5469 		*rlen = 0;
5470 		return 0;
5471 	}
5472 	end--;
5473 
5474 	logflags = 0;
5475 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
5477 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5478 	else
5479 		cur = NULL;
5480 
5481 	extno = 0;
5482 	while (end != (xfs_fileoff_t)-1 && end >= start &&
5483 	       (nexts == 0 || extno < nexts)) {
5484 		/*
5485 		 * If end has landed in a hole before the found extent,
5486 		 * just back up to the previous extent.
5487 		 */
5488 		if (got.br_startoff > end &&
5489 		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5490 			done = true;
5491 			break;
5492 		}
5493 		/*
5494 		 * Is the last block of this extent before the range
5495 		 * we're supposed to delete?  If so, we're done.
5496 		 */
5497 		end = XFS_FILEOFF_MIN(end,
5498 			got.br_startoff + got.br_blockcount - 1);
5499 		if (end < start)
5500 			break;
5501 		/*
5502 		 * Then deal with the (possibly delayed) allocated space
5503 		 * we found.
5504 		 */
5505 		del = got;
5506 		wasdel = isnullstartblock(del.br_startblock);
5507 
5508 		if (got.br_startoff < start) {
5509 			del.br_startoff = start;
5510 			del.br_blockcount -= start - got.br_startoff;
5511 			if (!wasdel)
5512 				del.br_startblock += start - got.br_startoff;
5513 		}
5514 		if (del.br_startoff + del.br_blockcount > end + 1)
5515 			del.br_blockcount = end + 1 - del.br_startoff;
5516 
5517 		if (!isrt || (flags & XFS_BMAPI_REMAP))
5518 			goto delete;
5519 
5520 		mod = xfs_rtb_to_rtxoff(mp,
5521 				del.br_startblock + del.br_blockcount);
5522 		if (mod) {
5523 			/*
5524 			 * Realtime extent not lined up at the end.
5525 			 * The extent could have been split into written
5526 			 * and unwritten pieces, or we could just be
5527 			 * unmapping part of it.  But we can't really
5528 			 * get rid of part of a realtime extent.
5529 			 */
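			/*
			 * For instance (illustrative, made-up numbers): with
			 * an rt extent size of 4 blocks, a del that ends at
			 * rt block 7 has mod == 3, so a written del longer
			 * than that is chopped back to its last 3 blocks and
			 * converted to unwritten below rather than freed.
			 */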
5530 			if (del.br_state == XFS_EXT_UNWRITTEN) {
5531 				/*
5532 				 * This piece is already unwritten.
5533 				 * Skip over it.
5534 				 */
5535 				ASSERT((flags & XFS_BMAPI_REMAP) || end >= mod);
5536 				end -= mod > del.br_blockcount ?
5537 					del.br_blockcount : mod;
5538 				if (end < got.br_startoff &&
5539 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5540 					done = true;
5541 					break;
5542 				}
5543 				continue;
5544 			}
5545 			/*
5546 			 * It's written, turn it unwritten.
5547 			 * This is better than zeroing it.
5548 			 */
5549 			ASSERT(del.br_state == XFS_EXT_NORM);
5550 			ASSERT(tp->t_blk_res > 0);
5551 			/*
5552 			 * If this spans a realtime extent boundary,
5553 			 * chop it back to the start of the one we end at.
5554 			 */
5555 			if (del.br_blockcount > mod) {
5556 				del.br_startoff += del.br_blockcount - mod;
5557 				del.br_startblock += del.br_blockcount - mod;
5558 				del.br_blockcount = mod;
5559 			}
5560 			del.br_state = XFS_EXT_UNWRITTEN;
5561 			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5562 					whichfork, &icur, &cur, &del,
5563 					&logflags);
5564 			if (error)
5565 				goto error0;
5566 			goto nodelete;
5567 		}
5568 
5569 		mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
5570 		if (mod) {
5571 			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
5572 
5573 			/*
5574 			 * Realtime extent is lined up at the end but not
5575 			 * at the front.  We'll get rid of full extents if
5576 			 * we can.
5577 			 */
5578 			if (del.br_blockcount > off) {
5579 				del.br_blockcount -= off;
5580 				del.br_startoff += off;
5581 				del.br_startblock += off;
5582 			} else if (del.br_startoff == start &&
5583 				   (del.br_state == XFS_EXT_UNWRITTEN ||
5584 				    tp->t_blk_res == 0)) {
5585 				/*
5586 				 * Can't make it unwritten.  There isn't
5587 				 * a full extent here so just skip it.
5588 				 */
5589 				ASSERT(end >= del.br_blockcount);
5590 				end -= del.br_blockcount;
5591 				if (got.br_startoff > end &&
5592 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5593 					done = true;
5594 					break;
5595 				}
5596 				continue;
5597 			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
5598 				struct xfs_bmbt_irec	prev;
5599 				xfs_fileoff_t		unwrite_start;
5600 
5601 				/*
5602 				 * This one is already unwritten.
5603 				 * It must have a written left neighbor.
5604 				 * Unwrite the killed part of that one and
5605 				 * try again.
5606 				 */
5607 				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5608 					ASSERT(0);
5609 				ASSERT(prev.br_state == XFS_EXT_NORM);
5610 				ASSERT(!isnullstartblock(prev.br_startblock));
5611 				ASSERT(del.br_startblock ==
5612 				       prev.br_startblock + prev.br_blockcount);
5613 				unwrite_start = max3(start,
5614 						     del.br_startoff - mod,
5615 						     prev.br_startoff);
5616 				mod = unwrite_start - prev.br_startoff;
5617 				prev.br_startoff = unwrite_start;
5618 				prev.br_startblock += mod;
5619 				prev.br_blockcount -= mod;
5620 				prev.br_state = XFS_EXT_UNWRITTEN;
5621 				error = xfs_bmap_add_extent_unwritten_real(tp,
5622 						ip, whichfork, &icur, &cur,
5623 						&prev, &logflags);
5624 				if (error)
5625 					goto error0;
5626 				goto nodelete;
5627 			} else {
5628 				ASSERT(del.br_state == XFS_EXT_NORM);
5629 				del.br_state = XFS_EXT_UNWRITTEN;
5630 				error = xfs_bmap_add_extent_unwritten_real(tp,
5631 						ip, whichfork, &icur, &cur,
5632 						&del, &logflags);
5633 				if (error)
5634 					goto error0;
5635 				goto nodelete;
5636 			}
5637 		}
5638 
5639 delete:
5640 		if (wasdel) {
5641 			xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got, &del);
5642 		} else {
5643 			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5644 					&del, &tmp_logflags, whichfork,
5645 					flags);
5646 			logflags |= tmp_logflags;
5647 			if (error)
5648 				goto error0;
5649 		}
5650 
5651 		end = del.br_startoff - 1;
5652 nodelete:
5653 		/*
5654 		 * If not done go on to the next (previous) record.
5655 		 */
5656 		if (end != (xfs_fileoff_t)-1 && end >= start) {
5657 			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5658 			    (got.br_startoff > end &&
5659 			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
5660 				done = true;
5661 				break;
5662 			}
5663 			extno++;
5664 		}
5665 	}
5666 	if (done || end == (xfs_fileoff_t)-1 || end < start)
5667 		*rlen = 0;
5668 	else
5669 		*rlen = end - start + 1;
5670 
5671 	/*
5672 	 * Convert to a btree if necessary.
5673 	 */
5674 	if (xfs_bmap_needs_btree(ip, whichfork)) {
5675 		ASSERT(cur == NULL);
5676 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5677 				&tmp_logflags, whichfork);
5678 		logflags |= tmp_logflags;
5679 	} else {
5680 		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
5681 			whichfork);
5682 	}
5683 
5684 error0:
5685 	/*
5686 	 * Log everything.  Do this after conversion; there's no point in
5687 	 * logging the extent records if we've converted to btree format.
5688 	 */
5689 	if ((logflags & xfs_ilog_fext(whichfork)) &&
5690 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
5691 		logflags &= ~xfs_ilog_fext(whichfork);
5692 	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5693 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
5694 		logflags &= ~xfs_ilog_fbroot(whichfork);
5695 	/*
5696 	 * Log the inode even in the error case; if the transaction
5697 	 * is dirty we'll need to shut down the filesystem.
5698 	 */
5699 	if (logflags)
5700 		xfs_trans_log_inode(tp, ip, logflags);
5701 	if (cur) {
5702 		if (!error)
5703 			cur->bc_bmap.allocated = 0;
5704 		xfs_btree_del_cursor(cur, error);
5705 	}
5706 	return error;
5707 }
5708 
5709 /* Unmap a range of a file. */
5710 int
5711 xfs_bunmapi(
5712 	xfs_trans_t		*tp,
5713 	struct xfs_inode	*ip,
5714 	xfs_fileoff_t		bno,
5715 	xfs_filblks_t		len,
5716 	uint32_t		flags,
5717 	xfs_extnum_t		nexts,
5718 	int			*done)
5719 {
5720 	int			error;
5721 
5722 	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
5723 	*done = (len == 0);
5724 	return error;
5725 }
5726 
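/*
 * A minimal caller sketch for the unmap interface above (illustrative; bno,
 * len and tp are caller-supplied, and transaction management is elided --
 * xfs_bunmapi_range() below shows the real pattern using __xfs_bunmapi()):
 *
 *	int		done = 0;
 *
 *	while (!done) {
 *		error = xfs_bunmapi(tp, ip, bno, len, 0, 2, &done);
 *		if (error)
 *			break;
 *		error = xfs_defer_finish(&tp);
 *		if (error)
 *			break;
 *	}
 */
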
5727 /*
5728  * Determine whether an extent shift can be accomplished by a merge with the
5729  * extent that precedes the target hole of the shift.
5730  */
5731 STATIC bool
5732 xfs_bmse_can_merge(
5733 	struct xfs_bmbt_irec	*left,	/* preceding extent */
5734 	struct xfs_bmbt_irec	*got,	/* current extent to shift */
5735 	xfs_fileoff_t		shift)	/* shift fsb */
5736 {
5737 	xfs_fileoff_t		startoff;
5738 
5739 	startoff = got->br_startoff - shift;
5740 
5741 	/*
5742 	 * The extent, once shifted, must be adjacent in-file and on-disk with
5743 	 * the preceding extent.
5744 	 */
5745 	if ((left->br_startoff + left->br_blockcount != startoff) ||
5746 	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5747 	    (left->br_state != got->br_state) ||
5748 	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN))
5749 		return false;
5750 
5751 	return true;
5752 }
5753 
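/*
 * Example for the merge check above (illustrative, made-up numbers): left
 * maps file offsets [0, 4) to blocks [100, 104) and got maps [6, 10) to
 * blocks [104, 108).  With shift == 2, got would start at offset 4, making
 * it adjacent to left both in-file and on-disk, so the shift can be done
 * as a merge.
 */
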
5754 /*
5755  * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5756  * hole in the file. If an extent shift would result in the extent being fully
5757  * adjacent to the extent that currently precedes the hole, we can merge with
5758  * the preceding extent rather than do the shift.
5759  *
5760  * This function assumes the caller has verified a shift-by-merge is possible
5761  * with the provided extents via xfs_bmse_can_merge().
5762  */
5763 STATIC int
5764 xfs_bmse_merge(
5765 	struct xfs_trans		*tp,
5766 	struct xfs_inode		*ip,
5767 	int				whichfork,
5768 	xfs_fileoff_t			shift,		/* shift fsb */
5769 	struct xfs_iext_cursor		*icur,
5770 	struct xfs_bmbt_irec		*got,		/* extent to shift */
5771 	struct xfs_bmbt_irec		*left,		/* preceding extent */
5772 	struct xfs_btree_cur		*cur,
5773 	int				*logflags)	/* output */
5774 {
5775 	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
5776 	struct xfs_bmbt_irec		new;
5777 	xfs_filblks_t			blockcount;
5778 	int				error, i;
5779 	struct xfs_mount		*mp = ip->i_mount;
5780 
5781 	blockcount = left->br_blockcount + got->br_blockcount;
5782 
5783 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5784 	ASSERT(xfs_bmse_can_merge(left, got, shift));
5785 
5786 	new = *left;
5787 	new.br_blockcount = blockcount;
5788 
5789 	/*
5790 	 * Update the on-disk extent count, the btree if necessary and log the
5791 	 * inode.
5792 	 */
5793 	ifp->if_nextents--;
5794 	*logflags |= XFS_ILOG_CORE;
5795 	if (!cur) {
5796 		*logflags |= XFS_ILOG_DEXT;
5797 		goto done;
5798 	}
5799 
5800 	/* lookup and remove the extent to merge */
5801 	error = xfs_bmbt_lookup_eq(cur, got, &i);
5802 	if (error)
5803 		return error;
5804 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5805 		xfs_btree_mark_sick(cur);
5806 		return -EFSCORRUPTED;
5807 	}
5808 
5809 	error = xfs_btree_delete(cur, &i);
5810 	if (error)
5811 		return error;
5812 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5813 		xfs_btree_mark_sick(cur);
5814 		return -EFSCORRUPTED;
5815 	}
5816 
5817 	/* lookup and update size of the previous extent */
5818 	error = xfs_bmbt_lookup_eq(cur, left, &i);
5819 	if (error)
5820 		return error;
5821 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5822 		xfs_btree_mark_sick(cur);
5823 		return -EFSCORRUPTED;
5824 	}
5825 
5826 	error = xfs_bmbt_update(cur, &new);
5827 	if (error)
5828 		return error;
5829 
5830 	/* change to extent format if required after extent removal */
5831 	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5832 	if (error)
5833 		return error;
5834 
5835 done:
5836 	xfs_iext_remove(ip, icur, 0);
5837 	xfs_iext_prev(ifp, icur);
5838 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5839 			&new);
5840 
5841 	/* update reverse mapping. rmap functions merge the rmaps for us */
5842 	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
5843 	memcpy(&new, got, sizeof(new));
5844 	new.br_startoff = left->br_startoff + left->br_blockcount;
5845 	xfs_rmap_map_extent(tp, ip, whichfork, &new);
5846 	return 0;
5847 }
5848 
5849 static int
5850 xfs_bmap_shift_update_extent(
5851 	struct xfs_trans	*tp,
5852 	struct xfs_inode	*ip,
5853 	int			whichfork,
5854 	struct xfs_iext_cursor	*icur,
5855 	struct xfs_bmbt_irec	*got,
5856 	struct xfs_btree_cur	*cur,
5857 	int			*logflags,
5858 	xfs_fileoff_t		startoff)
5859 {
5860 	struct xfs_mount	*mp = ip->i_mount;
5861 	struct xfs_bmbt_irec	prev = *got;
5862 	int			error, i;
5863 
5864 	*logflags |= XFS_ILOG_CORE;
5865 
5866 	got->br_startoff = startoff;
5867 
5868 	if (cur) {
5869 		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5870 		if (error)
5871 			return error;
5872 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5873 			xfs_btree_mark_sick(cur);
5874 			return -EFSCORRUPTED;
5875 		}
5876 
5877 		error = xfs_bmbt_update(cur, got);
5878 		if (error)
5879 			return error;
5880 	} else {
5881 		*logflags |= XFS_ILOG_DEXT;
5882 	}
5883 
5884 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5885 			got);
5886 
5887 	/* update reverse mapping */
5888 	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5889 	xfs_rmap_map_extent(tp, ip, whichfork, got);
5890 	return 0;
5891 }
5892 
5893 int
5894 xfs_bmap_collapse_extents(
5895 	struct xfs_trans	*tp,
5896 	struct xfs_inode	*ip,
5897 	xfs_fileoff_t		*next_fsb,
5898 	xfs_fileoff_t		offset_shift_fsb,
5899 	bool			*done)
5900 {
5901 	int			whichfork = XFS_DATA_FORK;
5902 	struct xfs_mount	*mp = ip->i_mount;
5903 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
5904 	struct xfs_btree_cur	*cur = NULL;
5905 	struct xfs_bmbt_irec	got, prev;
5906 	struct xfs_iext_cursor	icur;
5907 	xfs_fileoff_t		new_startoff;
5908 	int			error = 0;
5909 	int			logflags = 0;
5910 
5911 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5912 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5913 		xfs_bmap_mark_sick(ip, whichfork);
5914 		return -EFSCORRUPTED;
5915 	}
5916 
5917 	if (xfs_is_shutdown(mp))
5918 		return -EIO;
5919 
5920 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5921 
5922 	error = xfs_iread_extents(tp, ip, whichfork);
5923 	if (error)
5924 		return error;
5925 
5926 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
5927 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5928 
5929 	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5930 		*done = true;
5931 		goto del_cursor;
5932 	}
5933 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5934 		xfs_bmap_mark_sick(ip, whichfork);
5935 		error = -EFSCORRUPTED;
5936 		goto del_cursor;
5937 	}
5938 
5939 	new_startoff = got.br_startoff - offset_shift_fsb;
5940 	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5941 		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5942 			error = -EINVAL;
5943 			goto del_cursor;
5944 		}
5945 
5946 		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5947 			error = xfs_bmse_merge(tp, ip, whichfork,
5948 					offset_shift_fsb, &icur, &got, &prev,
5949 					cur, &logflags);
5950 			if (error)
5951 				goto del_cursor;
5952 			goto done;
5953 		}
5954 	} else {
5955 		if (got.br_startoff < offset_shift_fsb) {
5956 			error = -EINVAL;
5957 			goto del_cursor;
5958 		}
5959 	}
5960 
5961 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5962 			cur, &logflags, new_startoff);
5963 	if (error)
5964 		goto del_cursor;
5965 
5966 done:
5967 	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5968 		*done = true;
5969 		goto del_cursor;
5970 	}
5971 
5972 	*next_fsb = got.br_startoff;
5973 del_cursor:
5974 	if (cur)
5975 		xfs_btree_del_cursor(cur, error);
5976 	if (logflags)
5977 		xfs_trans_log_inode(tp, ip, logflags);
5978 	return error;
5979 }
5980 
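/*
 * A minimal sketch of how a caller might drive the collapse above
 * (illustrative; start_fsb and shift_fsb are caller-supplied, and locking,
 * transaction rolling and error handling are elided -- the real driver
 * lives in xfs_bmap_util.c):
 *
 *	xfs_fileoff_t	next_fsb = start_fsb;
 *	bool		done = false;
 *
 *	while (!done) {
 *		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb,
 *				shift_fsb, &done);
 *		if (error)
 *			break;
 *	}
 */
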
5981 /* Make sure we won't be right-shifting an extent past the maximum bound. */
5982 int
5983 xfs_bmap_can_insert_extents(
5984 	struct xfs_inode	*ip,
5985 	xfs_fileoff_t		off,
5986 	xfs_fileoff_t		shift)
5987 {
5988 	struct xfs_bmbt_irec	got;
5989 	int			is_empty;
5990 	int			error = 0;
5991 
5992 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
5993 
5994 	if (xfs_is_shutdown(ip->i_mount))
5995 		return -EIO;
5996 
5997 	xfs_ilock(ip, XFS_ILOCK_EXCL);
5998 	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5999 	if (!error && !is_empty && got.br_startoff >= off &&
6000 	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
6001 		error = -EINVAL;
6002 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
6003 
6004 	return error;
6005 }
6006 
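/*
 * Concretely (illustrative, made-up numbers): if the last extent starts one
 * block short of BMBT_STARTOFF_MASK, any shift of two or more blocks makes
 * the masked sum in the check above wrap below br_startoff, so the shift is
 * rejected with -EINVAL.
 */
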
6007 int
6008 xfs_bmap_insert_extents(
6009 	struct xfs_trans	*tp,
6010 	struct xfs_inode	*ip,
6011 	xfs_fileoff_t		*next_fsb,
6012 	xfs_fileoff_t		offset_shift_fsb,
6013 	bool			*done,
6014 	xfs_fileoff_t		stop_fsb)
6015 {
6016 	int			whichfork = XFS_DATA_FORK;
6017 	struct xfs_mount	*mp = ip->i_mount;
6018 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
6019 	struct xfs_btree_cur	*cur = NULL;
6020 	struct xfs_bmbt_irec	got, next;
6021 	struct xfs_iext_cursor	icur;
6022 	xfs_fileoff_t		new_startoff;
6023 	int			error = 0;
6024 	int			logflags = 0;
6025 
6026 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
6027 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
6028 		xfs_bmap_mark_sick(ip, whichfork);
6029 		return -EFSCORRUPTED;
6030 	}
6031 
6032 	if (xfs_is_shutdown(mp))
6033 		return -EIO;
6034 
6035 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
6036 
6037 	error = xfs_iread_extents(tp, ip, whichfork);
6038 	if (error)
6039 		return error;
6040 
6041 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
6042 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6043 
6044 	if (*next_fsb == NULLFSBLOCK) {
6045 		xfs_iext_last(ifp, &icur);
6046 		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
6047 		    stop_fsb > got.br_startoff) {
6048 			*done = true;
6049 			goto del_cursor;
6050 		}
6051 	} else {
6052 		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
6053 			*done = true;
6054 			goto del_cursor;
6055 		}
6056 	}
6057 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
6058 		xfs_bmap_mark_sick(ip, whichfork);
6059 		error = -EFSCORRUPTED;
6060 		goto del_cursor;
6061 	}
6062 
6063 	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
6064 		xfs_bmap_mark_sick(ip, whichfork);
6065 		error = -EFSCORRUPTED;
6066 		goto del_cursor;
6067 	}
6068 
6069 	new_startoff = got.br_startoff + offset_shift_fsb;
6070 	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
6071 		if (new_startoff + got.br_blockcount > next.br_startoff) {
6072 			error = -EINVAL;
6073 			goto del_cursor;
6074 		}
6075 
6076 		/*
6077 		 * Unlike a left shift (which involves a hole punch), a right
6078 		 * shift does not modify extent neighbors in any way.  We should
6079 		 * never find mergeable extents in this scenario.  Check anyway
6080 		 * and warn if we encounter two extents that could be one.
6081 		 */
6082 		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
6083 			WARN_ON_ONCE(1);
6084 	}
6085 
6086 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
6087 			cur, &logflags, new_startoff);
6088 	if (error)
6089 		goto del_cursor;
6090 
6091 	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
6092 	    stop_fsb >= got.br_startoff + got.br_blockcount) {
6093 		*done = true;
6094 		goto del_cursor;
6095 	}
6096 
6097 	*next_fsb = got.br_startoff;
6098 del_cursor:
6099 	if (cur)
6100 		xfs_btree_del_cursor(cur, error);
6101 	if (logflags)
6102 		xfs_trans_log_inode(tp, ip, logflags);
6103 	return error;
6104 }
6105 
6106 /*
6107  * Split an extent into two extents at split_fsb such that split_fsb becomes
6108  * the first block of the new extent.  If split_fsb lies in a hole or at the
6109  * first block of an extent, there is nothing to do and we return 0.
6111  */
6112 int
6113 xfs_bmap_split_extent(
6114 	struct xfs_trans	*tp,
6115 	struct xfs_inode	*ip,
6116 	xfs_fileoff_t		split_fsb)
6117 {
6118 	int				whichfork = XFS_DATA_FORK;
6119 	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
6120 	struct xfs_btree_cur		*cur = NULL;
6121 	struct xfs_bmbt_irec		got;
6122 	struct xfs_bmbt_irec		new; /* split extent */
6123 	struct xfs_mount		*mp = ip->i_mount;
6124 	xfs_fsblock_t			gotblkcnt; /* new block count for got */
6125 	struct xfs_iext_cursor		icur;
6126 	int				error = 0;
6127 	int				logflags = 0;
6128 	int				i = 0;
6129 
6130 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
6131 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
6132 		xfs_bmap_mark_sick(ip, whichfork);
6133 		return -EFSCORRUPTED;
6134 	}
6135 
6136 	if (xfs_is_shutdown(mp))
6137 		return -EIO;
6138 
6139 	/* Read in all the extents */
6140 	error = xfs_iread_extents(tp, ip, whichfork);
6141 	if (error)
6142 		return error;
6143 
6144 	/*
6145 	 * If there are no extents, or split_fsb lies in a hole, we are done.
6146 	 */
6147 	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
6148 	    got.br_startoff >= split_fsb)
6149 		return 0;
6150 
6151 	gotblkcnt = split_fsb - got.br_startoff;
6152 	new.br_startoff = split_fsb;
6153 	new.br_startblock = got.br_startblock + gotblkcnt;
6154 	new.br_blockcount = got.br_blockcount - gotblkcnt;
6155 	new.br_state = got.br_state;
6156 
6157 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
6158 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6159 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
6160 		if (error)
6161 			goto del_cursor;
6162 		if (XFS_IS_CORRUPT(mp, i != 1)) {
6163 			xfs_btree_mark_sick(cur);
6164 			error = -EFSCORRUPTED;
6165 			goto del_cursor;
6166 		}
6167 	}
6168 
6169 	got.br_blockcount = gotblkcnt;
6170 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
6171 			&got);
6172 
6173 	logflags = XFS_ILOG_CORE;
6174 	if (cur) {
6175 		error = xfs_bmbt_update(cur, &got);
6176 		if (error)
6177 			goto del_cursor;
6178 	} else
6179 		logflags |= XFS_ILOG_DEXT;
6180 
6181 	/* Add new extent */
6182 	xfs_iext_next(ifp, &icur);
6183 	xfs_iext_insert(ip, &icur, &new, 0);
6184 	ifp->if_nextents++;
6185 
6186 	if (cur) {
6187 		error = xfs_bmbt_lookup_eq(cur, &new, &i);
6188 		if (error)
6189 			goto del_cursor;
6190 		if (XFS_IS_CORRUPT(mp, i != 0)) {
6191 			xfs_btree_mark_sick(cur);
6192 			error = -EFSCORRUPTED;
6193 			goto del_cursor;
6194 		}
6195 		error = xfs_btree_insert(cur, &i);
6196 		if (error)
6197 			goto del_cursor;
6198 		if (XFS_IS_CORRUPT(mp, i != 1)) {
6199 			xfs_btree_mark_sick(cur);
6200 			error = -EFSCORRUPTED;
6201 			goto del_cursor;
6202 		}
6203 	}
6204 
6205 	/*
6206 	 * Convert to a btree if necessary.
6207 	 */
6208 	if (xfs_bmap_needs_btree(ip, whichfork)) {
6209 		int tmp_logflags; /* partial log flag return val */
6210 
6211 		ASSERT(cur == NULL);
6212 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
6213 				&tmp_logflags, whichfork);
6214 		logflags |= tmp_logflags;
6215 	}
6216 
6217 del_cursor:
6218 	if (cur) {
6219 		cur->bc_bmap.allocated = 0;
6220 		xfs_btree_del_cursor(cur, error);
6221 	}
6222 
6223 	if (logflags)
6224 		xfs_trans_log_inode(tp, ip, logflags);
6225 	return error;
6226 }
6227 
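/*
 * Example for the split above (illustrative, made-up numbers): splitting an
 * extent that maps file offsets [0, 10) to blocks [100, 110) at
 * split_fsb == 4 updates got to [0, 4) -> [100, 104) and inserts new as
 * [4, 10) -> [104, 110), incrementing if_nextents.
 */
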
6228 /* Record a bmap intent. */
6229 static inline void
6230 __xfs_bmap_add(
6231 	struct xfs_trans		*tp,
6232 	enum xfs_bmap_intent_type	type,
6233 	struct xfs_inode		*ip,
6234 	int				whichfork,
6235 	struct xfs_bmbt_irec		*bmap)
6236 {
6237 	struct xfs_bmap_intent		*bi;
6238 
6239 	if ((whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK) ||
6240 	    bmap->br_startblock == HOLESTARTBLOCK ||
6241 	    bmap->br_startblock == DELAYSTARTBLOCK)
6242 		return;
6243 
6244 	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
6245 	INIT_LIST_HEAD(&bi->bi_list);
6246 	bi->bi_type = type;
6247 	bi->bi_owner = ip;
6248 	bi->bi_whichfork = whichfork;
6249 	bi->bi_bmap = *bmap;
6250 
6251 	xfs_bmap_defer_add(tp, bi);
6252 }
6253 
6254 /* Map an extent into a file. */
6255 void
6256 xfs_bmap_map_extent(
6257 	struct xfs_trans	*tp,
6258 	struct xfs_inode	*ip,
6259 	int			whichfork,
6260 	struct xfs_bmbt_irec	*PREV)
6261 {
6262 	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, whichfork, PREV);
6263 }
6264 
6265 /* Unmap an extent out of a file. */
6266 void
6267 xfs_bmap_unmap_extent(
6268 	struct xfs_trans	*tp,
6269 	struct xfs_inode	*ip,
6270 	int			whichfork,
6271 	struct xfs_bmbt_irec	*PREV)
6272 {
6273 	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, whichfork, PREV);
6274 }
6275 
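/*
 * Typical usage of the two helpers above (a sketch; irec and tp are
 * caller-supplied): queue the intent, then let deferred-ops processing call
 * back into xfs_bmap_finish_one() below:
 *
 *	xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &irec);
 *	error = xfs_defer_finish(&tp);
 */
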
6276 /*
6277  * Process one of the deferred bmap operations: remap an extent into the
6278  * file or unmap a range, depending on the intent type.
6279  */
6280 int
6281 xfs_bmap_finish_one(
6282 	struct xfs_trans		*tp,
6283 	struct xfs_bmap_intent		*bi)
6284 {
6285 	struct xfs_bmbt_irec		*bmap = &bi->bi_bmap;
6286 	int				error = 0;
6287 	int				flags = 0;
6288 
6289 	if (bi->bi_whichfork == XFS_ATTR_FORK)
6290 		flags |= XFS_BMAPI_ATTRFORK;
6291 
6292 	ASSERT(tp->t_highest_agno == NULLAGNUMBER);
6293 
6294 	trace_xfs_bmap_deferred(bi);
6295 
6296 	if (XFS_TEST_ERROR(false, tp->t_mountp, XFS_ERRTAG_BMAP_FINISH_ONE))
6297 		return -EIO;
6298 
6299 	switch (bi->bi_type) {
6300 	case XFS_BMAP_MAP:
6301 		if (bi->bi_bmap.br_state == XFS_EXT_UNWRITTEN)
6302 			flags |= XFS_BMAPI_PREALLOC;
6303 		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
6304 				bmap->br_blockcount, bmap->br_startblock,
6305 				flags);
6306 		bmap->br_blockcount = 0;
6307 		break;
6308 	case XFS_BMAP_UNMAP:
6309 		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
6310 				&bmap->br_blockcount, flags | XFS_BMAPI_REMAP,
6311 				1);
6312 		break;
6313 	default:
6314 		ASSERT(0);
6315 		xfs_bmap_mark_sick(bi->bi_owner, bi->bi_whichfork);
6316 		error = -EFSCORRUPTED;
6317 	}
6318 
6319 	return error;
6320 }
6321 
6322 /* Check that an extent does not have invalid flags or bad ranges. */
6323 xfs_failaddr_t
6324 xfs_bmap_validate_extent_raw(
6325 	struct xfs_mount	*mp,
6326 	bool			rtfile,
6327 	int			whichfork,
6328 	struct xfs_bmbt_irec	*irec)
6329 {
6330 	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
6331 		return __this_address;
6332 
6333 	if (rtfile && whichfork == XFS_DATA_FORK) {
6334 		if (!xfs_verify_rtbext(mp, irec->br_startblock,
6335 					   irec->br_blockcount))
6336 			return __this_address;
6337 	} else {
6338 		if (!xfs_verify_fsbext(mp, irec->br_startblock,
6339 					   irec->br_blockcount))
6340 			return __this_address;
6341 	}
6342 	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
6343 		return __this_address;
6344 	return NULL;
6345 }
6346 
6347 int __init
6348 xfs_bmap_intent_init_cache(void)
6349 {
6350 	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
6351 			sizeof(struct xfs_bmap_intent),
6352 			0, 0, NULL);
6353 
6354 	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
6355 }
6356 
6357 void
6358 xfs_bmap_intent_destroy_cache(void)
6359 {
6360 	kmem_cache_destroy(xfs_bmap_intent_cache);
6361 	xfs_bmap_intent_cache = NULL;
6362 }
6363 
6364 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6365 xfs_failaddr_t
6366 xfs_bmap_validate_extent(
6367 	struct xfs_inode	*ip,
6368 	int			whichfork,
6369 	struct xfs_bmbt_irec	*irec)
6370 {
6371 	return xfs_bmap_validate_extent_raw(ip->i_mount,
6372 			XFS_IS_REALTIME_INODE(ip), whichfork, irec);
6373 }
6374 
6375 /*
6376  * Used by xfs_bunmapi_range() below.  This is the maximum number of extents
6377  * freed from a file in a single transaction.
6378  */
6379 #define	XFS_ITRUNC_MAX_EXTENTS	2
6380 
6381 /*
6382  * Unmap every extent in part of an inode's fork.  We don't do any higher level
6383  * invalidation work at all.
6384  */
6385 int
6386 xfs_bunmapi_range(
6387 	struct xfs_trans	**tpp,
6388 	struct xfs_inode	*ip,
6389 	uint32_t		flags,
6390 	xfs_fileoff_t		startoff,
6391 	xfs_fileoff_t		endoff)
6392 {
6393 	xfs_filblks_t		unmap_len = endoff - startoff + 1;
6394 	int			error = 0;
6395 
6396 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
6397 
6398 	while (unmap_len > 0) {
6399 		ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
6400 		error = __xfs_bunmapi(*tpp, ip, startoff, &unmap_len, flags,
6401 				XFS_ITRUNC_MAX_EXTENTS);
6402 		if (error)
6403 			goto out;
6404 
6405 		/* free the just unmapped extents */
6406 		error = xfs_defer_finish(tpp);
6407 		if (error)
6408 			goto out;
6409 		cond_resched();
6410 	}
6411 out:
6412 	return error;
6413 }
6414 
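/*
 * A minimal caller sketch (illustrative; the truncate path does roughly
 * this, with transaction setup, first_unmap_block computation and error
 * handling elided):
 *
 *	error = xfs_bunmapi_range(&tp, ip, 0, first_unmap_block,
 *			XFS_MAX_FILEOFF);
 */
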
6415 struct xfs_bmap_query_range {
6416 	xfs_bmap_query_range_fn	fn;
6417 	void			*priv;
6418 };
6419 
6420 /* Format btree record and pass to our callback. */
6421 STATIC int
6422 xfs_bmap_query_range_helper(
6423 	struct xfs_btree_cur		*cur,
6424 	const union xfs_btree_rec	*rec,
6425 	void				*priv)
6426 {
6427 	struct xfs_bmap_query_range	*query = priv;
6428 	struct xfs_bmbt_irec		irec;
6429 	xfs_failaddr_t			fa;
6430 
6431 	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
6432 	fa = xfs_bmap_validate_extent(cur->bc_ino.ip, cur->bc_ino.whichfork,
6433 			&irec);
6434 	if (fa) {
6435 		xfs_btree_mark_sick(cur);
6436 		return xfs_bmap_complain_bad_rec(cur->bc_ino.ip,
6437 				cur->bc_ino.whichfork, fa, &irec);
6438 	}
6439 
6440 	return query->fn(cur, &irec, query->priv);
6441 }
6442 
6443 /* Find all bmaps. */
6444 int
6445 xfs_bmap_query_all(
6446 	struct xfs_btree_cur		*cur,
6447 	xfs_bmap_query_range_fn		fn,
6448 	void				*priv)
6449 {
6450 	struct xfs_bmap_query_range	query = {
6451 		.priv			= priv,
6452 		.fn			= fn,
6453 	};
6454 
6455 	return xfs_btree_query_all(cur, xfs_bmap_query_range_helper, &query);
6456 }
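
/*
 * Example callback for xfs_bmap_query_all() (illustrative; the helper name
 * is hypothetical, and the signature follows the way the query helper above
 * invokes fn):
 *
 *	static int
 *	xfs_bmbt_count_recs(struct xfs_btree_cur *cur,
 *			struct xfs_bmbt_irec *irec, void *priv)
 *	{
 *		uint64_t	*nr = priv;
 *
 *		(*nr)++;
 *		return 0;
 *	}
 *
 *	error = xfs_bmap_query_all(cur, xfs_bmbt_count_recs, &nr);
 */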
6457