xref: /linux/fs/xfs/libxfs/xfs_bmap.c (revision a095686a2383526d7315197e2419d84ee8470217)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_dir2.h"
17 #include "xfs_inode.h"
18 #include "xfs_btree.h"
19 #include "xfs_trans.h"
20 #include "xfs_alloc.h"
21 #include "xfs_bmap.h"
22 #include "xfs_bmap_util.h"
23 #include "xfs_bmap_btree.h"
24 #include "xfs_rtbitmap.h"
25 #include "xfs_errortag.h"
26 #include "xfs_error.h"
27 #include "xfs_quota.h"
28 #include "xfs_trans_space.h"
29 #include "xfs_buf_item.h"
30 #include "xfs_trace.h"
31 #include "xfs_attr_leaf.h"
32 #include "xfs_filestream.h"
33 #include "xfs_rmap.h"
34 #include "xfs_ag.h"
35 #include "xfs_ag_resv.h"
36 #include "xfs_refcount.h"
37 #include "xfs_icache.h"
38 #include "xfs_iomap.h"
39 #include "xfs_health.h"
40 
41 struct kmem_cache		*xfs_bmap_intent_cache;
42 
43 /*
44  * Miscellaneous helper functions
45  */
46 
47 /*
48  * Compute and fill in the value of the maximum depth of a bmap btree
49  * in this filesystem.  Done once, during mount.
50  */
51 void
52 xfs_bmap_compute_maxlevels(
53 	xfs_mount_t	*mp,		/* file system mount structure */
54 	int		whichfork)	/* data or attr fork */
55 {
56 	uint64_t	maxblocks;	/* max blocks at this level */
57 	xfs_extnum_t	maxleafents;	/* max leaf entries possible */
58 	int		level;		/* btree level */
59 	int		maxrootrecs;	/* max records in root block */
60 	int		minleafrecs;	/* min records in leaf block */
61 	int		minnoderecs;	/* min records in node block */
62 	int		sz;		/* root block size */
63 
64 	/*
65 	 * The maximum number of extents in a fork, hence the maximum number of
66 	 * leaf entries, is controlled by the size of the on-disk extent count.
67 	 *
68 	 * Note that we can no longer assume that, if we are in ATTR1, the
69 	 * fork offset of all the inodes will be
70 	 * (xfs_default_attroffset(ip) >> 3), because we could have mounted with
71 	 * ATTR2 and then mounted back with ATTR1, keeping each i_forkoff fixed
72 	 * but probably at various positions. Therefore, for both ATTR1 and
73 	 * ATTR2 we have to assume the worst case scenario of a minimum size
74 	 * available.
75 	 */
76 	maxleafents = xfs_iext_max_nextents(xfs_has_large_extent_counts(mp),
77 				whichfork);
78 	if (whichfork == XFS_DATA_FORK)
79 		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
80 	else
81 		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
82 
83 	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
84 	minleafrecs = mp->m_bmap_dmnr[0];
85 	minnoderecs = mp->m_bmap_dmnr[1];
86 	maxblocks = howmany_64(maxleafents, minleafrecs);
87 	for (level = 1; maxblocks > 1; level++) {
88 		if (maxblocks <= maxrootrecs)
89 			maxblocks = 1;
90 		else
91 			maxblocks = howmany_64(maxblocks, minnoderecs);
92 	}
93 	mp->m_bm_maxlevels[whichfork] = level;
94 	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
95 }
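
/*
 * For illustration, assume a hypothetical 4k-block filesystem where both
 * m_bmap_dmnr[0] and m_bmap_dmnr[1] are 125 and the data fork is capped at
 * 2^31 - 1 extents (the real values depend on block size and feature bits):
 *
 *	maxblocks = howmany_64(2^31 - 1, 125)	-> 17179870 leaf blocks
 *	level 1:   howmany_64(17179870, 125)	->   137439 node blocks
 *	level 2:   howmany_64(137439, 125)	->     1100 node blocks
 *	level 3:   howmany_64(1100, 125)	->        9 node blocks
 *	level 4:   9 blocks collapse to the root ->       1
 *
 * which would leave m_bm_maxlevels[XFS_DATA_FORK] at 5.
 */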
96 
97 unsigned int
98 xfs_bmap_compute_attr_offset(
99 	struct xfs_mount	*mp)
100 {
101 	if (mp->m_sb.sb_inodesize == 256)
102 		return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
103 	return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
104 }
105 
106 STATIC int				/* error */
107 xfs_bmbt_lookup_eq(
108 	struct xfs_btree_cur	*cur,
109 	struct xfs_bmbt_irec	*irec,
110 	int			*stat)	/* success/failure */
111 {
112 	cur->bc_rec.b = *irec;
113 	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
114 }
115 
116 STATIC int				/* error */
117 xfs_bmbt_lookup_first(
118 	struct xfs_btree_cur	*cur,
119 	int			*stat)	/* success/failure */
120 {
121 	cur->bc_rec.b.br_startoff = 0;
122 	cur->bc_rec.b.br_startblock = 0;
123 	cur->bc_rec.b.br_blockcount = 0;
124 	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
125 }
126 
127 /*
128  * Check if the inode needs to be converted to btree format.
129  */
130 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
131 {
132 	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
133 
134 	return whichfork != XFS_COW_FORK &&
135 		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
136 		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
137 }
138 
139 /*
140  * Check if the inode should be converted to extent format.
141  */
142 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
143 {
144 	struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
145 
146 	return whichfork != XFS_COW_FORK &&
147 		ifp->if_format == XFS_DINODE_FMT_BTREE &&
148 		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
149 }
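
/*
 * As a concrete (hypothetical) illustration of the threshold the two
 * predicates above share: an inline fork area of 96 bytes holds
 * 96 / sizeof(struct xfs_bmbt_rec) = 96 / 16 = 6 extent records, so the
 * seventh real extent forces an extents -> btree conversion, while dropping
 * back to 6 or fewer records makes xfs_bmap_wants_extents() true again.
 */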
150 
151 /*
152  * Update the record referred to by cur to the value given by irec.
153  * This either works (return 0) or gets an EFSCORRUPTED error.
154  */
155 STATIC int
156 xfs_bmbt_update(
157 	struct xfs_btree_cur	*cur,
158 	struct xfs_bmbt_irec	*irec)
159 {
160 	union xfs_btree_rec	rec;
161 
162 	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
163 	return xfs_btree_update(cur, &rec);
164 }
165 
166 /*
167  * Compute the worst-case number of indirect blocks that will be used
168  * for ip's delayed extent of length "len".
169  */
170 STATIC xfs_filblks_t
171 xfs_bmap_worst_indlen(
172 	xfs_inode_t	*ip,		/* incore inode pointer */
173 	xfs_filblks_t	len)		/* delayed extent length */
174 {
175 	int		level;		/* btree level number */
176 	int		maxrecs;	/* maximum record count at this level */
177 	xfs_mount_t	*mp;		/* mount structure */
178 	xfs_filblks_t	rval;		/* return value */
179 
180 	mp = ip->i_mount;
181 	maxrecs = mp->m_bmap_dmxr[0];
182 	for (level = 0, rval = 0;
183 	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
184 	     level++) {
185 		len += maxrecs - 1;
186 		do_div(len, maxrecs);
187 		rval += len;
188 		if (len == 1)
189 			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
190 				level - 1;
191 		if (level == 0)
192 			maxrecs = mp->m_bmap_dmxr[1];
193 	}
194 	return rval;
195 }
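
/*
 * For example, assuming hypothetical values of 250 for both m_bmap_dmxr[0]
 * and m_bmap_dmxr[1], a 1000-block delayed extent costs
 * howmany(1000, 250) = 4 leaf blocks in the first pass and
 * howmany(4, 250) = 1 block in the second, after which each remaining level
 * contributes one block, i.e. 4 + 1 + (maxlevels - 2) blocks in total.
 */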
196 
197 /*
198  * Calculate the default attribute fork offset for newly created inodes.
199  */
200 uint
201 xfs_default_attroffset(
202 	struct xfs_inode	*ip)
203 {
204 	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
205 		return roundup(sizeof(xfs_dev_t), 8);
206 	return M_IGEO(ip->i_mount)->attr_fork_offset;
207 }
208 
209 /*
210  * Helper routine to reset inode i_forkoff field when switching attribute fork
211  * from local to extent format - we reset it where possible to make space
212  * available for inline data fork extents.
213  */
214 STATIC void
215 xfs_bmap_forkoff_reset(
216 	xfs_inode_t	*ip,
217 	int		whichfork)
218 {
219 	if (whichfork == XFS_ATTR_FORK &&
220 	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
221 	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
222 		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;
223 
224 		if (dfl_forkoff > ip->i_forkoff)
225 			ip->i_forkoff = dfl_forkoff;
226 	}
227 }
228 
229 static int
230 xfs_bmap_read_buf(
231 	struct xfs_mount	*mp,		/* file system mount point */
232 	struct xfs_trans	*tp,		/* transaction pointer */
233 	xfs_fsblock_t		fsbno,		/* file system block number */
234 	struct xfs_buf		**bpp)		/* buffer for fsbno */
235 {
236 	struct xfs_buf		*bp;		/* return value */
237 	int			error;
238 
239 	if (!xfs_verify_fsbno(mp, fsbno))
240 		return -EFSCORRUPTED;
241 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
242 			XFS_FSB_TO_DADDR(mp, fsbno), mp->m_bsize, 0, &bp,
243 			&xfs_bmbt_buf_ops);
244 	if (!error) {
245 		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
246 		*bpp = bp;
247 	}
248 	return error;
249 }
250 
251 #ifdef DEBUG
252 STATIC struct xfs_buf *
253 xfs_bmap_get_bp(
254 	struct xfs_btree_cur	*cur,
255 	xfs_fsblock_t		bno)
256 {
257 	struct xfs_log_item	*lip;
258 	int			i;
259 
260 	if (!cur)
261 		return NULL;
262 
263 	for (i = 0; i < cur->bc_maxlevels; i++) {
264 		if (!cur->bc_levels[i].bp)
265 			break;
266 		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
267 			return cur->bc_levels[i].bp;
268 	}
269 
270 	/* Chase down all the log items to see if the bp is there */
271 	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
272 		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;
273 
274 		if (bip->bli_item.li_type == XFS_LI_BUF &&
275 		    xfs_buf_daddr(bip->bli_buf) == bno)
276 			return bip->bli_buf;
277 	}
278 
279 	return NULL;
280 }
281 
282 STATIC void
283 xfs_check_block(
284 	struct xfs_btree_block	*block,
285 	xfs_mount_t		*mp,
286 	int			root,
287 	short			sz)
288 {
289 	int			i, j, dmxr;
290 	__be64			*pp, *thispa;	/* pointer to block address */
291 	xfs_bmbt_key_t		*prevp, *keyp;
292 
293 	ASSERT(be16_to_cpu(block->bb_level) > 0);
294 
295 	prevp = NULL;
296 	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
297 		dmxr = mp->m_bmap_dmxr[0];
298 		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
299 
300 		if (prevp) {
301 			ASSERT(be64_to_cpu(prevp->br_startoff) <
302 			       be64_to_cpu(keyp->br_startoff));
303 		}
304 		prevp = keyp;
305 
306 		/*
307 		 * Compare the block numbers to see if there are dups.
308 		 */
309 		if (root)
310 			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
311 		else
312 			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
313 
314 		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
315 			if (root)
316 				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
317 			else
318 				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
319 			if (*thispa == *pp) {
320 				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %lld",
321 					__func__, j, i,
322 					(unsigned long long)be64_to_cpu(*thispa));
323 				xfs_err(mp, "%s: ptrs are equal in node\n",
324 					__func__);
325 				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
326 			}
327 		}
328 	}
329 }
330 
331 /*
332  * Check that the extents for the inode ip are in the right order in all
333  * btree leaves. This becomes prohibitively expensive for large extent count
334  * files, so don't bother with inodes that have more than 10,000 extents in
335  * them. The btree record ordering checks will still be done, and for such
336  * large bmapbt constructs those checks will catch most corruptions.
337  */
338 STATIC void
339 xfs_bmap_check_leaf_extents(
340 	struct xfs_btree_cur	*cur,	/* btree cursor or null */
341 	xfs_inode_t		*ip,		/* incore inode pointer */
342 	int			whichfork)	/* data or attr fork */
343 {
344 	struct xfs_mount	*mp = ip->i_mount;
345 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
346 	struct xfs_btree_block	*block;	/* current btree block */
347 	xfs_fsblock_t		bno;	/* block # of "block" */
348 	struct xfs_buf		*bp;	/* buffer for "block" */
349 	int			error;	/* error return value */
350 	xfs_extnum_t		i = 0, j;	/* index into the extents list */
351 	int			level;	/* btree level, for checking */
352 	__be64			*pp;	/* pointer to block address */
353 	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
354 	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
355 	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
356 	int			bp_release = 0;
357 
358 	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
359 		return;
360 
361 	/* skip large extent count inodes */
362 	if (ip->i_df.if_nextents > 10000)
363 		return;
364 
365 	bno = NULLFSBLOCK;
366 	block = ifp->if_broot;
367 	/*
368 	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
369 	 */
370 	level = be16_to_cpu(block->bb_level);
371 	ASSERT(level > 0);
372 	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
373 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
374 	bno = be64_to_cpu(*pp);
375 
376 	ASSERT(bno != NULLFSBLOCK);
377 	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
378 	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
379 
380 	/*
381 	 * Go down the tree until leaf level is reached, following the first
382 	 * pointer (leftmost) at each level.
383 	 */
384 	while (level-- > 0) {
385 		/* See if buf is in cur first */
386 		bp_release = 0;
387 		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
388 		if (!bp) {
389 			bp_release = 1;
390 			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
391 			if (xfs_metadata_is_sick(error))
392 				xfs_btree_mark_sick(cur);
393 			if (error)
394 				goto error_norelse;
395 		}
396 		block = XFS_BUF_TO_BLOCK(bp);
397 		if (level == 0)
398 			break;
399 
400 		/*
401 		 * Check this block for basic sanity (increasing keys and
402 		 * no duplicate blocks).
403 		 */
404 
405 		xfs_check_block(block, mp, 0, 0);
406 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
407 		bno = be64_to_cpu(*pp);
408 		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
409 			xfs_btree_mark_sick(cur);
410 			error = -EFSCORRUPTED;
411 			goto error0;
412 		}
413 		if (bp_release) {
414 			bp_release = 0;
415 			xfs_trans_brelse(NULL, bp);
416 		}
417 	}
418 
419 	/*
420 	 * Here with bp and block set to the leftmost leaf node in the tree.
421 	 */
422 	i = 0;
423 
424 	/*
425 	 * Loop over all leaf nodes checking that all extents are in the right order.
426 	 */
427 	for (;;) {
428 		xfs_fsblock_t	nextbno;
429 		xfs_extnum_t	num_recs;
430 
432 		num_recs = xfs_btree_get_numrecs(block);
433 
434 		/*
435 		 * Read-ahead the next leaf block, if any.
436 		 * Grab the pointer to the next leaf block, if any.
437 
438 		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
439 
440 		/*
441 		 * Check all the extents to make sure they are OK.
442 		 * If we had a previous block, the last entry should
443 		 * conform with the first entry in this one.
444 		 */
445 
446 		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
447 		if (i) {
448 			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
449 			       xfs_bmbt_disk_get_blockcount(&last) <=
450 			       xfs_bmbt_disk_get_startoff(ep));
451 		}
452 		for (j = 1; j < num_recs; j++) {
453 			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
454 			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
455 			       xfs_bmbt_disk_get_blockcount(ep) <=
456 			       xfs_bmbt_disk_get_startoff(nextp));
457 			ep = nextp;
458 		}
459 
460 		last = *ep;
461 		i += num_recs;
462 		if (bp_release) {
463 			bp_release = 0;
464 			xfs_trans_brelse(NULL, bp);
465 		}
466 		bno = nextbno;
467 		/*
468 		 * If we've reached the end, stop.
469 		 */
470 		if (bno == NULLFSBLOCK)
471 			break;
472 
473 		bp_release = 0;
474 		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
475 		if (!bp) {
476 			bp_release = 1;
477 			error = xfs_bmap_read_buf(mp, NULL, bno, &bp);
478 			if (xfs_metadata_is_sick(error))
479 				xfs_btree_mark_sick(cur);
480 			if (error)
481 				goto error_norelse;
482 		}
483 		block = XFS_BUF_TO_BLOCK(bp);
484 	}
485 
486 	return;
487 
488 error0:
489 	xfs_warn(mp, "%s: at error0", __func__);
490 	if (bp_release)
491 		xfs_trans_brelse(NULL, bp);
492 error_norelse:
493 	xfs_warn(mp, "%s: BAD after btree leaves for %llu extents",
494 		__func__, i);
495 	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
496 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
497 	return;
498 }
499 
500 /*
501  * Validate that the bmbt_irecs being returned from bmapi are valid
502  * given the caller's original parameters.  Specifically check the
503  * ranges of the returned irecs to ensure that they only extend beyond
504  * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
505  */
506 STATIC void
507 xfs_bmap_validate_ret(
508 	xfs_fileoff_t		bno,
509 	xfs_filblks_t		len,
510 	uint32_t		flags,
511 	xfs_bmbt_irec_t		*mval,
512 	int			nmap,
513 	int			ret_nmap)
514 {
515 	int			i;		/* index to map values */
516 
517 	ASSERT(ret_nmap <= nmap);
518 
519 	for (i = 0; i < ret_nmap; i++) {
520 		ASSERT(mval[i].br_blockcount > 0);
521 		if (!(flags & XFS_BMAPI_ENTIRE)) {
522 			ASSERT(mval[i].br_startoff >= bno);
523 			ASSERT(mval[i].br_blockcount <= len);
524 			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
525 			       bno + len);
526 		} else {
527 			ASSERT(mval[i].br_startoff < bno + len);
528 			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
529 			       bno);
530 		}
531 		ASSERT(i == 0 ||
532 		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
533 		       mval[i].br_startoff);
534 		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
535 		       mval[i].br_startblock != HOLESTARTBLOCK);
536 		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
537 		       mval[i].br_state == XFS_EXT_UNWRITTEN);
538 	}
539 }
540 
541 #else
542 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
543 #define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
544 #endif /* DEBUG */
545 
546 /*
547  * Inode fork format manipulation functions
548  */
549 
550 /*
551  * Convert the inode format to extent format if it currently is in btree format,
552  * but the extent list is small enough that it fits into the extent format.
553  *
554  * Since the extents are already in-core, all we have to do is give up the space
555  * for the btree root and pitch the leaf block.
556  */
557 STATIC int				/* error */
558 xfs_bmap_btree_to_extents(
559 	struct xfs_trans	*tp,	/* transaction pointer */
560 	struct xfs_inode	*ip,	/* incore inode pointer */
561 	struct xfs_btree_cur	*cur,	/* btree cursor */
562 	int			*logflagsp, /* inode logging flags */
563 	int			whichfork)  /* data or attr fork */
564 {
565 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
566 	struct xfs_mount	*mp = ip->i_mount;
567 	struct xfs_btree_block	*rblock = ifp->if_broot;
568 	struct xfs_btree_block	*cblock;/* child btree block */
569 	xfs_fsblock_t		cbno;	/* child block number */
570 	struct xfs_buf		*cbp;	/* child block's buffer */
571 	int			error;	/* error return value */
572 	__be64			*pp;	/* ptr to block address */
573 	struct xfs_owner_info	oinfo;
574 
575 	/* check if we actually need the extent format first: */
576 	if (!xfs_bmap_wants_extents(ip, whichfork))
577 		return 0;
578 
579 	ASSERT(cur);
580 	ASSERT(whichfork != XFS_COW_FORK);
581 	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
582 	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
583 	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
584 	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
585 
586 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
587 	cbno = be64_to_cpu(*pp);
588 #ifdef DEBUG
589 	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_verify_fsbno(mp, cbno))) {
590 		xfs_btree_mark_sick(cur);
591 		return -EFSCORRUPTED;
592 	}
593 #endif
594 	error = xfs_bmap_read_buf(mp, tp, cbno, &cbp);
595 	if (xfs_metadata_is_sick(error))
596 		xfs_btree_mark_sick(cur);
597 	if (error)
598 		return error;
599 	cblock = XFS_BUF_TO_BLOCK(cbp);
600 	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
601 		return error;
602 
603 	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
604 	error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
605 			XFS_AG_RESV_NONE, false);
606 	if (error)
607 		return error;
608 
609 	ip->i_nblocks--;
610 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
611 	xfs_trans_binval(tp, cbp);
612 	if (cur->bc_levels[0].bp == cbp)
613 		cur->bc_levels[0].bp = NULL;
614 	xfs_iroot_realloc(ip, -1, whichfork);
615 	ASSERT(ifp->if_broot == NULL);
616 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
617 	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
618 	return 0;
619 }
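
/*
 * Shape of the conversion above, as a sketch: a minimal bmap btree
 *
 *	if_broot (level 1, 1 record) --> cblock (single leaf)
 *
 * collapses into an extents-format fork.  The leaf's records already live
 * in the in-core extent tree, so only the root space and the single child
 * block need to be released.
 */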
620 
621 /*
622  * Convert an extents-format file into a btree-format file.
623  * The new file will have a root block (in the inode) and a single child block.
624  */
625 STATIC int					/* error */
626 xfs_bmap_extents_to_btree(
627 	struct xfs_trans	*tp,		/* transaction pointer */
628 	struct xfs_inode	*ip,		/* incore inode pointer */
629 	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
630 	int			wasdel,		/* converting a delayed alloc */
631 	int			*logflagsp,	/* inode logging flags */
632 	int			whichfork)	/* data or attr fork */
633 {
634 	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
635 	struct xfs_buf		*abp;		/* buffer for ablock */
636 	struct xfs_alloc_arg	args;		/* allocation arguments */
637 	struct xfs_bmbt_rec	*arp;		/* child record pointer */
638 	struct xfs_btree_block	*block;		/* btree root block */
639 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
640 	int			error;		/* error return value */
641 	struct xfs_ifork	*ifp;		/* inode fork pointer */
642 	struct xfs_bmbt_key	*kp;		/* root block key pointer */
643 	struct xfs_mount	*mp;		/* mount structure */
644 	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
645 	struct xfs_iext_cursor	icur;
646 	struct xfs_bmbt_irec	rec;
647 	xfs_extnum_t		cnt = 0;
648 
649 	mp = ip->i_mount;
650 	ASSERT(whichfork != XFS_COW_FORK);
651 	ifp = xfs_ifork_ptr(ip, whichfork);
652 	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);
653 
654 	/*
655 	 * Make space in the inode incore. This needs to be undone if we fail
656 	 * to expand the root.
657 	 */
658 	xfs_iroot_realloc(ip, 1, whichfork);
659 
660 	/*
661 	 * Fill in the root.
662 	 */
663 	block = ifp->if_broot;
664 	xfs_bmbt_init_block(ip, block, NULL, 1, 1);
665 	/*
666 	 * Need a cursor.  Can't allocate until bb_level is filled in.
667 	 */
668 	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
669 	if (wasdel)
670 		cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
671 	/*
672 	 * Convert to a btree with two levels, one record in root.
673 	 */
674 	ifp->if_format = XFS_DINODE_FMT_BTREE;
675 	memset(&args, 0, sizeof(args));
676 	args.tp = tp;
677 	args.mp = mp;
678 	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
679 
680 	args.minlen = args.maxlen = args.prod = 1;
681 	args.wasdel = wasdel;
682 	*logflagsp = 0;
683 	error = xfs_alloc_vextent_start_ag(&args,
684 				XFS_INO_TO_FSB(mp, ip->i_ino));
685 	if (error)
686 		goto out_root_realloc;
687 
688 	/*
689 	 * Allocation can't fail, the space was reserved.
690 	 */
691 	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
692 		error = -ENOSPC;
693 		goto out_root_realloc;
694 	}
695 
696 	cur->bc_bmap.allocated++;
697 	ip->i_nblocks++;
698 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
699 	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
700 			XFS_FSB_TO_DADDR(mp, args.fsbno),
701 			mp->m_bsize, 0, &abp);
702 	if (error)
703 		goto out_unreserve_dquot;
704 
705 	/*
706 	 * Fill in the child block.
707 	 */
708 	ablock = XFS_BUF_TO_BLOCK(abp);
709 	xfs_bmbt_init_block(ip, ablock, abp, 0, 0);
710 
711 	for_each_xfs_iext(ifp, &icur, &rec) {
712 		if (isnullstartblock(rec.br_startblock))
713 			continue;
714 		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
715 		xfs_bmbt_disk_set_all(arp, &rec);
716 		cnt++;
717 	}
718 	ASSERT(cnt == ifp->if_nextents);
719 	xfs_btree_set_numrecs(ablock, cnt);
720 
721 	/*
722 	 * Fill in the root key and pointer.
723 	 */
724 	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
725 	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
726 	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
727 	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
728 						be16_to_cpu(block->bb_level)));
729 	*pp = cpu_to_be64(args.fsbno);
730 
731 	/*
732 	 * Do all this logging at the end so that
733 	 * the root is at the right level.
734 	 */
735 	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
736 	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
737 	ASSERT(*curp == NULL);
738 	*curp = cur;
739 	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
740 	return 0;
741 
742 out_unreserve_dquot:
743 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
744 out_root_realloc:
745 	xfs_iroot_realloc(ip, -1, whichfork);
746 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
747 	ASSERT(ifp->if_broot == NULL);
748 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
749 
750 	return error;
751 }
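
/*
 * Sketch of the fork layout produced above:
 *
 *	if_broot: level 1, numrecs 1, key[1] / ptr[1] --> args.fsbno
 *	ablock:   level 0, numrecs == if_nextents real extent records
 *
 * Delalloc records (isnullstartblock()) are deliberately skipped when the
 * leaf is filled; they exist only in the in-core extent tree and are not
 * counted in if_nextents.
 */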
752 
753 /*
754  * Convert a local file to an extents file.
755  * This code is not used for the data fork of regular files,
756  * since the file data needs to get logged so things will stay consistent.
757  * (The bmap-level manipulations are ok, though.)
758  */
759 void
760 xfs_bmap_local_to_extents_empty(
761 	struct xfs_trans	*tp,
762 	struct xfs_inode	*ip,
763 	int			whichfork)
764 {
765 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
766 
767 	ASSERT(whichfork != XFS_COW_FORK);
768 	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
769 	ASSERT(ifp->if_bytes == 0);
770 	ASSERT(ifp->if_nextents == 0);
771 
772 	xfs_bmap_forkoff_reset(ip, whichfork);
773 	ifp->if_data = NULL;
774 	ifp->if_height = 0;
775 	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
776 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
777 }
778 
779 
780 STATIC int				/* error */
781 xfs_bmap_local_to_extents(
782 	xfs_trans_t	*tp,		/* transaction pointer */
783 	xfs_inode_t	*ip,		/* incore inode pointer */
784 	xfs_extlen_t	total,		/* total blocks needed by transaction */
785 	int		*logflagsp,	/* inode logging flags */
786 	int		whichfork,
787 	void		(*init_fn)(struct xfs_trans *tp,
788 				   struct xfs_buf *bp,
789 				   struct xfs_inode *ip,
790 				   struct xfs_ifork *ifp))
791 {
792 	int		error = 0;
793 	int		flags;		/* logging flags returned */
794 	struct xfs_ifork *ifp;		/* inode fork pointer */
795 	xfs_alloc_arg_t	args;		/* allocation arguments */
796 	struct xfs_buf	*bp;		/* buffer for extent block */
797 	struct xfs_bmbt_irec rec;
798 	struct xfs_iext_cursor icur;
799 
800 	/*
801 	 * We don't want to deal with the case of keeping inode data inline yet,
802 	 * so passing in the data fork of a regular file is invalid.
803 	 */
804 	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
805 	ifp = xfs_ifork_ptr(ip, whichfork);
806 	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
807 
808 	if (!ifp->if_bytes) {
809 		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
810 		flags = XFS_ILOG_CORE;
811 		goto done;
812 	}
813 
814 	flags = 0;
815 	error = 0;
816 	memset(&args, 0, sizeof(args));
817 	args.tp = tp;
818 	args.mp = ip->i_mount;
819 	args.total = total;
820 	args.minlen = args.maxlen = args.prod = 1;
821 	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
822 
823 	/*
824 	 * Allocate a block.  We know we need only one, since the
825 	 * file currently fits in an inode.
826 	 */
829 	error = xfs_alloc_vextent_start_ag(&args,
830 			XFS_INO_TO_FSB(args.mp, ip->i_ino));
831 	if (error)
832 		goto done;
833 
834 	/* Can't fail, the space was reserved. */
835 	ASSERT(args.fsbno != NULLFSBLOCK);
836 	ASSERT(args.len == 1);
837 	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
838 			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
839 			args.mp->m_bsize, 0, &bp);
840 	if (error)
841 		goto done;
842 
843 	/*
844 	 * Initialize the block, copy the data and log the remote buffer.
845 	 *
846 	 * The callout is responsible for logging because the remote format
847 	 * might differ from the local format and thus we don't know how much to
848 	 * log here. Note that init_fn must also set the buffer log item type
849 	 * correctly.
850 	 */
851 	init_fn(tp, bp, ip, ifp);
852 
853 	/* account for the change in fork size */
854 	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
855 	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
856 	flags |= XFS_ILOG_CORE;
857 
858 	ifp->if_data = NULL;
859 	ifp->if_height = 0;
860 
861 	rec.br_startoff = 0;
862 	rec.br_startblock = args.fsbno;
863 	rec.br_blockcount = 1;
864 	rec.br_state = XFS_EXT_NORM;
865 	xfs_iext_first(ifp, &icur);
866 	xfs_iext_insert(ip, &icur, &rec, 0);
867 
868 	ifp->if_nextents = 1;
869 	ip->i_nblocks = 1;
870 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
871 	flags |= xfs_ilog_fext(whichfork);
872 
873 done:
874 	*logflagsp = flags;
875 	return error;
876 }
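
/*
 * A minimal sketch of what an init_fn callout is expected to do, assuming a
 * hypothetical remote format with no block header (real callouts such as
 * xfs_symlink_local_to_remote() also initialise the block header and set
 * the buffer log item type so log recovery can replay the buffer):
 *
 *	static void
 *	example_local_to_remote(
 *		struct xfs_trans	*tp,
 *		struct xfs_buf		*bp,
 *		struct xfs_inode	*ip,
 *		struct xfs_ifork	*ifp)
 *	{
 *		memcpy(bp->b_addr, ifp->if_data, ifp->if_bytes);
 *		xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
 *	}
 */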
877 
878 /*
879  * Called from xfs_bmap_add_attrfork to handle btree format files.
880  */
881 STATIC int					/* error */
882 xfs_bmap_add_attrfork_btree(
883 	xfs_trans_t		*tp,		/* transaction pointer */
884 	xfs_inode_t		*ip,		/* incore inode pointer */
885 	int			*flags)		/* inode logging flags */
886 {
887 	struct xfs_btree_block	*block = ip->i_df.if_broot;
888 	struct xfs_btree_cur	*cur;		/* btree cursor */
889 	int			error;		/* error return value */
890 	xfs_mount_t		*mp;		/* file system mount struct */
891 	int			stat;		/* newroot status */
892 
893 	mp = ip->i_mount;
894 
895 	if (XFS_BMAP_BMDR_SPACE(block) <= xfs_inode_data_fork_size(ip))
896 		*flags |= XFS_ILOG_DBROOT;
897 	else {
898 		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
899 		error = xfs_bmbt_lookup_first(cur, &stat);
900 		if (error)
901 			goto error0;
902 		/* must be at least one entry */
903 		if (XFS_IS_CORRUPT(mp, stat != 1)) {
904 			xfs_btree_mark_sick(cur);
905 			error = -EFSCORRUPTED;
906 			goto error0;
907 		}
908 		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
909 			goto error0;
910 		if (stat == 0) {
911 			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
912 			return -ENOSPC;
913 		}
914 		cur->bc_bmap.allocated = 0;
915 		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
916 	}
917 	return 0;
918 error0:
919 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
920 	return error;
921 }
922 
923 /*
924  * Called from xfs_bmap_add_attrfork to handle extents format files.
925  */
926 STATIC int					/* error */
927 xfs_bmap_add_attrfork_extents(
928 	struct xfs_trans	*tp,		/* transaction pointer */
929 	struct xfs_inode	*ip,		/* incore inode pointer */
930 	int			*flags)		/* inode logging flags */
931 {
932 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
933 	int			error;		/* error return value */
934 
935 	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
936 	    xfs_inode_data_fork_size(ip))
937 		return 0;
938 	cur = NULL;
939 	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
940 					  XFS_DATA_FORK);
941 	if (cur) {
942 		cur->bc_bmap.allocated = 0;
943 		xfs_btree_del_cursor(cur, error);
944 	}
945 	return error;
946 }
947 
948 /*
949  * Called from xfs_bmap_add_attrfork to handle local format files. Each
950  * different data fork content type needs a different callout to do the
951  * conversion. Some are basic and only require special block initialisation
952  * callouts for the data formating, others (directories) are so specialised they
953  * callouts for the data formatting; others (directories) are so specialised they
954  *
955  * XXX (dgc): investigate whether directory conversion can use the generic
956  * formatting callout. It should be possible - it's just a very complex
957  * formatter.
958  */
959 STATIC int					/* error */
960 xfs_bmap_add_attrfork_local(
961 	struct xfs_trans	*tp,		/* transaction pointer */
962 	struct xfs_inode	*ip,		/* incore inode pointer */
963 	int			*flags)		/* inode logging flags */
964 {
965 	struct xfs_da_args	dargs;		/* args for dir/attr code */
966 
967 	if (ip->i_df.if_bytes <= xfs_inode_data_fork_size(ip))
968 		return 0;
969 
970 	if (S_ISDIR(VFS_I(ip)->i_mode)) {
971 		memset(&dargs, 0, sizeof(dargs));
972 		dargs.geo = ip->i_mount->m_dir_geo;
973 		dargs.dp = ip;
974 		dargs.total = dargs.geo->fsbcount;
975 		dargs.whichfork = XFS_DATA_FORK;
976 		dargs.trans = tp;
977 		return xfs_dir2_sf_to_block(&dargs);
978 	}
979 
980 	if (S_ISLNK(VFS_I(ip)->i_mode))
981 		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
982 						 XFS_DATA_FORK,
983 						 xfs_symlink_local_to_remote);
984 
985 	/* should only be called for types that support local format data */
986 	ASSERT(0);
987 	xfs_bmap_mark_sick(ip, XFS_ATTR_FORK);
988 	return -EFSCORRUPTED;
989 }
990 
991 /*
992  * Set an inode attr fork offset based on the format of the data fork.
993  */
994 static int
995 xfs_bmap_set_attrforkoff(
996 	struct xfs_inode	*ip,
997 	int			size,
998 	int			*version)
999 {
1000 	int			default_size = xfs_default_attroffset(ip) >> 3;
1001 
1002 	switch (ip->i_df.if_format) {
1003 	case XFS_DINODE_FMT_DEV:
1004 		ip->i_forkoff = default_size;
1005 		break;
1006 	case XFS_DINODE_FMT_LOCAL:
1007 	case XFS_DINODE_FMT_EXTENTS:
1008 	case XFS_DINODE_FMT_BTREE:
1009 		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1010 		if (!ip->i_forkoff)
1011 			ip->i_forkoff = default_size;
1012 		else if (xfs_has_attr2(ip->i_mount) && version)
1013 			*version = 2;
1014 		break;
1015 	default:
1016 		ASSERT(0);
1017 		return -EINVAL;
1018 	}
1019 
1020 	return 0;
1021 }
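
/*
 * Note that i_forkoff is kept in units of 8 bytes, which is why byte-based
 * defaults such as xfs_default_attroffset() are shifted down by 3 above;
 * e.g. an attr fork starting 120 bytes into the fork area is stored as
 * i_forkoff = 15.
 */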
1022 
1023 /*
1024  * Convert inode from non-attributed to attributed.
1025  * Must not be in a transaction, and ip must not be locked.
1026  */
1027 int						/* error code */
1028 xfs_bmap_add_attrfork(
1029 	xfs_inode_t		*ip,		/* incore inode pointer */
1030 	int			size,		/* space new attribute needs */
1031 	int			rsvd)		/* xact may use reserved blks */
1032 {
1033 	xfs_mount_t		*mp;		/* mount structure */
1034 	xfs_trans_t		*tp;		/* transaction pointer */
1035 	int			blks;		/* space reservation */
1036 	int			version = 1;	/* superblock attr version */
1037 	int			logflags;	/* logging flags */
1038 	int			error;		/* error return value */
1039 
1040 	ASSERT(xfs_inode_has_attr_fork(ip) == 0);
1041 
1042 	mp = ip->i_mount;
1043 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1044 
1045 	blks = XFS_ADDAFORK_SPACE_RES(mp);
1046 
1047 	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
1048 			rsvd, &tp);
1049 	if (error)
1050 		return error;
1051 	if (xfs_inode_has_attr_fork(ip))
1052 		goto trans_cancel;
1053 
1054 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1055 	error = xfs_bmap_set_attrforkoff(ip, size, &version);
1056 	if (error)
1057 		goto trans_cancel;
1058 
1059 	xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
1060 	logflags = 0;
1061 	switch (ip->i_df.if_format) {
1062 	case XFS_DINODE_FMT_LOCAL:
1063 		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
1064 		break;
1065 	case XFS_DINODE_FMT_EXTENTS:
1066 		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
1067 		break;
1068 	case XFS_DINODE_FMT_BTREE:
1069 		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
1070 		break;
1071 	default:
1072 		error = 0;
1073 		break;
1074 	}
1075 	if (logflags)
1076 		xfs_trans_log_inode(tp, ip, logflags);
1077 	if (error)
1078 		goto trans_cancel;
1079 	if (!xfs_has_attr(mp) ||
1080 	   (!xfs_has_attr2(mp) && version == 2)) {
1081 		bool log_sb = false;
1082 
1083 		spin_lock(&mp->m_sb_lock);
1084 		if (!xfs_has_attr(mp)) {
1085 			xfs_add_attr(mp);
1086 			log_sb = true;
1087 		}
1088 		if (!xfs_has_attr2(mp) && version == 2) {
1089 			xfs_add_attr2(mp);
1090 			log_sb = true;
1091 		}
1092 		spin_unlock(&mp->m_sb_lock);
1093 		if (log_sb)
1094 			xfs_log_sb(tp);
1095 	}
1096 
1097 	error = xfs_trans_commit(tp);
1098 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1099 	return error;
1100 
1101 trans_cancel:
1102 	xfs_trans_cancel(tp);
1103 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1104 	return error;
1105 }
1106 
1107 /*
1108  * Internal and external extent tree search functions.
1109  */
1110 
1111 struct xfs_iread_state {
1112 	struct xfs_iext_cursor	icur;
1113 	xfs_extnum_t		loaded;
1114 };
1115 
1116 int
1117 xfs_bmap_complain_bad_rec(
1118 	struct xfs_inode		*ip,
1119 	int				whichfork,
1120 	xfs_failaddr_t			fa,
1121 	const struct xfs_bmbt_irec	*irec)
1122 {
1123 	struct xfs_mount		*mp = ip->i_mount;
1124 	const char			*forkname;
1125 
1126 	switch (whichfork) {
1127 	case XFS_DATA_FORK:	forkname = "data"; break;
1128 	case XFS_ATTR_FORK:	forkname = "attr"; break;
1129 	case XFS_COW_FORK:	forkname = "CoW"; break;
1130 	default:		forkname = "???"; break;
1131 	}
1132 
1133 	xfs_warn(mp,
1134  "Bmap BTree record corruption in inode 0x%llx %s fork detected at %pS!",
1135 				ip->i_ino, forkname, fa);
1136 	xfs_warn(mp,
1137 		"Offset 0x%llx, start block 0x%llx, block count 0x%llx state 0x%x",
1138 		irec->br_startoff, irec->br_startblock, irec->br_blockcount,
1139 		irec->br_state);
1140 
1141 	return -EFSCORRUPTED;
1142 }
1143 
1144 /* Stuff every bmbt record from this block into the incore extent map. */
1145 static int
1146 xfs_iread_bmbt_block(
1147 	struct xfs_btree_cur	*cur,
1148 	int			level,
1149 	void			*priv)
1150 {
1151 	struct xfs_iread_state	*ir = priv;
1152 	struct xfs_mount	*mp = cur->bc_mp;
1153 	struct xfs_inode	*ip = cur->bc_ino.ip;
1154 	struct xfs_btree_block	*block;
1155 	struct xfs_buf		*bp;
1156 	struct xfs_bmbt_rec	*frp;
1157 	xfs_extnum_t		num_recs;
1158 	xfs_extnum_t		j;
1159 	int			whichfork = cur->bc_ino.whichfork;
1160 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1161 
1162 	block = xfs_btree_get_block(cur, level, &bp);
1163 
1164 	/* Abort if we find more records than nextents. */
1165 	num_recs = xfs_btree_get_numrecs(block);
1166 	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
1167 		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
1168 				(unsigned long long)ip->i_ino);
1169 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
1170 				sizeof(*block), __this_address);
1171 		xfs_bmap_mark_sick(ip, whichfork);
1172 		return -EFSCORRUPTED;
1173 	}
1174 
1175 	/* Copy records into the incore cache. */
1176 	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
1177 	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
1178 		struct xfs_bmbt_irec	new;
1179 		xfs_failaddr_t		fa;
1180 
1181 		xfs_bmbt_disk_get_all(frp, &new);
1182 		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
1183 		if (fa) {
1184 			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
1185 					"xfs_iread_extents(2)", frp,
1186 					sizeof(*frp), fa);
1187 			xfs_bmap_mark_sick(ip, whichfork);
1188 			return xfs_bmap_complain_bad_rec(ip, whichfork, fa,
1189 					&new);
1190 		}
1191 		xfs_iext_insert(ip, &ir->icur, &new,
1192 				xfs_bmap_fork_to_state(whichfork));
1193 		trace_xfs_read_extent(ip, &ir->icur,
1194 				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
1195 		xfs_iext_next(ifp, &ir->icur);
1196 	}
1197 
1198 	return 0;
1199 }
1200 
1201 /*
1202  * Read in extents from a btree-format inode.
1203  */
1204 int
1205 xfs_iread_extents(
1206 	struct xfs_trans	*tp,
1207 	struct xfs_inode	*ip,
1208 	int			whichfork)
1209 {
1210 	struct xfs_iread_state	ir;
1211 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1212 	struct xfs_mount	*mp = ip->i_mount;
1213 	struct xfs_btree_cur	*cur;
1214 	int			error;
1215 
1216 	if (!xfs_need_iread_extents(ifp))
1217 		return 0;
1218 
1219 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1220 
1221 	ir.loaded = 0;
1222 	xfs_iext_first(ifp, &ir.icur);
1223 	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
1224 	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
1225 			XFS_BTREE_VISIT_RECORDS, &ir);
1226 	xfs_btree_del_cursor(cur, error);
1227 	if (error)
1228 		goto out;
1229 
1230 	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
1231 		xfs_bmap_mark_sick(ip, whichfork);
1232 		error = -EFSCORRUPTED;
1233 		goto out;
1234 	}
1235 	ASSERT(ir.loaded == xfs_iext_count(ifp));
1236 	/*
1237 	 * Use release semantics so that we can use acquire semantics in
1238 	 * xfs_need_iread_extents and be guaranteed to see a valid mapping tree
1239 	 * after that load.
1240 	 */
1241 	smp_store_release(&ifp->if_needextents, 0);
1242 	return 0;
1243 out:
1244 	if (xfs_metadata_is_sick(error))
1245 		xfs_bmap_mark_sick(ip, whichfork);
1246 	xfs_iext_destroy(ifp);
1247 	return error;
1248 }
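
/*
 * The acquire side mentioned above presumably pairs along these lines, so
 * that a reader observing if_needextents == 0 also observes every
 * xfs_iext_insert() performed before the release store (sketch; the real
 * helper lives in xfs_inode_fork.h):
 *
 *	static inline bool xfs_need_iread_extents(const struct xfs_ifork *ifp)
 *	{
 *		return smp_load_acquire(&ifp->if_needextents) != 0;
 *	}
 */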
1249 
1250 /*
1251  * Returns the relative block number of the first unused block(s) in the given
1252  * fork with at least "len" logically contiguous blocks free.  This is the
1253  * lowest-address hole if the fork has holes, else the first block past the end
1254  * of the fork.  Return 0 if the fork is currently local (in-inode).
1255  */
1256 int						/* error */
1257 xfs_bmap_first_unused(
1258 	struct xfs_trans	*tp,		/* transaction pointer */
1259 	struct xfs_inode	*ip,		/* incore inode */
1260 	xfs_extlen_t		len,		/* size of hole to find */
1261 	xfs_fileoff_t		*first_unused,	/* unused block */
1262 	int			whichfork)	/* data or attr fork */
1263 {
1264 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1265 	struct xfs_bmbt_irec	got;
1266 	struct xfs_iext_cursor	icur;
1267 	xfs_fileoff_t		lastaddr = 0;
1268 	xfs_fileoff_t		lowest, max;
1269 	int			error;
1270 
1271 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
1272 		*first_unused = 0;
1273 		return 0;
1274 	}
1275 
1276 	ASSERT(xfs_ifork_has_extents(ifp));
1277 
1278 	error = xfs_iread_extents(tp, ip, whichfork);
1279 	if (error)
1280 		return error;
1281 
1282 	lowest = max = *first_unused;
1283 	for_each_xfs_iext(ifp, &icur, &got) {
1284 		/*
1285 		 * See if the hole before this extent will work.
1286 		 */
1287 		if (got.br_startoff >= lowest + len &&
1288 		    got.br_startoff - max >= len)
1289 			break;
1290 		lastaddr = got.br_startoff + got.br_blockcount;
1291 		max = XFS_FILEOFF_MAX(lastaddr, lowest);
1292 	}
1293 
1294 	*first_unused = max;
1295 	return 0;
1296 }
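
/*
 * Worked example: with extents covering [0, 5) and [10, 14), a caller
 * passing len = 5 and *first_unused = 0 sees max become 5 after the first
 * extent; the hole before startoff 10 then satisfies both checks
 * (10 >= 0 + 5 and 10 - 5 >= 5), so 5 is returned as the first unused
 * block.
 */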
1297 
1298 /*
1299  * Returns the file-relative block number of the last block - 1 before
1300  * last_block (input value) in the file.
1301  * This is not based on i_size, it is based on the extent records.
1302  * Returns 0 for local files, as they do not have extent records.
1303  */
1304 int						/* error */
1305 xfs_bmap_last_before(
1306 	struct xfs_trans	*tp,		/* transaction pointer */
1307 	struct xfs_inode	*ip,		/* incore inode */
1308 	xfs_fileoff_t		*last_block,	/* last block */
1309 	int			whichfork)	/* data or attr fork */
1310 {
1311 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1312 	struct xfs_bmbt_irec	got;
1313 	struct xfs_iext_cursor	icur;
1314 	int			error;
1315 
1316 	switch (ifp->if_format) {
1317 	case XFS_DINODE_FMT_LOCAL:
1318 		*last_block = 0;
1319 		return 0;
1320 	case XFS_DINODE_FMT_BTREE:
1321 	case XFS_DINODE_FMT_EXTENTS:
1322 		break;
1323 	default:
1324 		ASSERT(0);
1325 		xfs_bmap_mark_sick(ip, whichfork);
1326 		return -EFSCORRUPTED;
1327 	}
1328 
1329 	error = xfs_iread_extents(tp, ip, whichfork);
1330 	if (error)
1331 		return error;
1332 
1333 	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
1334 		*last_block = 0;
1335 	return 0;
1336 }
1337 
1338 int
1339 xfs_bmap_last_extent(
1340 	struct xfs_trans	*tp,
1341 	struct xfs_inode	*ip,
1342 	int			whichfork,
1343 	struct xfs_bmbt_irec	*rec,
1344 	int			*is_empty)
1345 {
1346 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1347 	struct xfs_iext_cursor	icur;
1348 	int			error;
1349 
1350 	error = xfs_iread_extents(tp, ip, whichfork);
1351 	if (error)
1352 		return error;
1353 
1354 	xfs_iext_last(ifp, &icur);
1355 	if (!xfs_iext_get_extent(ifp, &icur, rec))
1356 		*is_empty = 1;
1357 	else
1358 		*is_empty = 0;
1359 	return 0;
1360 }
1361 
1362 /*
1363  * Check the last inode extent to determine whether this allocation will result
1364  * in blocks being allocated at the end of the file. When we allocate new data
1365  * blocks at the end of the file which do not start at the previous data block,
1366  * we will try to align the new blocks at stripe unit boundaries.
1367  *
1368  * Returns true in bma->aeof if the file (fork) is empty, as any new write
1369  * will be at or past the EOF.
1370  */
1371 STATIC int
1372 xfs_bmap_isaeof(
1373 	struct xfs_bmalloca	*bma,
1374 	int			whichfork)
1375 {
1376 	struct xfs_bmbt_irec	rec;
1377 	int			is_empty;
1378 	int			error;
1379 
1380 	bma->aeof = false;
1381 	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1382 				     &is_empty);
1383 	if (error)
1384 		return error;
1385 
1386 	if (is_empty) {
1387 		bma->aeof = true;
1388 		return 0;
1389 	}
1390 
1391 	/*
1392 	 * Check if we are allocating at or past the last extent, or at least
1393 	 * into the last delayed allocated extent.
1394 	 */
1395 	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1396 		(bma->offset >= rec.br_startoff &&
1397 		 isnullstartblock(rec.br_startblock));
1398 	return 0;
1399 }
1400 
1401 /*
1402  * Returns the file-relative block number of the first block past eof in
1403  * the file.  This is not based on i_size, it is based on the extent records.
1404  * Returns 0 for local files, as they do not have extent records.
1405  */
1406 int
1407 xfs_bmap_last_offset(
1408 	struct xfs_inode	*ip,
1409 	xfs_fileoff_t		*last_block,
1410 	int			whichfork)
1411 {
1412 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
1413 	struct xfs_bmbt_irec	rec;
1414 	int			is_empty;
1415 	int			error;
1416 
1417 	*last_block = 0;
1418 
1419 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
1420 		return 0;
1421 
1422 	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp))) {
1423 		xfs_bmap_mark_sick(ip, whichfork);
1424 		return -EFSCORRUPTED;
1425 	}
1426 
1427 	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1428 	if (error || is_empty)
1429 		return error;
1430 
1431 	*last_block = rec.br_startoff + rec.br_blockcount;
1432 	return 0;
1433 }
1434 
1435 /*
1436  * Extent tree manipulation functions used during allocation.
1437  */
1438 
1439 /*
1440  * Convert a delayed allocation to a real allocation.
1441  */
1442 STATIC int				/* error */
1443 xfs_bmap_add_extent_delay_real(
1444 	struct xfs_bmalloca	*bma,
1445 	int			whichfork)
1446 {
1447 	struct xfs_mount	*mp = bma->ip->i_mount;
1448 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
1449 	struct xfs_bmbt_irec	*new = &bma->got;
1450 	int			error;	/* error return value */
1451 	int			i;	/* temp state */
1452 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
1453 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
1454 					/* left is 0, right is 1, prev is 2 */
1455 	int			rval = 0;	/* return value (logging flags) */
1456 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
1457 	xfs_filblks_t		da_new; /* new count del alloc blocks used */
1458 	xfs_filblks_t		da_old; /* old count del alloc blocks used */
1459 	xfs_filblks_t		temp = 0;	/* value for da_new calculations */
1460 	int			tmp_rval;	/* partial logging flags */
1461 	struct xfs_bmbt_irec	old;
1462 
1463 	ASSERT(whichfork != XFS_ATTR_FORK);
1464 	ASSERT(!isnullstartblock(new->br_startblock));
1465 	ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
1466 
1467 	XFS_STATS_INC(mp, xs_add_exlist);
1468 
1469 #define	LEFT		r[0]
1470 #define	RIGHT		r[1]
1471 #define	PREV		r[2]
1472 
1473 	/*
1474 	 * Set up a bunch of variables to make the tests simpler.
1475 	 */
1476 	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
1477 	new_endoff = new->br_startoff + new->br_blockcount;
1478 	ASSERT(isnullstartblock(PREV.br_startblock));
1479 	ASSERT(PREV.br_startoff <= new->br_startoff);
1480 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1481 
1482 	da_old = startblockval(PREV.br_startblock);
1483 	da_new = 0;
1484 
1485 	/*
1486 	 * Set flags determining what part of the previous delayed allocation
1487 	 * extent is being replaced by a real allocation.
1488 	 */
1489 	if (PREV.br_startoff == new->br_startoff)
1490 		state |= BMAP_LEFT_FILLING;
1491 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1492 		state |= BMAP_RIGHT_FILLING;
1493 
1494 	/*
1495 	 * Check and set flags if this segment has a left neighbor.
1496 	 * Don't set contiguous if the combined extent would be too large.
1497 	 */
1498 	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
1499 		state |= BMAP_LEFT_VALID;
1500 		if (isnullstartblock(LEFT.br_startblock))
1501 			state |= BMAP_LEFT_DELAY;
1502 	}
1503 
1504 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1505 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1506 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1507 	    LEFT.br_state == new->br_state &&
1508 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
1509 		state |= BMAP_LEFT_CONTIG;
1510 
1511 	/*
1512 	 * Check and set flags if this segment has a right neighbor.
1513 	 * Don't set contiguous if the combined extent would be too large.
1514 	 * Also check for all-three-contiguous being too large.
1515 	 */
1516 	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
1517 		state |= BMAP_RIGHT_VALID;
1518 		if (isnullstartblock(RIGHT.br_startblock))
1519 			state |= BMAP_RIGHT_DELAY;
1520 	}
1521 
1522 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1523 	    new_endoff == RIGHT.br_startoff &&
1524 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1525 	    new->br_state == RIGHT.br_state &&
1526 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
1527 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1528 		       BMAP_RIGHT_FILLING)) !=
1529 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1530 		       BMAP_RIGHT_FILLING) ||
1531 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1532 			<= XFS_MAX_BMBT_EXTLEN))
1533 		state |= BMAP_RIGHT_CONTIG;
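
	/*
	 * Examples of the combined state at this point:
	 * BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING with both CONTIG bits set
	 * means the real allocation exactly replaces PREV and merges LEFT,
	 * PREV and RIGHT into a single extent; no bits set means the
	 * allocation lands in the middle of PREV and splits it in three.
	 */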
1534 
1535 	error = 0;
1536 	/*
1537 	 * Switch out based on the FILLING and CONTIG state bits.
1538 	 */
1539 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1540 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1541 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1542 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1543 		/*
1544 		 * Filling in all of a previously delayed allocation extent.
1545 		 * The left and right neighbors are both contiguous with new.
1546 		 */
1547 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
1548 
1549 		xfs_iext_remove(bma->ip, &bma->icur, state);
1550 		xfs_iext_remove(bma->ip, &bma->icur, state);
1551 		xfs_iext_prev(ifp, &bma->icur);
1552 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1553 		ifp->if_nextents--;
1554 
1555 		if (bma->cur == NULL)
1556 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1557 		else {
1558 			rval = XFS_ILOG_CORE;
1559 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1560 			if (error)
1561 				goto done;
1562 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1563 				xfs_btree_mark_sick(bma->cur);
1564 				error = -EFSCORRUPTED;
1565 				goto done;
1566 			}
1567 			error = xfs_btree_delete(bma->cur, &i);
1568 			if (error)
1569 				goto done;
1570 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1571 				xfs_btree_mark_sick(bma->cur);
1572 				error = -EFSCORRUPTED;
1573 				goto done;
1574 			}
1575 			error = xfs_btree_decrement(bma->cur, 0, &i);
1576 			if (error)
1577 				goto done;
1578 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1579 				xfs_btree_mark_sick(bma->cur);
1580 				error = -EFSCORRUPTED;
1581 				goto done;
1582 			}
1583 			error = xfs_bmbt_update(bma->cur, &LEFT);
1584 			if (error)
1585 				goto done;
1586 		}
1587 		break;
1588 
1589 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1590 		/*
1591 		 * Filling in all of a previously delayed allocation extent.
1592 		 * The left neighbor is contiguous, the right is not.
1593 		 */
1594 		old = LEFT;
1595 		LEFT.br_blockcount += PREV.br_blockcount;
1596 
1597 		xfs_iext_remove(bma->ip, &bma->icur, state);
1598 		xfs_iext_prev(ifp, &bma->icur);
1599 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1600 
1601 		if (bma->cur == NULL)
1602 			rval = XFS_ILOG_DEXT;
1603 		else {
1604 			rval = 0;
1605 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1606 			if (error)
1607 				goto done;
1608 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1609 				xfs_btree_mark_sick(bma->cur);
1610 				error = -EFSCORRUPTED;
1611 				goto done;
1612 			}
1613 			error = xfs_bmbt_update(bma->cur, &LEFT);
1614 			if (error)
1615 				goto done;
1616 		}
1617 		break;
1618 
1619 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1620 		/*
1621 		 * Filling in all of a previously delayed allocation extent.
1622 		 * The right neighbor is contiguous, the left is not. Take care
1623 		 * with delay -> unwritten extent allocation here because the
1624 		 * delalloc record we are overwriting is always written.
1625 		 */
1626 		PREV.br_startblock = new->br_startblock;
1627 		PREV.br_blockcount += RIGHT.br_blockcount;
1628 		PREV.br_state = new->br_state;
1629 
1630 		xfs_iext_next(ifp, &bma->icur);
1631 		xfs_iext_remove(bma->ip, &bma->icur, state);
1632 		xfs_iext_prev(ifp, &bma->icur);
1633 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1634 
1635 		if (bma->cur == NULL)
1636 			rval = XFS_ILOG_DEXT;
1637 		else {
1638 			rval = 0;
1639 			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1640 			if (error)
1641 				goto done;
1642 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1643 				xfs_btree_mark_sick(bma->cur);
1644 				error = -EFSCORRUPTED;
1645 				goto done;
1646 			}
1647 			error = xfs_bmbt_update(bma->cur, &PREV);
1648 			if (error)
1649 				goto done;
1650 		}
1651 		break;
1652 
1653 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1654 		/*
1655 		 * Filling in all of a previously delayed allocation extent.
1656 		 * Neither the left nor right neighbors are contiguous with
1657 		 * the new one.
1658 		 */
1659 		PREV.br_startblock = new->br_startblock;
1660 		PREV.br_state = new->br_state;
1661 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1662 		ifp->if_nextents++;
1663 
1664 		if (bma->cur == NULL)
1665 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1666 		else {
1667 			rval = XFS_ILOG_CORE;
1668 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1669 			if (error)
1670 				goto done;
1671 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1672 				xfs_btree_mark_sick(bma->cur);
1673 				error = -EFSCORRUPTED;
1674 				goto done;
1675 			}
1676 			error = xfs_btree_insert(bma->cur, &i);
1677 			if (error)
1678 				goto done;
1679 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1680 				xfs_btree_mark_sick(bma->cur);
1681 				error = -EFSCORRUPTED;
1682 				goto done;
1683 			}
1684 		}
1685 		break;
1686 
1687 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1688 		/*
1689 		 * Filling in the first part of a previous delayed allocation.
1690 		 * The left neighbor is contiguous.
1691 		 */
1692 		old = LEFT;
1693 		temp = PREV.br_blockcount - new->br_blockcount;
1694 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1695 				startblockval(PREV.br_startblock));
1696 
1697 		LEFT.br_blockcount += new->br_blockcount;
1698 
1699 		PREV.br_blockcount = temp;
1700 		PREV.br_startoff += new->br_blockcount;
1701 		PREV.br_startblock = nullstartblock(da_new);
1702 
1703 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1704 		xfs_iext_prev(ifp, &bma->icur);
1705 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1706 
1707 		if (bma->cur == NULL)
1708 			rval = XFS_ILOG_DEXT;
1709 		else {
1710 			rval = 0;
1711 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1712 			if (error)
1713 				goto done;
1714 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1715 				xfs_btree_mark_sick(bma->cur);
1716 				error = -EFSCORRUPTED;
1717 				goto done;
1718 			}
1719 			error = xfs_bmbt_update(bma->cur, &LEFT);
1720 			if (error)
1721 				goto done;
1722 		}
1723 		break;
1724 
1725 	case BMAP_LEFT_FILLING:
1726 		/*
1727 		 * Filling in the first part of a previous delayed allocation.
1728 		 * The left neighbor is not contiguous.
1729 		 */
1730 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1731 		ifp->if_nextents++;
1732 
1733 		if (bma->cur == NULL)
1734 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1735 		else {
1736 			rval = XFS_ILOG_CORE;
1737 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1738 			if (error)
1739 				goto done;
1740 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1741 				xfs_btree_mark_sick(bma->cur);
1742 				error = -EFSCORRUPTED;
1743 				goto done;
1744 			}
1745 			error = xfs_btree_insert(bma->cur, &i);
1746 			if (error)
1747 				goto done;
1748 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1749 				xfs_btree_mark_sick(bma->cur);
1750 				error = -EFSCORRUPTED;
1751 				goto done;
1752 			}
1753 		}
1754 
1755 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1756 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1757 					&bma->cur, 1, &tmp_rval, whichfork);
1758 			rval |= tmp_rval;
1759 			if (error)
1760 				goto done;
1761 		}
1762 
1763 		temp = PREV.br_blockcount - new->br_blockcount;
1764 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1765 			startblockval(PREV.br_startblock) -
1766 			(bma->cur ? bma->cur->bc_bmap.allocated : 0));
1767 
1768 		PREV.br_startoff = new_endoff;
1769 		PREV.br_blockcount = temp;
1770 		PREV.br_startblock = nullstartblock(da_new);
1771 		xfs_iext_next(ifp, &bma->icur);
1772 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1773 		xfs_iext_prev(ifp, &bma->icur);
1774 		break;
1775 
1776 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1777 		/*
1778 		 * Filling in the last part of a previous delayed allocation.
1779 		 * The right neighbor is contiguous with the new allocation.
1780 		 */
1781 		old = RIGHT;
1782 		RIGHT.br_startoff = new->br_startoff;
1783 		RIGHT.br_startblock = new->br_startblock;
1784 		RIGHT.br_blockcount += new->br_blockcount;
1785 
1786 		if (bma->cur == NULL)
1787 			rval = XFS_ILOG_DEXT;
1788 		else {
1789 			rval = 0;
1790 			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1791 			if (error)
1792 				goto done;
1793 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1794 				xfs_btree_mark_sick(bma->cur);
1795 				error = -EFSCORRUPTED;
1796 				goto done;
1797 			}
1798 			error = xfs_bmbt_update(bma->cur, &RIGHT);
1799 			if (error)
1800 				goto done;
1801 		}
1802 
1803 		temp = PREV.br_blockcount - new->br_blockcount;
1804 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1805 			startblockval(PREV.br_startblock));
1806 
1807 		PREV.br_blockcount = temp;
1808 		PREV.br_startblock = nullstartblock(da_new);
1809 
1810 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1811 		xfs_iext_next(ifp, &bma->icur);
1812 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
1813 		break;
1814 
1815 	case BMAP_RIGHT_FILLING:
1816 		/*
1817 		 * Filling in the last part of a previous delayed allocation.
1818 		 * The right neighbor is not contiguous.
1819 		 */
1820 		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1821 		ifp->if_nextents++;
1822 
1823 		if (bma->cur == NULL)
1824 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1825 		else {
1826 			rval = XFS_ILOG_CORE;
1827 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1828 			if (error)
1829 				goto done;
1830 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1831 				xfs_btree_mark_sick(bma->cur);
1832 				error = -EFSCORRUPTED;
1833 				goto done;
1834 			}
1835 			error = xfs_btree_insert(bma->cur, &i);
1836 			if (error)
1837 				goto done;
1838 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1839 				xfs_btree_mark_sick(bma->cur);
1840 				error = -EFSCORRUPTED;
1841 				goto done;
1842 			}
1843 		}
1844 
1845 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1846 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1847 				&bma->cur, 1, &tmp_rval, whichfork);
1848 			rval |= tmp_rval;
1849 			if (error)
1850 				goto done;
1851 		}
1852 
1853 		temp = PREV.br_blockcount - new->br_blockcount;
1854 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1855 			startblockval(PREV.br_startblock) -
1856 			(bma->cur ? bma->cur->bc_bmap.allocated : 0));
1857 
1858 		PREV.br_startblock = nullstartblock(da_new);
1859 		PREV.br_blockcount = temp;
1860 		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1861 		xfs_iext_next(ifp, &bma->icur);
1862 		break;
1863 
1864 	case 0:
1865 		/*
1866 		 * Filling in the middle part of a previous delayed allocation.
1867 		 * Contiguity is impossible here.
1868 		 * This case is avoided almost all the time.
1869 		 *
1870 		 * We start with a delayed allocation:
1871 		 *
1872 		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
1873 		 *  PREV @ idx
1874 		 *
1875 		 * and we are allocating:
1876 		 *                     +rrrrrrrrrrrrrrrrr+
1877 		 *			      new
1878 		 *
1879 		 * and we set it up for insertion as:
1880 		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
1881 		 *                            new
1882 		 *  PREV @ idx          LEFT              RIGHT
1883 		 *                      inserted at idx + 1
1884 		 */
1885 		old = PREV;
1886 
1887 		/* LEFT is the new middle */
1888 		LEFT = *new;
1889 
1890 		/* RIGHT is the new right */
1891 		RIGHT.br_state = PREV.br_state;
1892 		RIGHT.br_startoff = new_endoff;
1893 		RIGHT.br_blockcount =
1894 			PREV.br_startoff + PREV.br_blockcount - new_endoff;
1895 		RIGHT.br_startblock =
1896 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1897 					RIGHT.br_blockcount));
1898 
1899 		/* truncate PREV */
1900 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
1901 		PREV.br_startblock =
1902 			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1903 					PREV.br_blockcount));
1904 		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1905 
1906 		xfs_iext_next(ifp, &bma->icur);
1907 		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
1908 		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
1909 		ifp->if_nextents++;
1910 
1911 		if (bma->cur == NULL)
1912 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1913 		else {
1914 			rval = XFS_ILOG_CORE;
1915 			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1916 			if (error)
1917 				goto done;
1918 			if (XFS_IS_CORRUPT(mp, i != 0)) {
1919 				xfs_btree_mark_sick(bma->cur);
1920 				error = -EFSCORRUPTED;
1921 				goto done;
1922 			}
1923 			error = xfs_btree_insert(bma->cur, &i);
1924 			if (error)
1925 				goto done;
1926 			if (XFS_IS_CORRUPT(mp, i != 1)) {
1927 				xfs_btree_mark_sick(bma->cur);
1928 				error = -EFSCORRUPTED;
1929 				goto done;
1930 			}
1931 		}
1932 
1933 		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1934 			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1935 					&bma->cur, 1, &tmp_rval, whichfork);
1936 			rval |= tmp_rval;
1937 			if (error)
1938 				goto done;
1939 		}
1940 
1941 		da_new = startblockval(PREV.br_startblock) +
1942 			 startblockval(RIGHT.br_startblock);
1943 		break;
1944 
1945 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1946 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1947 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1948 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1949 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1950 	case BMAP_LEFT_CONTIG:
1951 	case BMAP_RIGHT_CONTIG:
1952 		/*
1953 		 * These cases are all impossible.
1954 		 */
1955 		ASSERT(0);
1956 	}
1957 
1958 	/* add reverse mapping unless caller opted out */
1959 	if (!(bma->flags & XFS_BMAPI_NORMAP))
1960 		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
1961 
1962 	/* convert to a btree if necessary */
1963 	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1964 		int	tmp_logflags;	/* partial log flag return val */
1965 
1966 		ASSERT(bma->cur == NULL);
1967 		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1968 				&bma->cur, da_old > 0, &tmp_logflags,
1969 				whichfork);
1970 		bma->logflags |= tmp_logflags;
1971 		if (error)
1972 			goto done;
1973 	}
1974 
1975 	if (da_new != da_old)
1976 		xfs_mod_delalloc(mp, (int64_t)da_new - da_old);
1977 
1978 	if (bma->cur) {
1979 		da_new += bma->cur->bc_bmap.allocated;
1980 		bma->cur->bc_bmap.allocated = 0;
1981 	}
1982 
1983 	/* adjust for changes in reserved delayed indirect blocks */
1984 	if (da_new != da_old) {
1985 		ASSERT(state == 0 || da_new < da_old);
1986 		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
1987 				false);
1988 	}
1989 
1990 	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
1991 done:
1992 	if (whichfork != XFS_COW_FORK)
1993 		bma->logflags |= rval;
1994 	return error;
1995 #undef	LEFT
1996 #undef	RIGHT
1997 #undef	PREV
1998 }
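
/*
 * Illustrative sketch only (not part of the original file): the switch in
 * xfs_bmap_add_extent_delay_real() above dispatches on a mask built from the
 * four FILLING/CONTIG state bits.  A minimal example of assembling and
 * testing such a mask; the helper name is hypothetical.
 */
static inline bool
xfs_bmap_example_is_left_merge(
	uint32_t		state)	/* BMAP_* state flags */
{
	uint32_t		mask = state & (BMAP_LEFT_FILLING |
						BMAP_LEFT_CONTIG |
						BMAP_RIGHT_FILLING |
						BMAP_RIGHT_CONTIG);

	/* matches the BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG case above */
	return mask == (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG);
}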
1999 
2000 /*
2001  * Convert an unwritten allocation to a real allocation or vice versa.
2002  */
2003 int					/* error */
2004 xfs_bmap_add_extent_unwritten_real(
2005 	struct xfs_trans	*tp,
2006 	xfs_inode_t		*ip,	/* incore inode pointer */
2007 	int			whichfork,
2008 	struct xfs_iext_cursor	*icur,
2009 	struct xfs_btree_cur	**curp,	/* if *curp is null, not a btree */
2010 	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
2011 	int			*logflagsp) /* inode logging flags */
2012 {
2013 	struct xfs_btree_cur	*cur;	/* btree cursor */
2014 	int			error;	/* error return value */
2015 	int			i;	/* temp state */
2016 	struct xfs_ifork	*ifp;	/* inode fork pointer */
2017 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
2018 	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
2019 					/* left is 0, right is 1, prev is 2 */
2020 	int			rval = 0;	/* return value (logging flags) */
2021 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2022 	struct xfs_mount	*mp = ip->i_mount;
2023 	struct xfs_bmbt_irec	old;
2024 
2025 	*logflagsp = 0;
2026 
2027 	cur = *curp;
2028 	ifp = xfs_ifork_ptr(ip, whichfork);
2029 
2030 	ASSERT(!isnullstartblock(new->br_startblock));
2031 
2032 	XFS_STATS_INC(mp, xs_add_exlist);
2033 
2034 #define	LEFT		r[0]
2035 #define	RIGHT		r[1]
2036 #define	PREV		r[2]
2037 
2038 	/*
2039 	 * Set up a bunch of variables to make the tests simpler.
2040 	 */
2041 	error = 0;
2042 	xfs_iext_get_extent(ifp, icur, &PREV);
2043 	ASSERT(new->br_state != PREV.br_state);
2044 	new_endoff = new->br_startoff + new->br_blockcount;
2045 	ASSERT(PREV.br_startoff <= new->br_startoff);
2046 	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2047 
2048 	/*
2049 	 * Set flags determining what part of the previous oldext allocation
2050 	 * extent is being replaced by a newext allocation.
2051 	 */
2052 	if (PREV.br_startoff == new->br_startoff)
2053 		state |= BMAP_LEFT_FILLING;
2054 	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2055 		state |= BMAP_RIGHT_FILLING;
2056 
2057 	/*
2058 	 * Check and set flags if this segment has a left neighbor.
2059 	 * Don't set contiguous if the combined extent would be too large.
2060 	 */
2061 	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
2062 		state |= BMAP_LEFT_VALID;
2063 		if (isnullstartblock(LEFT.br_startblock))
2064 			state |= BMAP_LEFT_DELAY;
2065 	}
2066 
2067 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2068 	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2069 	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2070 	    LEFT.br_state == new->br_state &&
2071 	    LEFT.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2072 		state |= BMAP_LEFT_CONTIG;
2073 
2074 	/*
2075 	 * Check and set flags if this segment has a right neighbor.
2076 	 * Don't set contiguous if the combined extent would be too large.
2077 	 * Also check for all-three-contiguous being too large.
2078 	 */
2079 	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
2080 		state |= BMAP_RIGHT_VALID;
2081 		if (isnullstartblock(RIGHT.br_startblock))
2082 			state |= BMAP_RIGHT_DELAY;
2083 	}
2084 
2085 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2086 	    new_endoff == RIGHT.br_startoff &&
2087 	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2088 	    new->br_state == RIGHT.br_state &&
2089 	    new->br_blockcount + RIGHT.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2090 	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2091 		       BMAP_RIGHT_FILLING)) !=
2092 		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2093 		       BMAP_RIGHT_FILLING) ||
2094 	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2095 			<= XFS_MAX_BMBT_EXTLEN))
2096 		state |= BMAP_RIGHT_CONTIG;
2097 
2098 	/*
2099 	 * Switch out based on the FILLING and CONTIG state bits.
2100 	 */
2101 	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2102 			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2103 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2104 	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2105 		/*
2106 		 * Setting all of a previous oldext extent to newext.
2107 		 * The left and right neighbors are both contiguous with new.
2108 		 */
2109 		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2110 
2111 		xfs_iext_remove(ip, icur, state);
2112 		xfs_iext_remove(ip, icur, state);
2113 		xfs_iext_prev(ifp, icur);
2114 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2115 		ifp->if_nextents -= 2;
2116 		if (cur == NULL)
2117 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2118 		else {
2119 			rval = XFS_ILOG_CORE;
2120 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2121 			if (error)
2122 				goto done;
2123 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2124 				xfs_btree_mark_sick(cur);
2125 				error = -EFSCORRUPTED;
2126 				goto done;
2127 			}
2128 			if ((error = xfs_btree_delete(cur, &i)))
2129 				goto done;
2130 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2131 				xfs_btree_mark_sick(cur);
2132 				error = -EFSCORRUPTED;
2133 				goto done;
2134 			}
2135 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2136 				goto done;
2137 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2138 				xfs_btree_mark_sick(cur);
2139 				error = -EFSCORRUPTED;
2140 				goto done;
2141 			}
2142 			if ((error = xfs_btree_delete(cur, &i)))
2143 				goto done;
2144 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2145 				xfs_btree_mark_sick(cur);
2146 				error = -EFSCORRUPTED;
2147 				goto done;
2148 			}
2149 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2150 				goto done;
2151 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2152 				xfs_btree_mark_sick(cur);
2153 				error = -EFSCORRUPTED;
2154 				goto done;
2155 			}
2156 			error = xfs_bmbt_update(cur, &LEFT);
2157 			if (error)
2158 				goto done;
2159 		}
2160 		break;
2161 
2162 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2163 		/*
2164 		 * Setting all of a previous oldext extent to newext.
2165 		 * The left neighbor is contiguous, the right is not.
2166 		 */
2167 		LEFT.br_blockcount += PREV.br_blockcount;
2168 
2169 		xfs_iext_remove(ip, icur, state);
2170 		xfs_iext_prev(ifp, icur);
2171 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2172 		ifp->if_nextents--;
2173 		if (cur == NULL)
2174 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2175 		else {
2176 			rval = XFS_ILOG_CORE;
2177 			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2178 			if (error)
2179 				goto done;
2180 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2181 				xfs_btree_mark_sick(cur);
2182 				error = -EFSCORRUPTED;
2183 				goto done;
2184 			}
2185 			if ((error = xfs_btree_delete(cur, &i)))
2186 				goto done;
2187 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2188 				xfs_btree_mark_sick(cur);
2189 				error = -EFSCORRUPTED;
2190 				goto done;
2191 			}
2192 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2193 				goto done;
2194 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2195 				xfs_btree_mark_sick(cur);
2196 				error = -EFSCORRUPTED;
2197 				goto done;
2198 			}
2199 			error = xfs_bmbt_update(cur, &LEFT);
2200 			if (error)
2201 				goto done;
2202 		}
2203 		break;
2204 
2205 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2206 		/*
2207 		 * Setting all of a previous oldext extent to newext.
2208 		 * The right neighbor is contiguous, the left is not.
2209 		 */
2210 		PREV.br_blockcount += RIGHT.br_blockcount;
2211 		PREV.br_state = new->br_state;
2212 
2213 		xfs_iext_next(ifp, icur);
2214 		xfs_iext_remove(ip, icur, state);
2215 		xfs_iext_prev(ifp, icur);
2216 		xfs_iext_update_extent(ip, state, icur, &PREV);
2217 		ifp->if_nextents--;
2218 
2219 		if (cur == NULL)
2220 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2221 		else {
2222 			rval = XFS_ILOG_CORE;
2223 			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2224 			if (error)
2225 				goto done;
2226 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2227 				xfs_btree_mark_sick(cur);
2228 				error = -EFSCORRUPTED;
2229 				goto done;
2230 			}
2231 			if ((error = xfs_btree_delete(cur, &i)))
2232 				goto done;
2233 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2234 				xfs_btree_mark_sick(cur);
2235 				error = -EFSCORRUPTED;
2236 				goto done;
2237 			}
2238 			if ((error = xfs_btree_decrement(cur, 0, &i)))
2239 				goto done;
2240 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2241 				xfs_btree_mark_sick(cur);
2242 				error = -EFSCORRUPTED;
2243 				goto done;
2244 			}
2245 			error = xfs_bmbt_update(cur, &PREV);
2246 			if (error)
2247 				goto done;
2248 		}
2249 		break;
2250 
2251 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2252 		/*
2253 		 * Setting all of a previous oldext extent to newext.
2254 		 * Neither the left nor right neighbors are contiguous with
2255 		 * the new one.
2256 		 */
2257 		PREV.br_state = new->br_state;
2258 		xfs_iext_update_extent(ip, state, icur, &PREV);
2259 
2260 		if (cur == NULL)
2261 			rval = XFS_ILOG_DEXT;
2262 		else {
2263 			rval = 0;
2264 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2265 			if (error)
2266 				goto done;
2267 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2268 				xfs_btree_mark_sick(cur);
2269 				error = -EFSCORRUPTED;
2270 				goto done;
2271 			}
2272 			error = xfs_bmbt_update(cur, &PREV);
2273 			if (error)
2274 				goto done;
2275 		}
2276 		break;
2277 
2278 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2279 		/*
2280 		 * Setting the first part of a previous oldext extent to newext.
2281 		 * The left neighbor is contiguous.
2282 		 */
2283 		LEFT.br_blockcount += new->br_blockcount;
2284 
2285 		old = PREV;
2286 		PREV.br_startoff += new->br_blockcount;
2287 		PREV.br_startblock += new->br_blockcount;
2288 		PREV.br_blockcount -= new->br_blockcount;
2289 
2290 		xfs_iext_update_extent(ip, state, icur, &PREV);
2291 		xfs_iext_prev(ifp, icur);
2292 		xfs_iext_update_extent(ip, state, icur, &LEFT);
2293 
2294 		if (cur == NULL)
2295 			rval = XFS_ILOG_DEXT;
2296 		else {
2297 			rval = 0;
2298 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2299 			if (error)
2300 				goto done;
2301 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2302 				xfs_btree_mark_sick(cur);
2303 				error = -EFSCORRUPTED;
2304 				goto done;
2305 			}
2306 			error = xfs_bmbt_update(cur, &PREV);
2307 			if (error)
2308 				goto done;
2309 			error = xfs_btree_decrement(cur, 0, &i);
2310 			if (error)
2311 				goto done;
2312 			error = xfs_bmbt_update(cur, &LEFT);
2313 			if (error)
2314 				goto done;
2315 		}
2316 		break;
2317 
2318 	case BMAP_LEFT_FILLING:
2319 		/*
2320 		 * Setting the first part of a previous oldext extent to newext.
2321 		 * The left neighbor is not contiguous.
2322 		 */
2323 		old = PREV;
2324 		PREV.br_startoff += new->br_blockcount;
2325 		PREV.br_startblock += new->br_blockcount;
2326 		PREV.br_blockcount -= new->br_blockcount;
2327 
2328 		xfs_iext_update_extent(ip, state, icur, &PREV);
2329 		xfs_iext_insert(ip, icur, new, state);
2330 		ifp->if_nextents++;
2331 
2332 		if (cur == NULL)
2333 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2334 		else {
2335 			rval = XFS_ILOG_CORE;
2336 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2337 			if (error)
2338 				goto done;
2339 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2340 				xfs_btree_mark_sick(cur);
2341 				error = -EFSCORRUPTED;
2342 				goto done;
2343 			}
2344 			error = xfs_bmbt_update(cur, &PREV);
2345 			if (error)
2346 				goto done;
2347 			cur->bc_rec.b = *new;
2348 			if ((error = xfs_btree_insert(cur, &i)))
2349 				goto done;
2350 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2351 				xfs_btree_mark_sick(cur);
2352 				error = -EFSCORRUPTED;
2353 				goto done;
2354 			}
2355 		}
2356 		break;
2357 
2358 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2359 		/*
2360 		 * Setting the last part of a previous oldext extent to newext.
2361 		 * The right neighbor is contiguous with the new allocation.
2362 		 */
2363 		old = PREV;
2364 		PREV.br_blockcount -= new->br_blockcount;
2365 
2366 		RIGHT.br_startoff = new->br_startoff;
2367 		RIGHT.br_startblock = new->br_startblock;
2368 		RIGHT.br_blockcount += new->br_blockcount;
2369 
2370 		xfs_iext_update_extent(ip, state, icur, &PREV);
2371 		xfs_iext_next(ifp, icur);
2372 		xfs_iext_update_extent(ip, state, icur, &RIGHT);
2373 
2374 		if (cur == NULL)
2375 			rval = XFS_ILOG_DEXT;
2376 		else {
2377 			rval = 0;
2378 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2379 			if (error)
2380 				goto done;
2381 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2382 				xfs_btree_mark_sick(cur);
2383 				error = -EFSCORRUPTED;
2384 				goto done;
2385 			}
2386 			error = xfs_bmbt_update(cur, &PREV);
2387 			if (error)
2388 				goto done;
2389 			error = xfs_btree_increment(cur, 0, &i);
2390 			if (error)
2391 				goto done;
2392 			error = xfs_bmbt_update(cur, &RIGHT);
2393 			if (error)
2394 				goto done;
2395 		}
2396 		break;
2397 
2398 	case BMAP_RIGHT_FILLING:
2399 		/*
2400 		 * Setting the last part of a previous oldext extent to newext.
2401 		 * The right neighbor is not contiguous.
2402 		 */
2403 		old = PREV;
2404 		PREV.br_blockcount -= new->br_blockcount;
2405 
2406 		xfs_iext_update_extent(ip, state, icur, &PREV);
2407 		xfs_iext_next(ifp, icur);
2408 		xfs_iext_insert(ip, icur, new, state);
2409 		ifp->if_nextents++;
2410 
2411 		if (cur == NULL)
2412 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2413 		else {
2414 			rval = XFS_ILOG_CORE;
2415 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2416 			if (error)
2417 				goto done;
2418 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2419 				xfs_btree_mark_sick(cur);
2420 				error = -EFSCORRUPTED;
2421 				goto done;
2422 			}
2423 			error = xfs_bmbt_update(cur, &PREV);
2424 			if (error)
2425 				goto done;
2426 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2427 			if (error)
2428 				goto done;
2429 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2430 				xfs_btree_mark_sick(cur);
2431 				error = -EFSCORRUPTED;
2432 				goto done;
2433 			}
2434 			if ((error = xfs_btree_insert(cur, &i)))
2435 				goto done;
2436 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2437 				xfs_btree_mark_sick(cur);
2438 				error = -EFSCORRUPTED;
2439 				goto done;
2440 			}
2441 		}
2442 		break;
2443 
2444 	case 0:
2445 		/*
2446 		 * Setting the middle part of a previous oldext extent to
2447 		 * newext.  Contiguity is impossible here.
2448 		 * One extent becomes three extents.
2449 		 */
2450 		old = PREV;
2451 		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2452 
2453 		r[0] = *new;
2454 		r[1].br_startoff = new_endoff;
2455 		r[1].br_blockcount =
2456 			old.br_startoff + old.br_blockcount - new_endoff;
2457 		r[1].br_startblock = new->br_startblock + new->br_blockcount;
2458 		r[1].br_state = PREV.br_state;
2459 
2460 		xfs_iext_update_extent(ip, state, icur, &PREV);
2461 		xfs_iext_next(ifp, icur);
2462 		xfs_iext_insert(ip, icur, &r[1], state);
2463 		xfs_iext_insert(ip, icur, &r[0], state);
2464 		ifp->if_nextents += 2;
2465 
2466 		if (cur == NULL)
2467 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2468 		else {
2469 			rval = XFS_ILOG_CORE;
2470 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2471 			if (error)
2472 				goto done;
2473 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2474 				xfs_btree_mark_sick(cur);
2475 				error = -EFSCORRUPTED;
2476 				goto done;
2477 			}
2478 			/* new right extent - oldext */
2479 			error = xfs_bmbt_update(cur, &r[1]);
2480 			if (error)
2481 				goto done;
2482 			/* new left extent - oldext */
2483 			cur->bc_rec.b = PREV;
2484 			if ((error = xfs_btree_insert(cur, &i)))
2485 				goto done;
2486 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2487 				xfs_btree_mark_sick(cur);
2488 				error = -EFSCORRUPTED;
2489 				goto done;
2490 			}
2491 			/*
2492 			 * Reset the cursor to the position of the new extent
2493 			 * we are about to insert as we can't trust it after
2494 			 * the previous insert.
2495 			 */
2496 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2497 			if (error)
2498 				goto done;
2499 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2500 				xfs_btree_mark_sick(cur);
2501 				error = -EFSCORRUPTED;
2502 				goto done;
2503 			}
2504 			/* new middle extent - newext */
2505 			if ((error = xfs_btree_insert(cur, &i)))
2506 				goto done;
2507 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2508 				xfs_btree_mark_sick(cur);
2509 				error = -EFSCORRUPTED;
2510 				goto done;
2511 			}
2512 		}
2513 		break;
2514 
2515 	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2516 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2517 	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2518 	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2519 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2520 	case BMAP_LEFT_CONTIG:
2521 	case BMAP_RIGHT_CONTIG:
2522 		/*
2523 		 * These cases are all impossible.
2524 		 */
2525 		ASSERT(0);
2526 	}
2527 
2528 	/* update reverse mappings */
2529 	xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2530 
2531 	/* convert to a btree if necessary */
2532 	if (xfs_bmap_needs_btree(ip, whichfork)) {
2533 		int	tmp_logflags;	/* partial log flag return val */
2534 
2535 		ASSERT(cur == NULL);
2536 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2537 				&tmp_logflags, whichfork);
2538 		*logflagsp |= tmp_logflags;
2539 		if (error)
2540 			goto done;
2541 	}
2542 
2543 	/* clear out the allocated field, done with it now in any case. */
2544 	if (cur) {
2545 		cur->bc_bmap.allocated = 0;
2546 		*curp = cur;
2547 	}
2548 
2549 	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2550 done:
2551 	*logflagsp |= rval;
2552 	return error;
2553 #undef	LEFT
2554 #undef	RIGHT
2555 #undef	PREV
2556 }
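
/*
 * Illustrative sketch only, with hypothetical numbers, for the "case 0"
 * three-way split above: if PREV covers file offsets [100, 160) and the
 * converted range is [120, 130), PREV is trimmed to [100, 120), the new
 * middle extent is [120, 130), and the right remainder covers [130, 160)
 * with blockcount 100 + 60 - 130 = 30.  The helper name is hypothetical.
 */
static inline xfs_filblks_t
xfs_bmap_example_right_remainder(
	const struct xfs_bmbt_irec	*old,	/* extent being split */
	xfs_fileoff_t			new_endoff)
{
	return old->br_startoff + old->br_blockcount - new_endoff;
}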
2557 
2558 /*
2559  * Convert a hole to a delayed allocation.
2560  */
2561 STATIC void
2562 xfs_bmap_add_extent_hole_delay(
2563 	xfs_inode_t		*ip,	/* incore inode pointer */
2564 	int			whichfork,
2565 	struct xfs_iext_cursor	*icur,
2566 	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
2567 {
2568 	struct xfs_ifork	*ifp;	/* inode fork pointer */
2569 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
2570 	xfs_filblks_t		newlen = 0;	/* new indirect size */
2571 	xfs_filblks_t		oldlen = 0;	/* old indirect size */
2572 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
2573 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2574 	xfs_filblks_t		temp;	 /* temp for indirect calculations */
2575 
2576 	ifp = xfs_ifork_ptr(ip, whichfork);
2577 	ASSERT(isnullstartblock(new->br_startblock));
2578 
2579 	/*
2580 	 * Check and set flags if this segment has a left neighbor
2581 	 */
2582 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2583 		state |= BMAP_LEFT_VALID;
2584 		if (isnullstartblock(left.br_startblock))
2585 			state |= BMAP_LEFT_DELAY;
2586 	}
2587 
2588 	/*
2589 	 * Check and set flags if the current (right) segment exists.
2590 	 * If it doesn't exist, we're converting the hole at end-of-file.
2591 	 */
2592 	if (xfs_iext_get_extent(ifp, icur, &right)) {
2593 		state |= BMAP_RIGHT_VALID;
2594 		if (isnullstartblock(right.br_startblock))
2595 			state |= BMAP_RIGHT_DELAY;
2596 	}
2597 
2598 	/*
2599 	 * Set contiguity flags on the left and right neighbors.
2600 	 * Don't let extents get too large, even if the pieces are contiguous.
2601 	 */
2602 	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2603 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
2604 	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2605 		state |= BMAP_LEFT_CONTIG;
2606 
2607 	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2608 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
2609 	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2610 	    (!(state & BMAP_LEFT_CONTIG) ||
2611 	     (left.br_blockcount + new->br_blockcount +
2612 	      right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)))
2613 		state |= BMAP_RIGHT_CONTIG;
2614 
2615 	/*
2616 	 * Switch out based on the contiguity flags.
2617 	 */
2618 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2619 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2620 		/*
2621 		 * New allocation is contiguous with delayed allocations
2622 		 * on the left and on the right.
2623 		 * Merge all three into a single extent record.
2624 		 */
2625 		temp = left.br_blockcount + new->br_blockcount +
2626 			right.br_blockcount;
2627 
2628 		oldlen = startblockval(left.br_startblock) +
2629 			startblockval(new->br_startblock) +
2630 			startblockval(right.br_startblock);
2631 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2632 					 oldlen);
2633 		left.br_startblock = nullstartblock(newlen);
2634 		left.br_blockcount = temp;
2635 
2636 		xfs_iext_remove(ip, icur, state);
2637 		xfs_iext_prev(ifp, icur);
2638 		xfs_iext_update_extent(ip, state, icur, &left);
2639 		break;
2640 
2641 	case BMAP_LEFT_CONTIG:
2642 		/*
2643 		 * New allocation is contiguous with a delayed allocation
2644 		 * on the left.
2645 		 * Merge the new allocation with the left neighbor.
2646 		 */
2647 		temp = left.br_blockcount + new->br_blockcount;
2648 
2649 		oldlen = startblockval(left.br_startblock) +
2650 			startblockval(new->br_startblock);
2651 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2652 					 oldlen);
2653 		left.br_blockcount = temp;
2654 		left.br_startblock = nullstartblock(newlen);
2655 
2656 		xfs_iext_prev(ifp, icur);
2657 		xfs_iext_update_extent(ip, state, icur, &left);
2658 		break;
2659 
2660 	case BMAP_RIGHT_CONTIG:
2661 		/*
2662 		 * New allocation is contiguous with a delayed allocation
2663 		 * on the right.
2664 		 * Merge the new allocation with the right neighbor.
2665 		 */
2666 		temp = new->br_blockcount + right.br_blockcount;
2667 		oldlen = startblockval(new->br_startblock) +
2668 			startblockval(right.br_startblock);
2669 		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2670 					 oldlen);
2671 		right.br_startoff = new->br_startoff;
2672 		right.br_startblock = nullstartblock(newlen);
2673 		right.br_blockcount = temp;
2674 		xfs_iext_update_extent(ip, state, icur, &right);
2675 		break;
2676 
2677 	case 0:
2678 		/*
2679 		 * New allocation is not contiguous with another
2680 		 * delayed allocation.
2681 		 * Insert a new entry.
2682 		 */
2683 		oldlen = newlen = 0;
2684 		xfs_iext_insert(ip, icur, new, state);
2685 		break;
2686 	}
2687 	if (oldlen != newlen) {
2688 		ASSERT(oldlen > newlen);
2689 		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2690 				 false);
2691 		/*
2692 		 * Nothing to do for disk quota accounting here.
2693 		 */
2694 		xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen);
2695 	}
2696 }
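
/*
 * Illustrative sketch only: when the merge cases above combine delalloc
 * extents, the worst-case indirect block estimate is recomputed for the
 * merged length and clamped to the sum of the existing reservations, so
 * merging never grows the reservation.  The helper name is hypothetical.
 */
static inline xfs_filblks_t
xfs_bmap_example_merged_indlen(
	struct xfs_inode		*ip,
	const struct xfs_bmbt_irec	*left,
	const struct xfs_bmbt_irec	*new)
{
	xfs_filblks_t	oldlen = startblockval(left->br_startblock) +
				 startblockval(new->br_startblock);

	return XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
					left->br_blockcount +
					new->br_blockcount),
			       oldlen);
}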
2697 
2698 /*
2699  * Convert a hole to a real allocation.
2700  */
2701 STATIC int				/* error */
2702 xfs_bmap_add_extent_hole_real(
2703 	struct xfs_trans	*tp,
2704 	struct xfs_inode	*ip,
2705 	int			whichfork,
2706 	struct xfs_iext_cursor	*icur,
2707 	struct xfs_btree_cur	**curp,
2708 	struct xfs_bmbt_irec	*new,
2709 	int			*logflagsp,
2710 	uint32_t		flags)
2711 {
2712 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
2713 	struct xfs_mount	*mp = ip->i_mount;
2714 	struct xfs_btree_cur	*cur = *curp;
2715 	int			error;	/* error return value */
2716 	int			i;	/* temp state */
2717 	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
2718 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
2719 	int			rval = 0;	/* return value (logging flags) */
2720 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
2721 	struct xfs_bmbt_irec	old;
2722 
2723 	ASSERT(!isnullstartblock(new->br_startblock));
2724 	ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
2725 
2726 	XFS_STATS_INC(mp, xs_add_exlist);
2727 
2728 	/*
2729 	 * Check and set flags if this segment has a left neighbor.
2730 	 */
2731 	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2732 		state |= BMAP_LEFT_VALID;
2733 		if (isnullstartblock(left.br_startblock))
2734 			state |= BMAP_LEFT_DELAY;
2735 	}
2736 
2737 	/*
2738 	 * Check and set flags if this segment has a current value.
2739 	 * Not true if we're inserting into the "hole" at eof.
2740 	 */
2741 	if (xfs_iext_get_extent(ifp, icur, &right)) {
2742 		state |= BMAP_RIGHT_VALID;
2743 		if (isnullstartblock(right.br_startblock))
2744 			state |= BMAP_RIGHT_DELAY;
2745 	}
2746 
2747 	/*
2748 	 * We're inserting a real allocation between "left" and "right".
2749 	 * Set the contiguity flags.  Don't let extents get too large.
2750 	 */
2751 	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2752 	    left.br_startoff + left.br_blockcount == new->br_startoff &&
2753 	    left.br_startblock + left.br_blockcount == new->br_startblock &&
2754 	    left.br_state == new->br_state &&
2755 	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
2756 		state |= BMAP_LEFT_CONTIG;
2757 
2758 	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2759 	    new->br_startoff + new->br_blockcount == right.br_startoff &&
2760 	    new->br_startblock + new->br_blockcount == right.br_startblock &&
2761 	    new->br_state == right.br_state &&
2762 	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
2763 	    (!(state & BMAP_LEFT_CONTIG) ||
2764 	     left.br_blockcount + new->br_blockcount +
2765 	     right.br_blockcount <= XFS_MAX_BMBT_EXTLEN))
2766 		state |= BMAP_RIGHT_CONTIG;
2767 
2768 	error = 0;
2769 	/*
2770 	 * Select which case we're in here, and implement it.
2771 	 */
2772 	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2773 	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2774 		/*
2775 		 * New allocation is contiguous with real allocations on the
2776 		 * left and on the right.
2777 		 * Merge all three into a single extent record.
2778 		 */
2779 		left.br_blockcount += new->br_blockcount + right.br_blockcount;
2780 
2781 		xfs_iext_remove(ip, icur, state);
2782 		xfs_iext_prev(ifp, icur);
2783 		xfs_iext_update_extent(ip, state, icur, &left);
2784 		ifp->if_nextents--;
2785 
2786 		if (cur == NULL) {
2787 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2788 		} else {
2789 			rval = XFS_ILOG_CORE;
2790 			error = xfs_bmbt_lookup_eq(cur, &right, &i);
2791 			if (error)
2792 				goto done;
2793 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2794 				xfs_btree_mark_sick(cur);
2795 				error = -EFSCORRUPTED;
2796 				goto done;
2797 			}
2798 			error = xfs_btree_delete(cur, &i);
2799 			if (error)
2800 				goto done;
2801 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2802 				xfs_btree_mark_sick(cur);
2803 				error = -EFSCORRUPTED;
2804 				goto done;
2805 			}
2806 			error = xfs_btree_decrement(cur, 0, &i);
2807 			if (error)
2808 				goto done;
2809 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2810 				xfs_btree_mark_sick(cur);
2811 				error = -EFSCORRUPTED;
2812 				goto done;
2813 			}
2814 			error = xfs_bmbt_update(cur, &left);
2815 			if (error)
2816 				goto done;
2817 		}
2818 		break;
2819 
2820 	case BMAP_LEFT_CONTIG:
2821 		/*
2822 		 * New allocation is contiguous with a real allocation
2823 		 * on the left.
2824 		 * Merge the new allocation with the left neighbor.
2825 		 */
2826 		old = left;
2827 		left.br_blockcount += new->br_blockcount;
2828 
2829 		xfs_iext_prev(ifp, icur);
2830 		xfs_iext_update_extent(ip, state, icur, &left);
2831 
2832 		if (cur == NULL) {
2833 			rval = xfs_ilog_fext(whichfork);
2834 		} else {
2835 			rval = 0;
2836 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2837 			if (error)
2838 				goto done;
2839 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2840 				xfs_btree_mark_sick(cur);
2841 				error = -EFSCORRUPTED;
2842 				goto done;
2843 			}
2844 			error = xfs_bmbt_update(cur, &left);
2845 			if (error)
2846 				goto done;
2847 		}
2848 		break;
2849 
2850 	case BMAP_RIGHT_CONTIG:
2851 		/*
2852 		 * New allocation is contiguous with a real allocation
2853 		 * on the right.
2854 		 * Merge the new allocation with the right neighbor.
2855 		 */
2856 		old = right;
2857 
2858 		right.br_startoff = new->br_startoff;
2859 		right.br_startblock = new->br_startblock;
2860 		right.br_blockcount += new->br_blockcount;
2861 		xfs_iext_update_extent(ip, state, icur, &right);
2862 
2863 		if (cur == NULL) {
2864 			rval = xfs_ilog_fext(whichfork);
2865 		} else {
2866 			rval = 0;
2867 			error = xfs_bmbt_lookup_eq(cur, &old, &i);
2868 			if (error)
2869 				goto done;
2870 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2871 				xfs_btree_mark_sick(cur);
2872 				error = -EFSCORRUPTED;
2873 				goto done;
2874 			}
2875 			error = xfs_bmbt_update(cur, &right);
2876 			if (error)
2877 				goto done;
2878 		}
2879 		break;
2880 
2881 	case 0:
2882 		/*
2883 		 * New allocation is not contiguous with another
2884 		 * real allocation.
2885 		 * Insert a new entry.
2886 		 */
2887 		xfs_iext_insert(ip, icur, new, state);
2888 		ifp->if_nextents++;
2889 
2890 		if (cur == NULL) {
2891 			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2892 		} else {
2893 			rval = XFS_ILOG_CORE;
2894 			error = xfs_bmbt_lookup_eq(cur, new, &i);
2895 			if (error)
2896 				goto done;
2897 			if (XFS_IS_CORRUPT(mp, i != 0)) {
2898 				xfs_btree_mark_sick(cur);
2899 				error = -EFSCORRUPTED;
2900 				goto done;
2901 			}
2902 			error = xfs_btree_insert(cur, &i);
2903 			if (error)
2904 				goto done;
2905 			if (XFS_IS_CORRUPT(mp, i != 1)) {
2906 				xfs_btree_mark_sick(cur);
2907 				error = -EFSCORRUPTED;
2908 				goto done;
2909 			}
2910 		}
2911 		break;
2912 	}
2913 
2914 	/* add reverse mapping unless caller opted out */
2915 	if (!(flags & XFS_BMAPI_NORMAP))
2916 		xfs_rmap_map_extent(tp, ip, whichfork, new);
2917 
2918 	/* convert to a btree if necessary */
2919 	if (xfs_bmap_needs_btree(ip, whichfork)) {
2920 		int	tmp_logflags;	/* partial log flag return val */
2921 
2922 		ASSERT(cur == NULL);
2923 		error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2924 				&tmp_logflags, whichfork);
2925 		*logflagsp |= tmp_logflags;
2926 		cur = *curp;
2927 		if (error)
2928 			goto done;
2929 	}
2930 
2931 	/* clear out the allocated field, done with it now in any case. */
2932 	if (cur)
2933 		cur->bc_bmap.allocated = 0;
2934 
2935 	xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2936 done:
2937 	*logflagsp |= rval;
2938 	return error;
2939 }
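
/*
 * Illustrative sketch only of the left-contiguity test used above: two real
 * extents may merge only if they are logically and physically adjacent, have
 * the same written/unwritten state, and the merged length stays within
 * XFS_MAX_BMBT_EXTLEN.  The helper name is hypothetical.
 */
static inline bool
xfs_bmap_example_left_contig(
	const struct xfs_bmbt_irec	*left,
	const struct xfs_bmbt_irec	*new)
{
	return left->br_startoff + left->br_blockcount == new->br_startoff &&
	       left->br_startblock + left->br_blockcount ==
							new->br_startblock &&
	       left->br_state == new->br_state &&
	       left->br_blockcount + new->br_blockcount <=
							XFS_MAX_BMBT_EXTLEN;
}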
2940 
2941 /*
2942  * Functions used in the extent read, allocate and remove paths
2943  */
2944 
2945 /*
2946  * Adjust the size of the new extent based on i_extsize and rt extsize.
2947  */
2948 int
2949 xfs_bmap_extsize_align(
2950 	xfs_mount_t	*mp,
2951 	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
2952 	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
2953 	xfs_extlen_t	extsz,		/* align to this extent size */
2954 	int		rt,		/* is this a realtime inode? */
2955 	int		eof,		/* is extent at end-of-file? */
2956 	int		delay,		/* creating delalloc extent? */
2957 	int		convert,	/* overwriting unwritten extent? */
2958 	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
2959 	xfs_extlen_t	*lenp)		/* in/out: aligned length */
2960 {
2961 	xfs_fileoff_t	orig_off;	/* original offset */
2962 	xfs_extlen_t	orig_alen;	/* original length */
2963 	xfs_fileoff_t	orig_end;	/* original off+len */
2964 	xfs_fileoff_t	nexto;		/* next file offset */
2965 	xfs_fileoff_t	prevo;		/* previous file offset */
2966 	xfs_fileoff_t	align_off;	/* temp for offset */
2967 	xfs_extlen_t	align_alen;	/* temp for length */
2968 	xfs_extlen_t	temp;		/* temp for calculations */
2969 
2970 	if (convert)
2971 		return 0;
2972 
2973 	orig_off = align_off = *offp;
2974 	orig_alen = align_alen = *lenp;
2975 	orig_end = orig_off + orig_alen;
2976 
2977 	/*
2978 	 * If this request overlaps an existing extent, then don't
2979 	 * attempt to perform any additional alignment.
2980 	 */
2981 	if (!delay && !eof &&
2982 	    (orig_off >= gotp->br_startoff) &&
2983 	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2984 		return 0;
2985 	}
2986 
2987 	/*
2988 	 * If the file offset is unaligned vs. the extent size
2989 	 * we need to align it.  This will be possible unless
2990 	 * the file was previously written with a kernel that didn't
2991 	 * perform this alignment, or if a truncate shot us in the
2992 	 * foot.
2993 	 */
2994 	div_u64_rem(orig_off, extsz, &temp);
2995 	if (temp) {
2996 		align_alen += temp;
2997 		align_off -= temp;
2998 	}
2999 
3000 	/* Same adjustment for the end of the requested area. */
3001 	temp = (align_alen % extsz);
3002 	if (temp)
3003 		align_alen += extsz - temp;
3004 
3005 	/*
3006 	 * For large extent hint sizes, the aligned extent might be larger than
3007 	 * XFS_BMBT_MAX_EXTLEN. In that case, reduce the size by an extsz so
3008 	 * XFS_MAX_BMBT_EXTLEN. In that case, reduce the size by an extsz so
3009 	 * that it pulls the length back under XFS_MAX_BMBT_EXTLEN. The outer
3010 	 * allocation loops handle short allocations just fine, so it is safe to
3011 	 * it means more allocation operations are required.
3012 	 */
3013 	while (align_alen > XFS_MAX_BMBT_EXTLEN)
3014 		align_alen -= extsz;
3015 	ASSERT(align_alen <= XFS_MAX_BMBT_EXTLEN);
3016 
3017 	/*
3018 	 * If the previous block overlaps with this proposed allocation
3019 	 * then move the start forward without adjusting the length.
3020 	 */
3021 	if (prevp->br_startoff != NULLFILEOFF) {
3022 		if (prevp->br_startblock == HOLESTARTBLOCK)
3023 			prevo = prevp->br_startoff;
3024 		else
3025 			prevo = prevp->br_startoff + prevp->br_blockcount;
3026 	} else
3027 		prevo = 0;
3028 	if (align_off != orig_off && align_off < prevo)
3029 		align_off = prevo;
3030 	/*
3031 	 * If the next block overlaps with this proposed allocation
3032 	 * then move the start back without adjusting the length,
3033 	 * but not before offset 0.
3034 	 * This may of course make the start overlap previous block,
3035 	 * and if we hit the offset 0 limit then the next block
3036 	 * can still overlap too.
3037 	 */
3038 	if (!eof && gotp->br_startoff != NULLFILEOFF) {
3039 		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3040 		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3041 			nexto = gotp->br_startoff + gotp->br_blockcount;
3042 		else
3043 			nexto = gotp->br_startoff;
3044 	} else
3045 		nexto = NULLFILEOFF;
3046 	if (!eof &&
3047 	    align_off + align_alen != orig_end &&
3048 	    align_off + align_alen > nexto)
3049 		align_off = nexto > align_alen ? nexto - align_alen : 0;
3050 	/*
3051 	 * If we're now overlapping the next or previous extent that
3052 	 * means we can't fit an extsz piece in this hole.  Just move
3053 	 * the start forward to the first valid spot and set
3054 	 * the length so we hit the end.
3055 	 */
3056 	if (align_off != orig_off && align_off < prevo)
3057 		align_off = prevo;
3058 	if (align_off + align_alen != orig_end &&
3059 	    align_off + align_alen > nexto &&
3060 	    nexto != NULLFILEOFF) {
3061 		ASSERT(nexto > prevo);
3062 		align_alen = nexto - align_off;
3063 	}
3064 
3065 	/*
3066 	 * If realtime, and the result isn't a multiple of the realtime
3067 	 * extent size we need to remove blocks until it is.
3068 	 */
3069 	if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) {
3070 		/*
3071 		 * We're not covering the original request, or
3072 		 * we won't be able to once we fix the length.
3073 		 */
3074 		if (orig_off < align_off ||
3075 		    orig_end > align_off + align_alen ||
3076 		    align_alen - temp < orig_alen)
3077 			return -EINVAL;
3078 		/*
3079 		 * Try to fix it by moving the start up.
3080 		 */
3081 		if (align_off + temp <= orig_off) {
3082 			align_alen -= temp;
3083 			align_off += temp;
3084 		}
3085 		/*
3086 		 * Try to fix it by moving the end in.
3087 		 */
3088 		else if (align_off + align_alen - temp >= orig_end)
3089 			align_alen -= temp;
3090 		/*
3091 		 * Set the start to the minimum then trim the length.
3092 		 */
3093 		else {
3094 			align_alen -= orig_off - align_off;
3095 			align_off = orig_off;
3096 			align_alen -= xfs_extlen_to_rtxmod(mp, align_alen);
3097 		}
3098 		/*
3099 		 * Result doesn't cover the request, fail it.
3100 		 */
3101 		if (orig_off < align_off || orig_end > align_off + align_alen)
3102 			return -EINVAL;
3103 	} else {
3104 		ASSERT(orig_off >= align_off);
3105 		/* see XFS_BMBT_MAX_EXTLEN handling above */
3106 		/* see XFS_MAX_BMBT_EXTLEN handling above */
3107 		       align_alen + extsz > XFS_MAX_BMBT_EXTLEN);
3108 	}
3109 
3110 #ifdef DEBUG
3111 	if (!eof && gotp->br_startoff != NULLFILEOFF)
3112 		ASSERT(align_off + align_alen <= gotp->br_startoff);
3113 	if (prevp->br_startoff != NULLFILEOFF)
3114 		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3115 #endif
3116 
3117 	*lenp = align_alen;
3118 	*offp = align_off;
3119 	return 0;
3120 }
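
/*
 * Illustrative sketch only, with hypothetical numbers, of the rounding
 * performed above: for extsz = 16, a request for [20, 30) rounds the start
 * down by 20 % 16 = 4 to offset 16 (length 14), then rounds the end up by
 * 16 - (14 % 16) = 2, producing an aligned request covering [16, 32).
 * The helper name is hypothetical.
 */
static inline void
xfs_bmap_example_extsz_round(
	xfs_fileoff_t	*off,		/* in/out: start offset */
	xfs_extlen_t	*len,		/* in/out: length */
	xfs_extlen_t	extsz)
{
	xfs_extlen_t	temp;

	div_u64_rem(*off, extsz, &temp);
	if (temp) {
		*off -= temp;		/* round the start down */
		*len += temp;
	}
	temp = *len % extsz;
	if (temp)
		*len += extsz - temp;	/* round the end up */
}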
3121 
3122 #define XFS_ALLOC_GAP_UNITS	4
3123 
3124 /* returns true if ap->blkno was modified */
3125 bool
3126 xfs_bmap_adjacent(
3127 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
3128 {
3129 	xfs_fsblock_t	adjust;		/* adjustment to block numbers */
3130 	xfs_mount_t	*mp;		/* mount point structure */
3131 	int		rt;		/* true if inode is realtime */
3132 
3133 #define	ISVALID(x,y)	\
3134 	(rt ? \
3135 		(x) < mp->m_sb.sb_rblocks : \
3136 		XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3137 		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3138 		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3139 
3140 	mp = ap->ip->i_mount;
3141 	rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3142 		(ap->datatype & XFS_ALLOC_USERDATA);
3143 	/*
3144 	 * If allocating at eof, and there's a previous real block,
3145 	 * try to use its last block as our starting point.
3146 	 */
3147 	if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3148 	    !isnullstartblock(ap->prev.br_startblock) &&
3149 	    ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3150 		    ap->prev.br_startblock)) {
3151 		ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3152 		/*
3153 		 * Adjust for the gap between prevp and us.
3154 		 */
3155 		adjust = ap->offset -
3156 			(ap->prev.br_startoff + ap->prev.br_blockcount);
3157 		if (adjust &&
3158 		    ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3159 			ap->blkno += adjust;
3160 		return true;
3161 	}
3162 	/*
3163 	 * If not at eof, then compare the two neighbor blocks.
3164 	 * Figure out whether either one gives us a good starting point,
3165 	 * and pick the better one.
3166 	 */
3167 	if (!ap->eof) {
3168 		xfs_fsblock_t	gotbno;		/* right side block number */
3169 		xfs_fsblock_t	gotdiff = 0;	/* right side difference */
3170 		xfs_fsblock_t	prevbno;	/* left side block number */
3171 		xfs_fsblock_t	prevdiff = 0;	/* left side difference */
3172 
3173 		/*
3174 		 * If there's a previous (left) block, select a requested
3175 		 * start block based on it.
3176 		 */
3177 		if (ap->prev.br_startoff != NULLFILEOFF &&
3178 		    !isnullstartblock(ap->prev.br_startblock) &&
3179 		    (prevbno = ap->prev.br_startblock +
3180 			       ap->prev.br_blockcount) &&
3181 		    ISVALID(prevbno, ap->prev.br_startblock)) {
3182 			/*
3183 			 * Calculate gap to end of previous block.
3184 			 */
3185 			adjust = prevdiff = ap->offset -
3186 				(ap->prev.br_startoff +
3187 				 ap->prev.br_blockcount);
3188 			/*
3189 			 * Figure the startblock based on the previous block's
3190 			 * end and the gap size.
3191 			 * Heuristic!
3192 			 * If the gap is large relative to the piece we're
3193 			 * allocating, or using it gives us an invalid block
3194 			 * number, then just use the end of the previous block.
3195 			 */
3196 			if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3197 			    ISVALID(prevbno + prevdiff,
3198 				    ap->prev.br_startblock))
3199 				prevbno += adjust;
3200 			else
3201 				prevdiff += adjust;
3202 		}
3203 		/*
3204 		 * No previous block or can't follow it, just default.
3205 		 */
3206 		else
3207 			prevbno = NULLFSBLOCK;
3208 		/*
3209 		 * If there's a following (right) block, select a requested
3210 		 * start block based on it.
3211 		 */
3212 		if (!isnullstartblock(ap->got.br_startblock)) {
3213 			/*
3214 			 * Calculate gap to start of next block.
3215 			 */
3216 			adjust = gotdiff = ap->got.br_startoff - ap->offset;
3217 			/*
3218 			 * Figure the startblock based on the next block's
3219 			 * start and the gap size.
3220 			 */
3221 			gotbno = ap->got.br_startblock;
3222 			/*
3223 			 * Heuristic!
3224 			 * If the gap is large relative to the piece we're
3225 			 * allocating, or using it gives us an invalid block
3226 			 * number, then just use the start of the next block
3227 			 * offset by our length.
3228 			 */
3229 			if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3230 			    ISVALID(gotbno - gotdiff, gotbno))
3231 				gotbno -= adjust;
3232 			else if (ISVALID(gotbno - ap->length, gotbno)) {
3233 				gotbno -= ap->length;
3234 				gotdiff += adjust - ap->length;
3235 			} else
3236 				gotdiff += adjust;
3237 		}
3238 		/*
3239 		 * No next block, just default.
3240 		 */
3241 		else
3242 			gotbno = NULLFSBLOCK;
3243 		/*
3244 		 * If both valid, pick the better one, else the only good
3245 		 * one, else ap->blkno is already set (to 0 or the inode block).
3246 		 */
3247 		if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK) {
3248 			ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3249 			return true;
3250 		}
3251 		if (prevbno != NULLFSBLOCK) {
3252 			ap->blkno = prevbno;
3253 			return true;
3254 		}
3255 		if (gotbno != NULLFSBLOCK) {
3256 			ap->blkno = gotbno;
3257 			return true;
3258 		}
3259 	}
3260 #undef ISVALID
3261 	return false;
3262 }
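
/*
 * Illustrative sketch only, with hypothetical numbers, of the
 * XFS_ALLOC_GAP_UNITS heuristic above: when allocating 100 blocks, a
 * 300-block gap to the neighbouring extent is followed (300 <= 4 * 100)
 * while a 500-block gap is not.  The helper name is hypothetical.
 */
static inline bool
xfs_bmap_example_follow_gap(
	xfs_fsblock_t	gap,		/* blocks between neighbour and us */
	xfs_extlen_t	length)		/* length being allocated */
{
	return gap <= XFS_ALLOC_GAP_UNITS * (xfs_fsblock_t)length;
}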
3263 
3264 int
3265 xfs_bmap_longest_free_extent(
3266 	struct xfs_perag	*pag,
3267 	struct xfs_trans	*tp,
3268 	xfs_extlen_t		*blen)
3269 {
3270 	xfs_extlen_t		longest;
3271 	int			error = 0;
3272 
3273 	if (!xfs_perag_initialised_agf(pag)) {
3274 		error = xfs_alloc_read_agf(pag, tp, XFS_ALLOC_FLAG_TRYLOCK,
3275 				NULL);
3276 		if (error)
3277 			return error;
3278 	}
3279 
3280 	longest = xfs_alloc_longest_free_extent(pag,
3281 				xfs_alloc_min_freelist(pag->pag_mount, pag),
3282 				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3283 	if (*blen < longest)
3284 		*blen = longest;
3285 
3286 	return 0;
3287 }
3288 
3289 static xfs_extlen_t
3290 xfs_bmap_select_minlen(
3291 	struct xfs_bmalloca	*ap,
3292 	struct xfs_alloc_arg	*args,
3293 	xfs_extlen_t		blen)
3294 {
3295 
3296 	/*
3297 	 * Since we used XFS_ALLOC_FLAG_TRYLOCK in _longest_free_extent(), blen
3298 	 * may be an underestimate; enough contiguous free space may still exist.
3299 	 */
3300 	if (blen < ap->minlen)
3301 		return ap->minlen;
3302 
3303 	/*
3304 	 * If the best seen length is less than the request length,
3305 	 * use the best as the minimum, otherwise we've got the maxlen we
3306 	 * were asked for.
3307 	 */
3308 	if (blen < args->maxlen)
3309 		return blen;
3310 	return args->maxlen;
3311 }
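
/*
 * Illustrative sketch only, with hypothetical numbers, of the selection
 * policy above: given minlen = 1 and maxlen = 64, a best-seen length of 0
 * yields 1, 16 yields 16, and 128 is capped to 64.  The helper name is
 * hypothetical.
 */
static inline xfs_extlen_t
xfs_bmap_example_clamp_minlen(
	xfs_extlen_t	minlen,		/* smallest usable allocation */
	xfs_extlen_t	maxlen,		/* requested allocation length */
	xfs_extlen_t	blen)		/* best free extent length seen */
{
	if (blen < minlen)
		return minlen;
	return min(blen, maxlen);
}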
3312 
3313 static int
3314 xfs_bmap_btalloc_select_lengths(
3315 	struct xfs_bmalloca	*ap,
3316 	struct xfs_alloc_arg	*args,
3317 	xfs_extlen_t		*blen)
3318 {
3319 	struct xfs_mount	*mp = args->mp;
3320 	struct xfs_perag	*pag;
3321 	xfs_agnumber_t		agno, startag;
3322 	int			error = 0;
3323 
3324 	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3325 		args->total = ap->minlen;
3326 		args->minlen = ap->minlen;
3327 		return 0;
3328 	}
3329 
3330 	args->total = ap->total;
3331 	startag = XFS_FSB_TO_AGNO(mp, ap->blkno);
3332 	if (startag == NULLAGNUMBER)
3333 		startag = 0;
3334 
3335 	*blen = 0;
3336 	for_each_perag_wrap(mp, startag, agno, pag) {
3337 		error = xfs_bmap_longest_free_extent(pag, args->tp, blen);
3338 		if (error && error != -EAGAIN)
3339 			break;
3340 		error = 0;
3341 		if (*blen >= args->maxlen)
3342 			break;
3343 	}
3344 	if (pag)
3345 		xfs_perag_rele(pag);
3346 
3347 	args->minlen = xfs_bmap_select_minlen(ap, args, *blen);
3348 	return error;
3349 }
3350 
3351 /* Update all inode and quota accounting for the allocation we just did. */
3352 void
3353 xfs_bmap_alloc_account(
3354 	struct xfs_bmalloca	*ap)
3355 {
3356 	bool			isrt = XFS_IS_REALTIME_INODE(ap->ip) &&
3357 					!(ap->flags & XFS_BMAPI_ATTRFORK);
3358 	uint			fld;
3359 
3360 	if (ap->flags & XFS_BMAPI_COWFORK) {
3361 		/*
3362 		 * COW fork blocks are in-core only and thus are treated as
3363 		 * in-core quota reservation (like delalloc blocks) even when
3364 		 * converted to real blocks. The quota reservation is not
3365 		 * accounted to disk until blocks are remapped to the data
3366 		 * fork. So if these blocks were previously delalloc, we
3367 		 * already have quota reservation and there's nothing to do
3368 		 * yet.
3369 		 */
3370 		if (ap->wasdel) {
3371 			xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)ap->length);
3372 			return;
3373 		}
3374 
3375 		/*
3376 		 * Otherwise, we've allocated blocks in a hole. The transaction
3377 		 * has acquired in-core quota reservation for this extent.
3378 		 * Rather than account these as real blocks, however, we reduce
3379 		 * the transaction quota reservation based on the allocation.
3380 		 * This essentially transfers the transaction quota reservation
3381 		 * to that of a delalloc extent.
3382 		 */
3383 		ap->ip->i_delayed_blks += ap->length;
3384 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip, isrt ?
3385 				XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS,
3386 				-(long)ap->length);
3387 		return;
3388 	}
3389 
3390 	/* data/attr fork only */
3391 	ap->ip->i_nblocks += ap->length;
3392 	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3393 	if (ap->wasdel) {
3394 		ap->ip->i_delayed_blks -= ap->length;
3395 		xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)ap->length);
3396 		fld = isrt ? XFS_TRANS_DQ_DELRTBCOUNT : XFS_TRANS_DQ_DELBCOUNT;
3397 	} else {
3398 		fld = isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
3399 	}
3400 
3401 	xfs_trans_mod_dquot_byino(ap->tp, ap->ip, fld, ap->length);
3402 }
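
/*
 * Illustrative sketch only of the dquot field selection above: the counter
 * charged depends on whether the blocks are realtime and on whether they
 * were previously reserved as delalloc.  The helper name is hypothetical.
 */
static inline uint
xfs_bmap_example_quota_field(
	bool		isrt,		/* realtime allocation? */
	bool		wasdel)		/* was this a delalloc extent? */
{
	if (wasdel)
		return isrt ? XFS_TRANS_DQ_DELRTBCOUNT :
			      XFS_TRANS_DQ_DELBCOUNT;
	return isrt ? XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
}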
3403 
3404 static int
3405 xfs_bmap_compute_alignments(
3406 	struct xfs_bmalloca	*ap,
3407 	struct xfs_alloc_arg	*args)
3408 {
3409 	struct xfs_mount	*mp = args->mp;
3410 	xfs_extlen_t		align = 0; /* minimum allocation alignment */
3411 	int			stripe_align = 0;
3412 
3413 	/* stripe alignment for allocation is determined by mount parameters */
3414 	if (mp->m_swidth && xfs_has_swalloc(mp))
3415 		stripe_align = mp->m_swidth;
3416 	else if (mp->m_dalign)
3417 		stripe_align = mp->m_dalign;
3418 
3419 	if (ap->flags & XFS_BMAPI_COWFORK)
3420 		align = xfs_get_cowextsz_hint(ap->ip);
3421 	else if (ap->datatype & XFS_ALLOC_USERDATA)
3422 		align = xfs_get_extsz_hint(ap->ip);
3423 	if (align) {
3424 		if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
3425 					ap->eof, 0, ap->conv, &ap->offset,
3426 					&ap->length))
3427 			ASSERT(0);
3428 		ASSERT(ap->length);
3429 	}
3430 
3431 	/* apply extent size hints if obtained earlier */
3432 	if (align) {
3433 		args->prod = align;
3434 		div_u64_rem(ap->offset, args->prod, &args->mod);
3435 		if (args->mod)
3436 			args->mod = args->prod - args->mod;
3437 	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3438 		args->prod = 1;
3439 		args->mod = 0;
3440 	} else {
3441 		args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3442 		div_u64_rem(ap->offset, args->prod, &args->mod);
3443 		if (args->mod)
3444 			args->mod = args->prod - args->mod;
3445 	}
3446 
3447 	return stripe_align;
3448 }
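
/*
 * Illustrative sketch only, with hypothetical numbers, of the
 * args->prod/args->mod setup above: with align = 8 and ap->offset = 21,
 * 21 % 8 = 5, so args->mod = 8 - 5 = 3 blocks are needed to reach the next
 * alignment boundary at offset 24.  The helper name is hypothetical.
 */
static inline xfs_extlen_t
xfs_bmap_example_blocks_to_boundary(
	xfs_fileoff_t	offset,		/* file offset of the request */
	xfs_extlen_t	prod)		/* alignment granularity */
{
	u32		mod;

	div_u64_rem(offset, prod, &mod);
	return mod ? prod - mod : 0;
}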
3449 
3450 static void
3451 xfs_bmap_process_allocated_extent(
3452 	struct xfs_bmalloca	*ap,
3453 	struct xfs_alloc_arg	*args,
3454 	xfs_fileoff_t		orig_offset,
3455 	xfs_extlen_t		orig_length)
3456 {
3457 	ap->blkno = args->fsbno;
3458 	ap->length = args->len;
3459 	/*
3460 	 * If the extent size hint is active, we tried to round the
3461 	 * caller's allocation request offset down to extsz and the
3462 	 * length up to another extsz boundary.  If we found a free
3463 	 * extent we mapped it in starting at this new offset.  If the
3464 	 * newly mapped space isn't long enough to cover any of the
3465 	 * range of offsets that was originally requested, move the
3466 	 * mapping up so that we can fill as much of the caller's
3467 	 * original request as possible.  Free space is apparently
3468 	 * very fragmented so we're unlikely to be able to satisfy the
3469 	 * hints anyway.
3470 	 */
3471 	if (ap->length <= orig_length)
3472 		ap->offset = orig_offset;
3473 	else if (ap->offset + ap->length < orig_offset + orig_length)
3474 		ap->offset = orig_offset + orig_length - ap->length;
3475 	xfs_bmap_alloc_account(ap);
3476 }
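
/*
 * Illustrative sketch only, with hypothetical numbers, of the mapping shift
 * above: the caller wanted [100, 110) but alignment widened the attempt and
 * the allocator mapped only 6 blocks at offset 96.  Since the mapped length
 * (6) <= the original length (10), the offset snaps back to 100 so as much
 * of the caller's range as possible is covered.  The helper name is
 * hypothetical.
 */
static inline xfs_fileoff_t
xfs_bmap_example_reposition(
	xfs_fileoff_t	got_offset,	/* offset actually mapped */
	xfs_extlen_t	got_length,	/* length actually mapped */
	xfs_fileoff_t	orig_offset,	/* offset the caller asked for */
	xfs_extlen_t	orig_length)	/* length the caller asked for */
{
	if (got_length <= orig_length)
		return orig_offset;
	if (got_offset + got_length < orig_offset + orig_length)
		return orig_offset + orig_length - got_length;
	return got_offset;
}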
3477 
3478 #ifdef DEBUG
3479 static int
3480 xfs_bmap_exact_minlen_extent_alloc(
3481 	struct xfs_bmalloca	*ap)
3482 {
3483 	struct xfs_mount	*mp = ap->ip->i_mount;
3484 	struct xfs_alloc_arg	args = { .tp = ap->tp, .mp = mp };
3485 	xfs_fileoff_t		orig_offset;
3486 	xfs_extlen_t		orig_length;
3487 	int			error;
3488 
3489 	ASSERT(ap->length);
3490 
3491 	if (ap->minlen != 1) {
3492 		ap->blkno = NULLFSBLOCK;
3493 		ap->length = 0;
3494 		return 0;
3495 	}
3496 
3497 	orig_offset = ap->offset;
3498 	orig_length = ap->length;
3499 
3500 	args.alloc_minlen_only = 1;
3501 
3502 	xfs_bmap_compute_alignments(ap, &args);
3503 
3504 	/*
3505 	 * Unlike the longest extent available in an AG, we don't track
3506 	 * the length of an AG's shortest extent.
3507 	 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug-only knob and
3508 	 * hence we can afford to start traversing from the 0th AG since
3509 	 * we need not be concerned about a drop in performance in
3510 	 * "debug only" code paths.
3511 	 */
3512 	ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0);
3513 
3514 	args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
3515 	args.minlen = args.maxlen = ap->minlen;
3516 	args.total = ap->total;
3517 
3518 	args.alignment = 1;
3519 	args.minalignslop = 0;
3520 
3521 	args.minleft = ap->minleft;
3522 	args.wasdel = ap->wasdel;
3523 	args.resv = XFS_AG_RESV_NONE;
3524 	args.datatype = ap->datatype;
3525 
3526 	error = xfs_alloc_vextent_first_ag(&args, ap->blkno);
3527 	if (error)
3528 		return error;
3529 
3530 	if (args.fsbno != NULLFSBLOCK) {
3531 		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3532 			orig_length);
3533 	} else {
3534 		ap->blkno = NULLFSBLOCK;
3535 		ap->length = 0;
3536 	}
3537 
3538 	return 0;
3539 }
3540 #else
3541 
3542 #define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED)
3543 
3544 #endif
3545 
3546 /*
3547  * If we are not low on available data blocks and we are allocating at
3548  * EOF, optimise allocation for contiguous file extension and/or stripe
3549  * alignment of the new extent.
3550  *
3551  * NOTE: ap->aeof is only set if the allocation length is >= the
3552  * stripe unit and the allocation offset is at the end of file.
3553  */
3554 static int
3555 xfs_bmap_btalloc_at_eof(
3556 	struct xfs_bmalloca	*ap,
3557 	struct xfs_alloc_arg	*args,
3558 	xfs_extlen_t		blen,
3559 	int			stripe_align,
3560 	bool			ag_only)
3561 {
3562 	struct xfs_mount	*mp = args->mp;
3563 	struct xfs_perag	*caller_pag = args->pag;
3564 	int			error;
3565 
3566 	/*
3567 	 * If there are already extents in the file, try an exact EOF block
3568 	 * allocation to extend the file as a contiguous extent. If that fails,
3569 	 * or it's the first allocation in a file, just try for a stripe aligned
3570 	 * allocation.
3571 	 */
3572 	if (ap->offset) {
3573 		xfs_extlen_t	nextminlen = 0;
3574 
3575 		/*
3576 		 * Compute the minlen+alignment for the next case.  Set slop so
3577 		 * that the value of minlen+alignment+slop doesn't go up between
3578 		 * the calls.
3579 		 */
3580 		args->alignment = 1;
3581 		if (blen > stripe_align && blen <= args->maxlen)
3582 			nextminlen = blen - stripe_align;
3583 		else
3584 			nextminlen = args->minlen;
3585 		if (nextminlen + stripe_align > args->minlen + 1)
3586 			args->minalignslop = nextminlen + stripe_align -
3587 					args->minlen - 1;
3588 		else
3589 			args->minalignslop = 0;
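		/*
		 * Illustrative numbers: with stripe_align = 16, minlen = 4
		 * and blen = 0, nextminlen stays 4 and minalignslop becomes
		 * 4 + 16 - 4 - 1 = 15.  The exact allocation is thus only
		 * attempted while enough space remains for a worst-case
		 * aligned retry of 4 + 15 blocks, so the aligned fallback
		 * below can never need more space than this first attempt
		 * already reserved.
		 */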
3590 
3591 		if (!caller_pag)
3592 			args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno));
3593 		error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
3594 		if (!caller_pag) {
3595 			xfs_perag_put(args->pag);
3596 			args->pag = NULL;
3597 		}
3598 		if (error)
3599 			return error;
3600 
3601 		if (args->fsbno != NULLFSBLOCK)
3602 			return 0;
3603 		/*
3604 		 * Exact allocation failed. Reset to try an aligned allocation
3605 		 * according to the original allocation specification.
3606 		 */
3607 		args->alignment = stripe_align;
3608 		args->minlen = nextminlen;
3609 		args->minalignslop = 0;
3610 	} else {
3611 		/*
3612 		 * Adjust minlen to try and preserve alignment if we
3613 		 * can't guarantee an aligned maxlen extent.
3614 		 */
3615 		args->alignment = stripe_align;
3616 		if (blen > args->alignment &&
3617 		    blen <= args->maxlen + args->alignment)
3618 			args->minlen = blen - args->alignment;
3619 		args->minalignslop = 0;
3620 	}
3621 
3622 	if (ag_only) {
3623 		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3624 	} else {
3625 		args->pag = NULL;
3626 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3627 		ASSERT(args->pag == NULL);
3628 		args->pag = caller_pag;
3629 	}
3630 	if (error)
3631 		return error;
3632 
3633 	if (args->fsbno != NULLFSBLOCK)
3634 		return 0;
3635 
3636 	/*
3637 	 * Allocation failed, so restore the allocation args to their
3638 	 * original non-aligned state so the caller can proceed on allocation
3639 	 * failure as if this function was never called.
3640 	 */
3641 	args->alignment = 1;
3642 	return 0;
3643 }
3644 
3645 /*
3646  * We have failed multiple allocation attempts, so we are now in a low space
3647  * allocation situation. Try a locality-first, full filesystem, minimum length
3648  * allocation whilst still maintaining necessary total block reservation
3649  * requirements.
3650  *
3651  * If that fails, we are now critically low on space, so perform a last resort
3652  * allocation attempt: no reserve, no locality, blocking, minimum length, full
3653  * filesystem free space scan. We also indicate to future allocations in this
3654  * transaction that we are critically low on space so they don't waste time on
3655  * allocation modes that are unlikely to succeed.
3656  */
3657 int
3658 xfs_bmap_btalloc_low_space(
3659 	struct xfs_bmalloca	*ap,
3660 	struct xfs_alloc_arg	*args)
3661 {
3662 	int			error;
3663 
3664 	if (args->minlen > ap->minlen) {
3665 		args->minlen = ap->minlen;
3666 		error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3667 		if (error || args->fsbno != NULLFSBLOCK)
3668 			return error;
3669 	}
3670 
3671 	/* Last ditch attempt before failure is declared. */
3672 	args->total = ap->minlen;
3673 	error = xfs_alloc_vextent_first_ag(args, 0);
3674 	if (error)
3675 		return error;
3676 	ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3677 	return 0;
3678 }
3679 
3680 static int
3681 xfs_bmap_btalloc_filestreams(
3682 	struct xfs_bmalloca	*ap,
3683 	struct xfs_alloc_arg	*args,
3684 	int			stripe_align)
3685 {
3686 	xfs_extlen_t		blen = 0;
3687 	int			error = 0;
3688 
3689 
3690 	error = xfs_filestream_select_ag(ap, args, &blen);
3691 	if (error)
3692 		return error;
3693 	ASSERT(args->pag);
3694 
3695 	/*
3696 	 * If we are in low space mode, then optimal allocation will fail so
3697 	 * prepare for minimal allocation and jump to the low space algorithm
3698 	 * immediately.
3699 	 */
3700 	if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3701 		args->minlen = ap->minlen;
3702 		ASSERT(args->fsbno == NULLFSBLOCK);
3703 		goto out_low_space;
3704 	}
3705 
3706 	args->minlen = xfs_bmap_select_minlen(ap, args, blen);
3707 	if (ap->aeof)
3708 		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3709 				true);
3710 
3711 	if (!error && args->fsbno == NULLFSBLOCK)
3712 		error = xfs_alloc_vextent_near_bno(args, ap->blkno);
3713 
3714 out_low_space:
3715 	/*
3716 	 * We are now done with the perag reference for the filestreams
3717 	 * association provided by xfs_filestream_select_ag(). Release it now as
3718 	 * we've either succeeded, had a fatal error, or we are out of space and
3719 	 * need to do a full filesystem scan for free space which will take its
3720 	 * own references.
3721 	 */
3722 	xfs_perag_rele(args->pag);
3723 	args->pag = NULL;
3724 	if (error || args->fsbno != NULLFSBLOCK)
3725 		return error;
3726 
3727 	return xfs_bmap_btalloc_low_space(ap, args);
3728 }
3729 
3730 static int
3731 xfs_bmap_btalloc_best_length(
3732 	struct xfs_bmalloca	*ap,
3733 	struct xfs_alloc_arg	*args,
3734 	int			stripe_align)
3735 {
3736 	xfs_extlen_t		blen = 0;
3737 	int			error;
3738 
3739 	ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino);
3740 	xfs_bmap_adjacent(ap);
3741 
3742 	/*
3743 	 * Search for an allocation group with a single extent large enough for
3744 	 * the request.  If one isn't found, then adjust the minimum allocation
3745 	 * size to the largest space found.
3746 	 */
3747 	error = xfs_bmap_btalloc_select_lengths(ap, args, &blen);
3748 	if (error)
3749 		return error;
3750 
3751 	/*
3752 	 * Don't attempt optimal EOF allocation if previous allocations barely
3753 	 * succeeded due to being near ENOSPC. It is highly unlikely we'll get
3754 	 * optimal or even aligned allocations in this case, so don't waste time
3755 	 * trying.
3756 	 */
3757 	if (ap->aeof && !(ap->tp->t_flags & XFS_TRANS_LOWMODE)) {
3758 		error = xfs_bmap_btalloc_at_eof(ap, args, blen, stripe_align,
3759 				false);
3760 		if (error || args->fsbno != NULLFSBLOCK)
3761 			return error;
3762 	}
3763 
3764 	error = xfs_alloc_vextent_start_ag(args, ap->blkno);
3765 	if (error || args->fsbno != NULLFSBLOCK)
3766 		return error;
3767 
3768 	return xfs_bmap_btalloc_low_space(ap, args);
3769 }
3770 
3771 static int
3772 xfs_bmap_btalloc(
3773 	struct xfs_bmalloca	*ap)
3774 {
3775 	struct xfs_mount	*mp = ap->ip->i_mount;
3776 	struct xfs_alloc_arg	args = {
3777 		.tp		= ap->tp,
3778 		.mp		= mp,
3779 		.fsbno		= NULLFSBLOCK,
3780 		.oinfo		= XFS_RMAP_OINFO_SKIP_UPDATE,
3781 		.minleft	= ap->minleft,
3782 		.wasdel		= ap->wasdel,
3783 		.resv		= XFS_AG_RESV_NONE,
3784 		.datatype	= ap->datatype,
3785 		.alignment	= 1,
3786 		.minalignslop	= 0,
3787 	};
3788 	xfs_fileoff_t		orig_offset;
3789 	xfs_extlen_t		orig_length;
3790 	int			error;
3791 	int			stripe_align;
3792 
3793 	ASSERT(ap->length);
3794 	orig_offset = ap->offset;
3795 	orig_length = ap->length;
3796 
3797 	stripe_align = xfs_bmap_compute_alignments(ap, &args);
3798 
3799 	/* Trim the allocation back to the maximum an AG can fit. */
3800 	args.maxlen = min(ap->length, mp->m_ag_max_usable);
3801 
3802 	if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3803 	    xfs_inode_is_filestream(ap->ip))
3804 		error = xfs_bmap_btalloc_filestreams(ap, &args, stripe_align);
3805 	else
3806 		error = xfs_bmap_btalloc_best_length(ap, &args, stripe_align);
3807 	if (error)
3808 		return error;
3809 
3810 	if (args.fsbno != NULLFSBLOCK) {
3811 		xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3812 			orig_length);
3813 	} else {
3814 		ap->blkno = NULLFSBLOCK;
3815 		ap->length = 0;
3816 	}
3817 	return 0;
3818 }
3819 
3820 /* Trim extent to fit a logical block range. */
3821 void
3822 xfs_trim_extent(
3823 	struct xfs_bmbt_irec	*irec,
3824 	xfs_fileoff_t		bno,
3825 	xfs_filblks_t		len)
3826 {
3827 	xfs_fileoff_t		distance;
3828 	xfs_fileoff_t		end = bno + len;
3829 
3830 	if (irec->br_startoff + irec->br_blockcount <= bno ||
3831 	    irec->br_startoff >= end) {
3832 		irec->br_blockcount = 0;
3833 		return;
3834 	}
3835 
3836 	if (irec->br_startoff < bno) {
3837 		distance = bno - irec->br_startoff;
3838 		if (isnullstartblock(irec->br_startblock))
3839 			irec->br_startblock = DELAYSTARTBLOCK;
3840 		if (irec->br_startblock != DELAYSTARTBLOCK &&
3841 		    irec->br_startblock != HOLESTARTBLOCK)
3842 			irec->br_startblock += distance;
3843 		irec->br_startoff += distance;
3844 		irec->br_blockcount -= distance;
3845 	}
3846 
3847 	if (end < irec->br_startoff + irec->br_blockcount) {
3848 		distance = irec->br_startoff + irec->br_blockcount - end;
3849 		irec->br_blockcount -= distance;
3850 	}
3851 }
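/*
 * Illustrative values: trimming an extent with br_startoff = 10,
 * br_startblock = 100, br_blockcount = 10 to the range bno = 12, len = 5
 * yields br_startoff = 12, br_startblock = 102, br_blockcount = 5, while
 * a range entirely outside the extent yields br_blockcount = 0.
 */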
3852 
3853 /*
3854  * Trim the returned map to the required bounds
3855  */
3856 STATIC void
3857 xfs_bmapi_trim_map(
3858 	struct xfs_bmbt_irec	*mval,
3859 	struct xfs_bmbt_irec	*got,
3860 	xfs_fileoff_t		*bno,
3861 	xfs_filblks_t		len,
3862 	xfs_fileoff_t		obno,
3863 	xfs_fileoff_t		end,
3864 	int			n,
3865 	uint32_t		flags)
3866 {
3867 	if ((flags & XFS_BMAPI_ENTIRE) ||
3868 	    got->br_startoff + got->br_blockcount <= obno) {
3869 		*mval = *got;
3870 		if (isnullstartblock(got->br_startblock))
3871 			mval->br_startblock = DELAYSTARTBLOCK;
3872 		return;
3873 	}
3874 
3875 	if (obno > *bno)
3876 		*bno = obno;
3877 	ASSERT((*bno >= obno) || (n == 0));
3878 	ASSERT(*bno < end);
3879 	mval->br_startoff = *bno;
3880 	if (isnullstartblock(got->br_startblock))
3881 		mval->br_startblock = DELAYSTARTBLOCK;
3882 	else
3883 		mval->br_startblock = got->br_startblock +
3884 					(*bno - got->br_startoff);
3885 	/*
3886 	 * Return the minimum of what we got and what we asked for, for
3887 	 * the length.  We can use the len variable here because it is
3888 	 * modified below and we could have been through here before if
3889 	 * the first part of the allocation didn't overlap what was
3890 	 * asked for.
3891 	 */
3892 	mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3893 			got->br_blockcount - (*bno - got->br_startoff));
3894 	mval->br_state = got->br_state;
3895 	ASSERT(mval->br_blockcount <= len);
3896 	return;
3897 }
3898 
3899 /*
3900  * Update and validate the extent map to return
3901  */
3902 STATIC void
3903 xfs_bmapi_update_map(
3904 	struct xfs_bmbt_irec	**map,
3905 	xfs_fileoff_t		*bno,
3906 	xfs_filblks_t		*len,
3907 	xfs_fileoff_t		obno,
3908 	xfs_fileoff_t		end,
3909 	int			*n,
3910 	uint32_t		flags)
3911 {
3912 	xfs_bmbt_irec_t	*mval = *map;
3913 
3914 	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3915 	       ((mval->br_startoff + mval->br_blockcount) <= end));
3916 	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3917 	       (mval->br_startoff < obno));
3918 
3919 	*bno = mval->br_startoff + mval->br_blockcount;
3920 	*len = end - *bno;
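	/*
	 * Try to fold this mapping into the previous one: either it shares
	 * the previous startoff with a longer length, or it is physically
	 * contiguous with the previous real mapping and in the same state,
	 * or both are logically contiguous delalloc mappings.  Otherwise
	 * start a new map entry, unless this is a leading mapping that ends
	 * before the requested range, in which case its slot is reused.
	 */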
3921 	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3922 		/* update previous map with new information */
3923 		ASSERT(mval->br_startblock == mval[-1].br_startblock);
3924 		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3925 		ASSERT(mval->br_state == mval[-1].br_state);
3926 		mval[-1].br_blockcount = mval->br_blockcount;
3927 		mval[-1].br_state = mval->br_state;
3928 	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3929 		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
3930 		   mval[-1].br_startblock != HOLESTARTBLOCK &&
3931 		   mval->br_startblock == mval[-1].br_startblock +
3932 					  mval[-1].br_blockcount &&
3933 		   mval[-1].br_state == mval->br_state) {
3934 		ASSERT(mval->br_startoff ==
3935 		       mval[-1].br_startoff + mval[-1].br_blockcount);
3936 		mval[-1].br_blockcount += mval->br_blockcount;
3937 	} else if (*n > 0 &&
3938 		   mval->br_startblock == DELAYSTARTBLOCK &&
3939 		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
3940 		   mval->br_startoff ==
3941 		   mval[-1].br_startoff + mval[-1].br_blockcount) {
3942 		mval[-1].br_blockcount += mval->br_blockcount;
3943 		mval[-1].br_state = mval->br_state;
3944 	} else if (!((*n == 0) &&
3945 		     ((mval->br_startoff + mval->br_blockcount) <=
3946 		      obno))) {
3947 		mval++;
3948 		(*n)++;
3949 	}
3950 	*map = mval;
3951 }
3952 
3953 /*
3954  * Map file blocks to filesystem blocks without allocation.
3955  */
3956 int
3957 xfs_bmapi_read(
3958 	struct xfs_inode	*ip,
3959 	xfs_fileoff_t		bno,
3960 	xfs_filblks_t		len,
3961 	struct xfs_bmbt_irec	*mval,
3962 	int			*nmap,
3963 	uint32_t		flags)
3964 {
3965 	struct xfs_mount	*mp = ip->i_mount;
3966 	int			whichfork = xfs_bmapi_whichfork(flags);
3967 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
3968 	struct xfs_bmbt_irec	got;
3969 	xfs_fileoff_t		obno;
3970 	xfs_fileoff_t		end;
3971 	struct xfs_iext_cursor	icur;
3972 	int			error;
3973 	bool			eof = false;
3974 	int			n = 0;
3975 
3976 	ASSERT(*nmap >= 1);
3977 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
3978 	xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL);
3979 
3980 	if (WARN_ON_ONCE(!ifp)) {
3981 		xfs_bmap_mark_sick(ip, whichfork);
3982 		return -EFSCORRUPTED;
3983 	}
3984 
3985 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
3986 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
3987 		xfs_bmap_mark_sick(ip, whichfork);
3988 		return -EFSCORRUPTED;
3989 	}
3990 
3991 	if (xfs_is_shutdown(mp))
3992 		return -EIO;
3993 
3994 	XFS_STATS_INC(mp, xs_blk_mapr);
3995 
3996 	error = xfs_iread_extents(NULL, ip, whichfork);
3997 	if (error)
3998 		return error;
3999 
4000 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
4001 		eof = true;
4002 	end = bno + len;
4003 	obno = bno;
4004 
4005 	while (bno < end && n < *nmap) {
4006 		/* Reading past eof, act as though there's a hole up to end. */
4007 		if (eof)
4008 			got.br_startoff = end;
4009 		if (got.br_startoff > bno) {
4010 			/* Reading in a hole.  */
4011 			mval->br_startoff = bno;
4012 			mval->br_startblock = HOLESTARTBLOCK;
4013 			mval->br_blockcount =
4014 				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4015 			mval->br_state = XFS_EXT_NORM;
4016 			bno += mval->br_blockcount;
4017 			len -= mval->br_blockcount;
4018 			mval++;
4019 			n++;
4020 			continue;
4021 		}
4022 
4023 		/* set up the extent map to return. */
4024 		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4025 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4026 
4027 		/* If we're done, stop now. */
4028 		if (bno >= end || n >= *nmap)
4029 			break;
4030 
4031 		/* Else go on to the next record. */
4032 		if (!xfs_iext_next_extent(ifp, &icur, &got))
4033 			eof = true;
4034 	}
4035 	*nmap = n;
4036 	return 0;
4037 }
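/*
 * A minimal usage sketch (illustrative, not a caller in this file): look
 * up the mapping covering a single file offset:
 *
 *	struct xfs_bmbt_irec	imap;
 *	int			nimaps = 1;
 *
 *	error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimaps, 0);
 *
 * On success, imap describes a real extent, a delayed allocation
 * (DELAYSTARTBLOCK) or a hole (HOLESTARTBLOCK) covering offset_fsb, and
 * nimaps holds the number of mappings filled in.
 */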
4038 
4039 /*
4040  * Add a delayed allocation extent to an inode. Blocks are reserved from the
4041  * global pool and the extent inserted into the inode in-core extent tree.
4042  *
4043  * On entry, got refers to the first extent beyond the offset of the extent to
4044  * allocate or eof is specified if no such extent exists. On return, got refers
4045  * to the extent record that was inserted to the inode fork.
4046  *
4047  * Note that the allocated extent may have been merged with contiguous extents
4048  * during insertion into the inode fork. Thus, got does not reflect the current
4049  * state of the inode fork on return. If necessary, the caller can use icur
4050  * to look up the updated record in the inode fork.
4051  */
4052 int
4053 xfs_bmapi_reserve_delalloc(
4054 	struct xfs_inode	*ip,
4055 	int			whichfork,
4056 	xfs_fileoff_t		off,
4057 	xfs_filblks_t		len,
4058 	xfs_filblks_t		prealloc,
4059 	struct xfs_bmbt_irec	*got,
4060 	struct xfs_iext_cursor	*icur,
4061 	int			eof)
4062 {
4063 	struct xfs_mount	*mp = ip->i_mount;
4064 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4065 	xfs_extlen_t		alen;
4066 	xfs_extlen_t		indlen;
4067 	int			error;
4068 	xfs_fileoff_t		aoff = off;
4069 
4070 	/*
4071 	 * Cap the alloc length. Keep track of prealloc so we know whether to
4072 	 * tag the inode before we return.
4073 	 */
4074 	alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
4075 	if (!eof)
4076 		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4077 	if (prealloc && alen >= len)
4078 		prealloc = alen - len;
4079 
4080 	/* Figure out the extent size, adjust alen */
4081 	if (whichfork == XFS_COW_FORK) {
4082 		struct xfs_bmbt_irec	prev;
4083 		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);
4084 
4085 		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
4086 			prev.br_startoff = NULLFILEOFF;
4087 
4088 		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
4089 					       1, 0, &aoff, &alen);
4090 		ASSERT(!error);
4091 	}
4092 
4093 	/*
4094 	 * Make a transaction-less quota reservation for delayed allocation
4095 	 * blocks.  This number gets adjusted later.  If the reservation
4096 	 * fails we can return immediately, as nothing has been allocated yet.
4097 	 */
4098 	error = xfs_quota_reserve_blkres(ip, alen);
4099 	if (error)
4100 		return error;
4101 
4102 	/*
4103 	 * Reserve fdblocks for alen and indlen separately, since each
4104 	 * reservation may have to be unwound on its own if a later step fails.
4105 	 */
4106 	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4107 	ASSERT(indlen > 0);
4108 
4109 	error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4110 	if (error)
4111 		goto out_unreserve_quota;
4112 
4113 	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4114 	if (error)
4115 		goto out_unreserve_blocks;
4116 
4117 
4118 	ip->i_delayed_blks += alen;
4119 	xfs_mod_delalloc(ip->i_mount, alen + indlen);
4120 
4121 	got->br_startoff = aoff;
4122 	got->br_startblock = nullstartblock(indlen);
4123 	got->br_blockcount = alen;
4124 	got->br_state = XFS_EXT_NORM;
4125 
4126 	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
4127 
4128 	/*
4129 	 * Tag the inode if blocks were preallocated. Note that COW fork
4130 	 * preallocation can occur at the start or end of the extent, even when
4131 	 * prealloc == 0, so we must also check the aligned offset and length.
4132 	 */
4133 	if (whichfork == XFS_DATA_FORK && prealloc)
4134 		xfs_inode_set_eofblocks_tag(ip);
4135 	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4136 		xfs_inode_set_cowblocks_tag(ip);
4137 
4138 	return 0;
4139 
4140 out_unreserve_blocks:
4141 	xfs_mod_fdblocks(mp, alen, false);
4142 out_unreserve_quota:
4143 	if (XFS_IS_QUOTA_ON(mp))
4144 		xfs_quota_unreserve_blkres(ip, alen);
4145 	return error;
4146 }
4147 
4148 static int
4149 xfs_bmap_alloc_userdata(
4150 	struct xfs_bmalloca	*bma)
4151 {
4152 	struct xfs_mount	*mp = bma->ip->i_mount;
4153 	int			whichfork = xfs_bmapi_whichfork(bma->flags);
4154 	int			error;
4155 
4156 	/*
4157 	 * Set the data type being allocated. For the data fork, the first data
4158 	 * in the file is treated differently to all other allocations. For the
4159 	 * attribute fork, we only need to ensure the allocated range is not on
4160 	 * the busy list.
4161 	 */
4162 	bma->datatype = XFS_ALLOC_NOBUSY;
4163 	if (whichfork == XFS_DATA_FORK || whichfork == XFS_COW_FORK) {
4164 		bma->datatype |= XFS_ALLOC_USERDATA;
4165 		if (bma->offset == 0)
4166 			bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4167 
4168 		if (mp->m_dalign && bma->length >= mp->m_dalign) {
4169 			error = xfs_bmap_isaeof(bma, whichfork);
4170 			if (error)
4171 				return error;
4172 		}
4173 
4174 		if (XFS_IS_REALTIME_INODE(bma->ip))
4175 			return xfs_bmap_rtalloc(bma);
4176 	}
4177 
4178 	if (unlikely(XFS_TEST_ERROR(false, mp,
4179 			XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
4180 		return xfs_bmap_exact_minlen_extent_alloc(bma);
4181 
4182 	return xfs_bmap_btalloc(bma);
4183 }
4184 
4185 static int
4186 xfs_bmapi_allocate(
4187 	struct xfs_bmalloca	*bma)
4188 {
4189 	struct xfs_mount	*mp = bma->ip->i_mount;
4190 	int			whichfork = xfs_bmapi_whichfork(bma->flags);
4191 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4192 	int			tmp_logflags = 0;
4193 	int			error;
4194 
4195 	ASSERT(bma->length > 0);
4196 
4197 	/*
4198 	 * For the wasdelay case, we could also just allocate the blocks asked
4199 	 * for in this bmap call, but allocating the whole delalloc extent is better.
4200 	 */
4201 	if (bma->wasdel) {
4202 		bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4203 		bma->offset = bma->got.br_startoff;
4204 		if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev))
4205 			bma->prev.br_startoff = NULLFILEOFF;
4206 	} else {
4207 		bma->length = XFS_FILBLKS_MIN(bma->length, XFS_MAX_BMBT_EXTLEN);
4208 		if (!bma->eof)
4209 			bma->length = XFS_FILBLKS_MIN(bma->length,
4210 					bma->got.br_startoff - bma->offset);
4211 	}
4212 
4213 	if (bma->flags & XFS_BMAPI_CONTIG)
4214 		bma->minlen = bma->length;
4215 	else
4216 		bma->minlen = 1;
4217 
4218 	if (bma->flags & XFS_BMAPI_METADATA) {
4219 		if (unlikely(XFS_TEST_ERROR(false, mp,
4220 				XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
4221 			error = xfs_bmap_exact_minlen_extent_alloc(bma);
4222 		else
4223 			error = xfs_bmap_btalloc(bma);
4224 	} else {
4225 		error = xfs_bmap_alloc_userdata(bma);
4226 	}
4227 	if (error || bma->blkno == NULLFSBLOCK)
4228 		return error;
4229 
4230 	if (bma->flags & XFS_BMAPI_ZERO) {
4231 		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
4232 		if (error)
4233 			return error;
4234 	}
4235 
4236 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
4237 		bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4238 	/*
4239 	 * Bump the number of extents we've allocated
4240 	 * in this call.
4241 	 */
4242 	bma->nallocs++;
4243 
4244 	if (bma->cur && bma->wasdel)
4245 		bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
4246 
4247 	bma->got.br_startoff = bma->offset;
4248 	bma->got.br_startblock = bma->blkno;
4249 	bma->got.br_blockcount = bma->length;
4250 	bma->got.br_state = XFS_EXT_NORM;
4251 
4252 	if (bma->flags & XFS_BMAPI_PREALLOC)
4253 		bma->got.br_state = XFS_EXT_UNWRITTEN;
4254 
4255 	if (bma->wasdel)
4256 		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4257 	else
4258 		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4259 				whichfork, &bma->icur, &bma->cur, &bma->got,
4260 				&bma->logflags, bma->flags);
4261 
4262 	bma->logflags |= tmp_logflags;
4263 	if (error)
4264 		return error;
4265 
4266 	/*
4267 	 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4268 	 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4269 	 * the neighbouring ones.
4270 	 */
4271 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4272 
4273 	ASSERT(bma->got.br_startoff <= bma->offset);
4274 	ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4275 	       bma->offset + bma->length);
4276 	ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4277 	       bma->got.br_state == XFS_EXT_UNWRITTEN);
4278 	return 0;
4279 }
4280 
4281 STATIC int
4282 xfs_bmapi_convert_unwritten(
4283 	struct xfs_bmalloca	*bma,
4284 	struct xfs_bmbt_irec	*mval,
4285 	xfs_filblks_t		len,
4286 	uint32_t		flags)
4287 {
4288 	int			whichfork = xfs_bmapi_whichfork(flags);
4289 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4290 	int			tmp_logflags = 0;
4291 	int			error;
4292 
4293 	/* check if we need to do unwritten->real conversion */
4294 	if (mval->br_state == XFS_EXT_UNWRITTEN &&
4295 	    (flags & XFS_BMAPI_PREALLOC))
4296 		return 0;
4297 
4298 	/* check if we need to do real->unwritten conversion */
4299 	if (mval->br_state == XFS_EXT_NORM &&
4300 	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4301 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4302 		return 0;
4303 
4304 	/*
4305 	 * Toggle the mapping between written and unwritten state.
4306 	 */
4307 	ASSERT(mval->br_blockcount <= len);
4308 	if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
4309 		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4310 					bma->ip, whichfork);
4311 	}
4312 	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4313 				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4314 
4315 	/*
4316 	 * Before insertion into the bmbt, zero the range being converted
4317 	 * if required.
4318 	 */
4319 	if (flags & XFS_BMAPI_ZERO) {
4320 		error = xfs_zero_extent(bma->ip, mval->br_startblock,
4321 					mval->br_blockcount);
4322 		if (error)
4323 			return error;
4324 	}
4325 
4326 	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4327 			&bma->icur, &bma->cur, mval, &tmp_logflags);
4328 	/*
4329 	 * Log the inode core unconditionally in the unwritten extent conversion
4330 	 * path because the conversion might not have done so (e.g., if the
4331 	 * extent count hasn't changed). We need to make sure the inode is dirty
4332 	 * in the transaction for the sake of fsync(), even if nothing has
4333 	 * changed, because fsync() will not force the log for this transaction
4334 	 * unless it sees the inode pinned.
4335 	 *
4336 	 * Note: If we're only converting cow fork extents, there aren't
4337 	 * any on-disk updates to make, so we don't need to log anything.
4338 	 */
4339 	if (whichfork != XFS_COW_FORK)
4340 		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4341 	if (error)
4342 		return error;
4343 
4344 	/*
4345 	 * Update our extent pointer, given that
4346 	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4347 	 * of the neighbouring ones.
4348 	 */
4349 	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4350 
4351 	/*
4352 	 * We may have combined previously unwritten space with written space,
4353 	 * so generate another request.
4354 	 */
4355 	if (mval->br_blockcount < len)
4356 		return -EAGAIN;
4357 	return 0;
4358 }
4359 
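/*
 * Number of blocks that must be left free for a subsequent insert into
 * this fork to succeed: nothing if the transaction is already tied to a
 * specific AG (t_highest_agno is set), one block if the fork could still
 * convert to btree format, otherwise one block per existing bmbt level
 * plus one to cover a worst-case split up through a new root.
 */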
4360 xfs_extlen_t
4361 xfs_bmapi_minleft(
4362 	struct xfs_trans	*tp,
4363 	struct xfs_inode	*ip,
4364 	int			fork)
4365 {
4366 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, fork);
4367 
4368 	if (tp && tp->t_highest_agno != NULLAGNUMBER)
4369 		return 0;
4370 	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4371 		return 1;
4372 	return be16_to_cpu(ifp->if_broot->bb_level) + 1;
4373 }
4374 
4375 /*
4376  * Log whatever the flags say, even if error.  Otherwise we might miss detecting
4377  * a case where the data is changed, there's an error, and it's not logged so we
4378  * don't shut down when we should.  Don't bother logging extents/btree changes if
4379  * we converted to the other format.
4380  */
4381 static void
4382 xfs_bmapi_finish(
4383 	struct xfs_bmalloca	*bma,
4384 	int			whichfork,
4385 	int			error)
4386 {
4387 	struct xfs_ifork	*ifp = xfs_ifork_ptr(bma->ip, whichfork);
4388 
4389 	if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4390 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4391 		bma->logflags &= ~xfs_ilog_fext(whichfork);
4392 	else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4393 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
4394 		bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4395 
4396 	if (bma->logflags)
4397 		xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4398 	if (bma->cur)
4399 		xfs_btree_del_cursor(bma->cur, error);
4400 }
4401 
4402 /*
4403  * Map file blocks to filesystem blocks, and allocate blocks or convert the
4404  * extent state if necessary.  Detailed behaviour is controlled by the flags
4405  * parameter.  Only allocates blocks from a single allocation group, to avoid
4406  * locking problems.
4407  */
4408 int
4409 xfs_bmapi_write(
4410 	struct xfs_trans	*tp,		/* transaction pointer */
4411 	struct xfs_inode	*ip,		/* incore inode */
4412 	xfs_fileoff_t		bno,		/* starting file offs. mapped */
4413 	xfs_filblks_t		len,		/* length to map in file */
4414 	uint32_t		flags,		/* XFS_BMAPI_... */
4415 	xfs_extlen_t		total,		/* total blocks needed */
4416 	struct xfs_bmbt_irec	*mval,		/* output: map values */
4417 	int			*nmap)		/* i/o: mval size/count */
4418 {
4419 	struct xfs_bmalloca	bma = {
4420 		.tp		= tp,
4421 		.ip		= ip,
4422 		.total		= total,
4423 	};
4424 	struct xfs_mount	*mp = ip->i_mount;
4425 	int			whichfork = xfs_bmapi_whichfork(flags);
4426 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4427 	xfs_fileoff_t		end;		/* end of mapped file region */
4428 	bool			eof = false;	/* after the end of extents */
4429 	int			error;		/* error return */
4430 	int			n;		/* current extent index */
4431 	xfs_fileoff_t		obno;		/* old block number (offset) */
4432 
4433 #ifdef DEBUG
4434 	xfs_fileoff_t		orig_bno;	/* original block number value */
4435 	int			orig_flags;	/* original flags arg value */
4436 	xfs_filblks_t		orig_len;	/* original value of len arg */
4437 	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
4438 	int			orig_nmap;	/* original value of *nmap */
4439 
4440 	orig_bno = bno;
4441 	orig_len = len;
4442 	orig_flags = flags;
4443 	orig_mval = mval;
4444 	orig_nmap = *nmap;
4445 #endif
4446 
4447 	ASSERT(*nmap >= 1);
4448 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4449 	ASSERT(tp != NULL);
4450 	ASSERT(len > 0);
4451 	ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
4452 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4453 	ASSERT(!(flags & XFS_BMAPI_REMAP));
4454 
4455 	/* zeroing is currently only for data extents, not metadata */
4456 	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4457 			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4458 	/*
4459 	 * We can allocate unwritten extents or pre-zero allocated blocks,
4460 	 * but it makes no sense to do both at once: this would result in
4461 	 * zeroing the unwritten extent twice while it would still remain
4462 	 * an unwritten extent.
4463 	 */
4464 	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4465 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4466 
4467 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4468 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4469 		xfs_bmap_mark_sick(ip, whichfork);
4470 		return -EFSCORRUPTED;
4471 	}
4472 
4473 	if (xfs_is_shutdown(mp))
4474 		return -EIO;
4475 
4476 	XFS_STATS_INC(mp, xs_blk_mapw);
4477 
4478 	error = xfs_iread_extents(tp, ip, whichfork);
4479 	if (error)
4480 		goto error0;
4481 
4482 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4483 		eof = true;
4484 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4485 		bma.prev.br_startoff = NULLFILEOFF;
4486 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4487 
4488 	n = 0;
4489 	end = bno + len;
4490 	obno = bno;
4491 	while (bno < end && n < *nmap) {
4492 		bool			need_alloc = false, wasdelay = false;
4493 
4494 		/* in hole or beyond EOF? */
4495 		if (eof || bma.got.br_startoff > bno) {
4496 			/*
4497 			 * CoW fork conversions should /never/ hit EOF or
4498 			 * holes.  There should always be something for us
4499 			 * to work on.
4500 			 */
4501 			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4502 			         (flags & XFS_BMAPI_COWFORK)));
4503 
4504 			need_alloc = true;
4505 		} else if (isnullstartblock(bma.got.br_startblock)) {
4506 			wasdelay = true;
4507 		}
4508 
4509 		/*
4510 		 * First, deal with the hole before the allocated space
4511 		 * that we found, if any.
4512 		 */
4513 		if (need_alloc || wasdelay) {
4514 			bma.eof = eof;
4515 			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4516 			bma.wasdel = wasdelay;
4517 			bma.offset = bno;
4518 			bma.flags = flags;
4519 
4520 			/*
4521 			 * There's a 32/64 bit type mismatch between the
4522 			 * allocation length request (which can be 64 bits in
4523 			 * length) and the bma length request, which is
4524 			 * xfs_extlen_t and therefore 32 bits. Hence we have to
4525 			 * check for 32-bit overflows and handle them here.
4526 			 */
4527 			if (len > (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN)
4528 				bma.length = XFS_MAX_BMBT_EXTLEN;
4529 			else
4530 				bma.length = len;
4531 
4532 			ASSERT(len > 0);
4533 			ASSERT(bma.length > 0);
4534 			error = xfs_bmapi_allocate(&bma);
4535 			if (error)
4536 				goto error0;
4537 			if (bma.blkno == NULLFSBLOCK)
4538 				break;
4539 
4540 			/*
4541 			 * If this is a CoW allocation, record the data in
4542 			 * the refcount btree for orphan recovery.
4543 			 */
4544 			if (whichfork == XFS_COW_FORK)
4545 				xfs_refcount_alloc_cow_extent(tp, bma.blkno,
4546 						bma.length);
4547 		}
4548 
4549 		/* Deal with the allocated space we found.  */
4550 		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4551 							end, n, flags);
4552 
4553 		/* Execute unwritten extent conversion if necessary */
4554 		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4555 		if (error == -EAGAIN)
4556 			continue;
4557 		if (error)
4558 			goto error0;
4559 
4560 		/* update the extent map to return */
4561 		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4562 
4563 		/*
4564 		 * If we're done, stop now.  Stop when we've allocated
4565 		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
4566 		 * the transaction may get too big.
4567 		 */
4568 		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4569 			break;
4570 
4571 		/* Else go on to the next record. */
4572 		bma.prev = bma.got;
4573 		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4574 			eof = true;
4575 	}
4576 	*nmap = n;
4577 
4578 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4579 			whichfork);
4580 	if (error)
4581 		goto error0;
4582 
4583 	ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4584 	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4585 	xfs_bmapi_finish(&bma, whichfork, 0);
4586 	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4587 		orig_nmap, *nmap);
4588 	return 0;
4589 error0:
4590 	xfs_bmapi_finish(&bma, whichfork, error);
4591 	return error;
4592 }
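/*
 * A minimal caller sketch (illustrative values): allocate up to one
 * unwritten extent over a range of the file:
 *
 *	struct xfs_bmbt_irec	imap;
 *	int			nimaps = 1;
 *
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
 *			XFS_BMAPI_PREALLOC, 0, &imap, &nimaps);
 *
 * On return nimaps holds the number of mappings filled in; the mapped
 * range may be shorter than requested.
 */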
4593 
4594 /*
4595  * Convert an existing delalloc extent to real blocks based on file offset. This
4596  * attempts to allocate the entire delalloc extent and may require multiple
4597  * invocations to allocate the target offset if a large enough physical extent
4598  * is not available.
4599  */
4600 int
4601 xfs_bmapi_convert_delalloc(
4602 	struct xfs_inode	*ip,
4603 	int			whichfork,
4604 	xfs_off_t		offset,
4605 	struct iomap		*iomap,
4606 	unsigned int		*seq)
4607 {
4608 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4609 	struct xfs_mount	*mp = ip->i_mount;
4610 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
4611 	struct xfs_bmalloca	bma = { NULL };
4612 	uint16_t		flags = 0;
4613 	struct xfs_trans	*tp;
4614 	int			error;
4615 
4616 	if (whichfork == XFS_COW_FORK)
4617 		flags |= IOMAP_F_SHARED;
4618 
4619 	/*
4620 	 * Space for the extent and indirect blocks was reserved when the
4621 	 * delalloc extent was created so there's no need to do so here.
4622 	 */
4623 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4624 				XFS_TRANS_RESERVE, &tp);
4625 	if (error)
4626 		return error;
4627 
4628 	xfs_ilock(ip, XFS_ILOCK_EXCL);
4629 	xfs_trans_ijoin(tp, ip, 0);
4630 
4631 	error = xfs_iext_count_may_overflow(ip, whichfork,
4632 			XFS_IEXT_ADD_NOSPLIT_CNT);
4633 	if (error == -EFBIG)
4634 		error = xfs_iext_count_upgrade(tp, ip,
4635 				XFS_IEXT_ADD_NOSPLIT_CNT);
4636 	if (error)
4637 		goto out_trans_cancel;
4638 
4639 	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4640 	    bma.got.br_startoff > offset_fsb) {
4641 		/*
4642 		 * No extent found in the range we are trying to convert.  This
4643 		 * should only happen for the COW fork, where another thread
4644 		 * might have moved the extent to the data fork in the meantime.
4645 		 */
4646 		WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4647 		error = -EAGAIN;
4648 		goto out_trans_cancel;
4649 	}
4650 
4651 	/*
4652 	 * If we find a real extent here we raced with another thread converting
4653 	 * the extent.  Just return the real extent at this offset.
4654 	 */
4655 	if (!isnullstartblock(bma.got.br_startblock)) {
4656 		xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4657 				xfs_iomap_inode_sequence(ip, flags));
4658 		*seq = READ_ONCE(ifp->if_seq);
4659 		goto out_trans_cancel;
4660 	}
4661 
4662 	bma.tp = tp;
4663 	bma.ip = ip;
4664 	bma.wasdel = true;
4665 	bma.offset = bma.got.br_startoff;
4666 	bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount,
4667 			XFS_MAX_BMBT_EXTLEN);
4668 	bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4669 
4670 	/*
4671 	 * When we're converting the delalloc reservations backing dirty pages
4672 	 * in the page cache, we must be careful about how we create the new
4673 	 * extents:
4674 	 *
4675 	 * New CoW fork extents are created unwritten, turned into real extents
4676 	 * when we're about to write the data to disk, and mapped into the data
4677 	 * fork after the write finishes.  End of story.
4678 	 *
4679 	 * New data fork extents must be mapped in as unwritten and converted
4680 	 * to real extents after the write succeeds to avoid exposing stale
4681 	 * disk contents if we crash.
4682 	 */
4683 	bma.flags = XFS_BMAPI_PREALLOC;
4684 	if (whichfork == XFS_COW_FORK)
4685 		bma.flags |= XFS_BMAPI_COWFORK;
4686 
4687 	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4688 		bma.prev.br_startoff = NULLFILEOFF;
4689 
4690 	error = xfs_bmapi_allocate(&bma);
4691 	if (error)
4692 		goto out_finish;
4693 
4694 	error = -ENOSPC;
4695 	if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
4696 		goto out_finish;
4697 	if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock))) {
4698 		xfs_bmap_mark_sick(ip, whichfork);
4699 		error = -EFSCORRUPTED;
4700 		goto out_finish;
4701 	}
4702 
4703 	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4704 	XFS_STATS_INC(mp, xs_xstrat_quick);
4705 
4706 	ASSERT(!isnullstartblock(bma.got.br_startblock));
4707 	xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags,
4708 				xfs_iomap_inode_sequence(ip, flags));
4709 	*seq = READ_ONCE(ifp->if_seq);
4710 
4711 	if (whichfork == XFS_COW_FORK)
4712 		xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4713 
4714 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4715 			whichfork);
4716 	if (error)
4717 		goto out_finish;
4718 
4719 	xfs_bmapi_finish(&bma, whichfork, 0);
4720 	error = xfs_trans_commit(tp);
4721 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4722 	return error;
4723 
4724 out_finish:
4725 	xfs_bmapi_finish(&bma, whichfork, error);
4726 out_trans_cancel:
4727 	xfs_trans_cancel(tp);
4728 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4729 	return error;
4730 }
4731 
4732 int
4733 xfs_bmapi_remap(
4734 	struct xfs_trans	*tp,
4735 	struct xfs_inode	*ip,
4736 	xfs_fileoff_t		bno,
4737 	xfs_filblks_t		len,
4738 	xfs_fsblock_t		startblock,
4739 	uint32_t		flags)
4740 {
4741 	struct xfs_mount	*mp = ip->i_mount;
4742 	struct xfs_ifork	*ifp;
4743 	struct xfs_btree_cur	*cur = NULL;
4744 	struct xfs_bmbt_irec	got;
4745 	struct xfs_iext_cursor	icur;
4746 	int			whichfork = xfs_bmapi_whichfork(flags);
4747 	int			logflags = 0, error;
4748 
4749 	ifp = xfs_ifork_ptr(ip, whichfork);
4750 	ASSERT(len > 0);
4751 	ASSERT(len <= (xfs_filblks_t)XFS_MAX_BMBT_EXTLEN);
4752 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
4753 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4754 			   XFS_BMAPI_NORMAP)));
4755 	ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4756 			(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4757 
4758 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4759 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4760 		xfs_bmap_mark_sick(ip, whichfork);
4761 		return -EFSCORRUPTED;
4762 	}
4763 
4764 	if (xfs_is_shutdown(mp))
4765 		return -EIO;
4766 
4767 	error = xfs_iread_extents(tp, ip, whichfork);
4768 	if (error)
4769 		return error;
4770 
4771 	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4772 		/* make sure we only reflink into a hole. */
4773 		ASSERT(got.br_startoff > bno);
4774 		ASSERT(got.br_startoff - bno >= len);
4775 	}
4776 
4777 	ip->i_nblocks += len;
4778 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4779 
4780 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
4781 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4782 
4783 	got.br_startoff = bno;
4784 	got.br_startblock = startblock;
4785 	got.br_blockcount = len;
4786 	if (flags & XFS_BMAPI_PREALLOC)
4787 		got.br_state = XFS_EXT_UNWRITTEN;
4788 	else
4789 		got.br_state = XFS_EXT_NORM;
4790 
4791 	error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4792 			&cur, &got, &logflags, flags);
4793 	if (error)
4794 		goto error0;
4795 
4796 	error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
4797 
4798 error0:
4799 	if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
4800 		logflags &= ~XFS_ILOG_DEXT;
4801 	else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
4802 		logflags &= ~XFS_ILOG_DBROOT;
4803 
4804 	if (logflags)
4805 		xfs_trans_log_inode(tp, ip, logflags);
4806 	if (cur)
4807 		xfs_btree_del_cursor(cur, error);
4808 	return error;
4809 }
4810 
4811 /*
4812  * When a delalloc extent is split (e.g., due to a hole punch), the original
4813  * indlen reservation must be shared across the two new extents that are left
4814  * behind.
4815  *
4816  * Given the original reservation and the worst case indlen for the two new
4817  * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4818  * reservation fairly across the two new extents. If necessary, steal available
4819  * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4820  * ores == 1). The number of stolen blocks is returned. The availability and
4821  * subsequent accounting of stolen blocks is the responsibility of the caller.
4822  */
4823 static xfs_filblks_t
4824 xfs_bmap_split_indlen(
4825 	xfs_filblks_t			ores,		/* original res. */
4826 	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
4827 	xfs_filblks_t			*indlen2,	/* ext2 worst indlen */
4828 	xfs_filblks_t			avail)		/* stealable blocks */
4829 {
4830 	xfs_filblks_t			len1 = *indlen1;
4831 	xfs_filblks_t			len2 = *indlen2;
4832 	xfs_filblks_t			nres = len1 + len2; /* new total res. */
4833 	xfs_filblks_t			stolen = 0;
4834 	xfs_filblks_t			resfactor;
4835 
4836 	/*
4837 	 * Steal as many blocks as we can to try and satisfy the worst case
4838 	 * indlen for both new extents.
4839 	 */
4840 	if (ores < nres && avail)
4841 		stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4842 	ores += stolen;
4843 
4844 	/* nothing else to do if we've satisfied the new reservation */
4845 	if (ores >= nres)
4846 		return stolen;
4847 
4848 	/*
4849 	 * We can't meet the total required reservation for the two extents.
4850 	 * Calculate the percent of the overall shortage between both extents
4851 	 * and apply this percentage to each of the requested indlen values.
4852 	 * This distributes the shortage fairly and reduces the chances that one
4853 	 * of the two extents is left with nothing when extents are repeatedly
4854 	 * split.
4855 	 */
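	/*
	 * Worked example with illustrative numbers: ores = 10, *indlen1 = 8,
	 * *indlen2 = 6, avail = 2.  Two blocks were stolen above (ores is
	 * now 12), so resfactor = 1200 / 14 = 85, len1 = 8 * 85 / 100 = 6
	 * and len2 = 6 * 85 / 100 = 5.  The one leftover block then goes to
	 * len1 in the loop below, leaving *indlen1 = 7 and *indlen2 = 5.
	 */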
4856 	resfactor = (ores * 100);
4857 	do_div(resfactor, nres);
4858 	len1 *= resfactor;
4859 	do_div(len1, 100);
4860 	len2 *= resfactor;
4861 	do_div(len2, 100);
4862 	ASSERT(len1 + len2 <= ores);
4863 	ASSERT(len1 < *indlen1 && len2 < *indlen2);
4864 
4865 	/*
4866 	 * Hand out the remainder to each extent. If one of the two reservations
4867 	 * is zero, we want to make sure that one gets a block first. The loop
4868 	 * below starts with len1, so hand len2 a block right off the bat if it
4869 	 * is zero.
4870 	 */
4871 	ores -= (len1 + len2);
4872 	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4873 	if (ores && !len2 && *indlen2) {
4874 		len2++;
4875 		ores--;
4876 	}
4877 	while (ores) {
4878 		if (len1 < *indlen1) {
4879 			len1++;
4880 			ores--;
4881 		}
4882 		if (!ores)
4883 			break;
4884 		if (len2 < *indlen2) {
4885 			len2++;
4886 			ores--;
4887 		}
4888 	}
4889 
4890 	*indlen1 = len1;
4891 	*indlen2 = len2;
4892 
4893 	return stolen;
4894 }
4895 
4896 int
4897 xfs_bmap_del_extent_delay(
4898 	struct xfs_inode	*ip,
4899 	int			whichfork,
4900 	struct xfs_iext_cursor	*icur,
4901 	struct xfs_bmbt_irec	*got,
4902 	struct xfs_bmbt_irec	*del)
4903 {
4904 	struct xfs_mount	*mp = ip->i_mount;
4905 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
4906 	struct xfs_bmbt_irec	new;
4907 	int64_t			da_old, da_new, da_diff = 0;
4908 	xfs_fileoff_t		del_endoff, got_endoff;
4909 	xfs_filblks_t		got_indlen, new_indlen, stolen;
4910 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
4911 	int			error = 0;
4912 	bool			isrt;
4913 
4914 	XFS_STATS_INC(mp, xs_del_exlist);
4915 
4916 	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4917 	del_endoff = del->br_startoff + del->br_blockcount;
4918 	got_endoff = got->br_startoff + got->br_blockcount;
4919 	da_old = startblockval(got->br_startblock);
4920 	da_new = 0;
4921 
4922 	ASSERT(del->br_blockcount > 0);
4923 	ASSERT(got->br_startoff <= del->br_startoff);
4924 	ASSERT(got_endoff >= del_endoff);
4925 
4926 	if (isrt)
4927 		xfs_mod_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
4928 
4929 	/*
4930 	 * Update the inode delalloc counter now and wait to update the
4931 	 * sb counters as we might have to borrow some blocks for the
4932 	 * indirect block accounting.
4933 	 */
4934 	ASSERT(!isrt);
4935 	error = xfs_quota_unreserve_blkres(ip, del->br_blockcount);
4936 	if (error)
4937 		return error;
4938 	ip->i_delayed_blks -= del->br_blockcount;
4939 
4940 	if (got->br_startoff == del->br_startoff)
4941 		state |= BMAP_LEFT_FILLING;
4942 	if (got_endoff == del_endoff)
4943 		state |= BMAP_RIGHT_FILLING;
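	/*
	 * Four cases: the deletion covers the whole extent (both bits set,
	 * remove the record), only its start (LEFT_FILLING), only its end
	 * (RIGHT_FILLING), or the middle (neither bit set), which splits
	 * the extent in two.
	 */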
4944 
4945 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4946 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4947 		/*
4948 		 * Matches the whole extent.  Delete the entry.
4949 		 */
4950 		xfs_iext_remove(ip, icur, state);
4951 		xfs_iext_prev(ifp, icur);
4952 		break;
4953 	case BMAP_LEFT_FILLING:
4954 		/*
4955 		 * Deleting the first part of the extent.
4956 		 */
4957 		got->br_startoff = del_endoff;
4958 		got->br_blockcount -= del->br_blockcount;
4959 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4960 				got->br_blockcount), da_old);
4961 		got->br_startblock = nullstartblock((int)da_new);
4962 		xfs_iext_update_extent(ip, state, icur, got);
4963 		break;
4964 	case BMAP_RIGHT_FILLING:
4965 		/*
4966 		 * Deleting the last part of the extent.
4967 		 */
4968 		got->br_blockcount = got->br_blockcount - del->br_blockcount;
4969 		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4970 				got->br_blockcount), da_old);
4971 		got->br_startblock = nullstartblock((int)da_new);
4972 		xfs_iext_update_extent(ip, state, icur, got);
4973 		break;
4974 	case 0:
4975 		/*
4976 		 * Deleting the middle of the extent.
4977 		 *
4978 		 * Distribute the original indlen reservation across the two new
4979 		 * extents.  Steal blocks from the deleted extent if necessary.
4980 		 * Stealing blocks simply fudges the fdblocks accounting below.
4981 		 * Warn if either of the new indlen reservations is zero as this
4982 		 * can lead to delalloc problems.
4983 		 */
4984 		got->br_blockcount = del->br_startoff - got->br_startoff;
4985 		got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4986 
4987 		new.br_blockcount = got_endoff - del_endoff;
4988 		new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4989 
4990 		WARN_ON_ONCE(!got_indlen || !new_indlen);
4991 		stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4992 						       del->br_blockcount);
4993 
4994 		got->br_startblock = nullstartblock((int)got_indlen);
4995 
4996 		new.br_startoff = del_endoff;
4997 		new.br_state = got->br_state;
4998 		new.br_startblock = nullstartblock((int)new_indlen);
4999 
5000 		xfs_iext_update_extent(ip, state, icur, got);
5001 		xfs_iext_next(ifp, icur);
5002 		xfs_iext_insert(ip, icur, &new, state);
5003 
5004 		da_new = got_indlen + new_indlen - stolen;
5005 		del->br_blockcount -= stolen;
5006 		break;
5007 	}
5008 
5009 	ASSERT(da_old >= da_new);
5010 	da_diff = da_old - da_new;
5011 	if (!isrt)
5012 		da_diff += del->br_blockcount;
5013 	if (da_diff) {
5014 		xfs_mod_fdblocks(mp, da_diff, false);
5015 		xfs_mod_delalloc(mp, -da_diff);
5016 	}
5017 	return error;
5018 }
5019 
5020 void
5021 xfs_bmap_del_extent_cow(
5022 	struct xfs_inode	*ip,
5023 	struct xfs_iext_cursor	*icur,
5024 	struct xfs_bmbt_irec	*got,
5025 	struct xfs_bmbt_irec	*del)
5026 {
5027 	struct xfs_mount	*mp = ip->i_mount;
5028 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
5029 	struct xfs_bmbt_irec	new;
5030 	xfs_fileoff_t		del_endoff, got_endoff;
5031 	uint32_t		state = BMAP_COWFORK;
5032 
5033 	XFS_STATS_INC(mp, xs_del_exlist);
5034 
5035 	del_endoff = del->br_startoff + del->br_blockcount;
5036 	got_endoff = got->br_startoff + got->br_blockcount;
5037 
5038 	ASSERT(del->br_blockcount > 0);
5039 	ASSERT(got->br_startoff <= del->br_startoff);
5040 	ASSERT(got_endoff >= del_endoff);
5041 	ASSERT(!isnullstartblock(got->br_startblock));
5042 
5043 	if (got->br_startoff == del->br_startoff)
5044 		state |= BMAP_LEFT_FILLING;
5045 	if (got_endoff == del_endoff)
5046 		state |= BMAP_RIGHT_FILLING;
5047 
5048 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5049 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5050 		/*
5051 		 * Matches the whole extent.  Delete the entry.
5052 		 */
5053 		xfs_iext_remove(ip, icur, state);
5054 		xfs_iext_prev(ifp, icur);
5055 		break;
5056 	case BMAP_LEFT_FILLING:
5057 		/*
5058 		 * Deleting the first part of the extent.
5059 		 */
5060 		got->br_startoff = del_endoff;
5061 		got->br_blockcount -= del->br_blockcount;
5062 		got->br_startblock = del->br_startblock + del->br_blockcount;
5063 		xfs_iext_update_extent(ip, state, icur, got);
5064 		break;
5065 	case BMAP_RIGHT_FILLING:
5066 		/*
5067 		 * Deleting the last part of the extent.
5068 		 */
5069 		got->br_blockcount -= del->br_blockcount;
5070 		xfs_iext_update_extent(ip, state, icur, got);
5071 		break;
5072 	case 0:
5073 		/*
5074 		 * Deleting the middle of the extent.
5075 		 */
5076 		got->br_blockcount = del->br_startoff - got->br_startoff;
5077 
5078 		new.br_startoff = del_endoff;
5079 		new.br_blockcount = got_endoff - del_endoff;
5080 		new.br_state = got->br_state;
5081 		new.br_startblock = del->br_startblock + del->br_blockcount;
5082 
5083 		xfs_iext_update_extent(ip, state, icur, got);
5084 		xfs_iext_next(ifp, icur);
5085 		xfs_iext_insert(ip, icur, &new, state);
5086 		break;
5087 	}
5088 	ip->i_delayed_blks -= del->br_blockcount;
5089 }
5090 
5091 /*
5092  * Called by xfs_bmapi to update file extent records and the btree
5093  * after removing space.
5094  */
5095 STATIC int				/* error */
5096 xfs_bmap_del_extent_real(
5097 	xfs_inode_t		*ip,	/* incore inode pointer */
5098 	xfs_trans_t		*tp,	/* current transaction pointer */
5099 	struct xfs_iext_cursor	*icur,
5100 	struct xfs_btree_cur	*cur,	/* if null, not a btree */
5101 	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
5102 	int			*logflagsp, /* inode logging flags */
5103 	int			whichfork, /* data or attr fork */
5104 	uint32_t		bflags)	/* bmapi flags */
5105 {
5106 	xfs_fsblock_t		del_endblock = 0;	/* first block past del */
5107 	xfs_fileoff_t		del_endoff;	/* first offset past del */
5108 	int			do_fx;	/* free extent at end of routine */
5109 	int			error;	/* error return value */
5110 	struct xfs_bmbt_irec	got;	/* current extent entry */
5111 	xfs_fileoff_t		got_endoff;	/* first offset past got */
5112 	int			i;	/* temp state */
5113 	struct xfs_ifork	*ifp;	/* inode fork pointer */
5114 	xfs_mount_t		*mp;	/* mount structure */
5115 	xfs_filblks_t		nblks;	/* quota/sb block count */
5116 	xfs_bmbt_irec_t		new;	/* new record to be inserted */
5117 	/* REFERENCED */
5118 	uint			qfield;	/* quota field to update */
5119 	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
5120 	struct xfs_bmbt_irec	old;
5121 
5122 	*logflagsp = 0;
5123 
5124 	mp = ip->i_mount;
5125 	XFS_STATS_INC(mp, xs_del_exlist);
5126 
5127 	ifp = xfs_ifork_ptr(ip, whichfork);
5128 	ASSERT(del->br_blockcount > 0);
5129 	xfs_iext_get_extent(ifp, icur, &got);
5130 	ASSERT(got.br_startoff <= del->br_startoff);
5131 	del_endoff = del->br_startoff + del->br_blockcount;
5132 	got_endoff = got.br_startoff + got.br_blockcount;
5133 	ASSERT(got_endoff >= del_endoff);
5134 	ASSERT(!isnullstartblock(got.br_startblock));
5135 	qfield = 0;
5136 
5137 	/*
5138 	 * If the directory code is running with no block reservation, the
5139 	 * deleted block is in the middle of its extent,
5140 	 * and the resulting insert of an extent would cause transformation to
5141 	 * btree format, then reject it.  The calling code will then swap blocks
5142 	 * around instead.  We have to do this now, rather than waiting for the
5143 	 * conversion to btree format, since the transaction will be dirty then.
5144 	 */
5145 	if (tp->t_blk_res == 0 &&
5146 	    ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
5147 	    ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
5148 	    del->br_startoff > got.br_startoff && del_endoff < got_endoff)
5149 		return -ENOSPC;
5150 
5151 	*logflagsp = XFS_ILOG_CORE;
5152 	if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
5153 		if (!(bflags & XFS_BMAPI_REMAP)) {
5154 			error = xfs_rtfree_blocks(tp, del->br_startblock,
5155 					del->br_blockcount);
5156 			if (error)
5157 				return error;
5158 		}
5159 
5160 		do_fx = 0;
5161 		qfield = XFS_TRANS_DQ_RTBCOUNT;
5162 	} else {
5163 		do_fx = 1;
5164 		qfield = XFS_TRANS_DQ_BCOUNT;
5165 	}
5166 	nblks = del->br_blockcount;
5167 
5168 	del_endblock = del->br_startblock + del->br_blockcount;
5169 	if (cur) {
5170 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
5171 		if (error)
5172 			return error;
5173 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5174 			xfs_btree_mark_sick(cur);
5175 			return -EFSCORRUPTED;
5176 		}
5177 	}
5178 
5179 	if (got.br_startoff == del->br_startoff)
5180 		state |= BMAP_LEFT_FILLING;
5181 	if (got_endoff == del_endoff)
5182 		state |= BMAP_RIGHT_FILLING;
5183 
5184 	switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5185 	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5186 		/*
5187 		 * Matches the whole extent.  Delete the entry.
5188 		 */
5189 		xfs_iext_remove(ip, icur, state);
5190 		xfs_iext_prev(ifp, icur);
5191 		ifp->if_nextents--;
5192 
5193 		*logflagsp |= XFS_ILOG_CORE;
5194 		if (!cur) {
5195 			*logflagsp |= xfs_ilog_fext(whichfork);
5196 			break;
5197 		}
5198 		if ((error = xfs_btree_delete(cur, &i)))
5199 			return error;
5200 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5201 			xfs_btree_mark_sick(cur);
5202 			return -EFSCORRUPTED;
5203 		}
5204 		break;
5205 	case BMAP_LEFT_FILLING:
5206 		/*
5207 		 * Deleting the first part of the extent.
5208 		 */
5209 		got.br_startoff = del_endoff;
5210 		got.br_startblock = del_endblock;
5211 		got.br_blockcount -= del->br_blockcount;
5212 		xfs_iext_update_extent(ip, state, icur, &got);
5213 		if (!cur) {
5214 			*logflagsp |= xfs_ilog_fext(whichfork);
5215 			break;
5216 		}
5217 		error = xfs_bmbt_update(cur, &got);
5218 		if (error)
5219 			return error;
5220 		break;
5221 	case BMAP_RIGHT_FILLING:
5222 		/*
5223 		 * Deleting the last part of the extent.
5224 		 */
5225 		got.br_blockcount -= del->br_blockcount;
5226 		xfs_iext_update_extent(ip, state, icur, &got);
5227 		if (!cur) {
5228 			*logflagsp |= xfs_ilog_fext(whichfork);
5229 			break;
5230 		}
5231 		error = xfs_bmbt_update(cur, &got);
5232 		if (error)
5233 			return error;
5234 		break;
5235 	case 0:
5236 		/*
5237 		 * Deleting the middle of the extent.
5238 		 */
5239 
5240 		old = got;
5241 
5242 		got.br_blockcount = del->br_startoff - got.br_startoff;
5243 		xfs_iext_update_extent(ip, state, icur, &got);
5244 
5245 		new.br_startoff = del_endoff;
5246 		new.br_blockcount = got_endoff - del_endoff;
5247 		new.br_state = got.br_state;
5248 		new.br_startblock = del_endblock;
5249 
5250 		*logflagsp |= XFS_ILOG_CORE;
5251 		if (cur) {
5252 			error = xfs_bmbt_update(cur, &got);
5253 			if (error)
5254 				return error;
5255 			error = xfs_btree_increment(cur, 0, &i);
5256 			if (error)
5257 				return error;
5258 			cur->bc_rec.b = new;
5259 			error = xfs_btree_insert(cur, &i);
5260 			if (error && error != -ENOSPC)
5261 				return error;
5262 			/*
5263 			 * If we get no-space back from the btree insert, it
5264 			 * tried a split and we have a zero block reservation.
5265 			 * Fix up our state and return the error.
5266 			 */
5267 			if (error == -ENOSPC) {
5268 				/*
5269 				 * Reset the cursor; don't trust it after any
5270 				 * insert operation.
5271 				 */
5272 				error = xfs_bmbt_lookup_eq(cur, &got, &i);
5273 				if (error)
5274 					return error;
5275 				if (XFS_IS_CORRUPT(mp, i != 1)) {
5276 					xfs_btree_mark_sick(cur);
5277 					return -EFSCORRUPTED;
5278 				}
5279 				/*
5280 				 * Update the btree record back
5281 				 * to the original value.
5282 				 */
5283 				error = xfs_bmbt_update(cur, &old);
5284 				if (error)
5285 					return error;
5286 				/*
5287 				 * Reset the extent record back
5288 				 * to the original value.
5289 				 */
5290 				xfs_iext_update_extent(ip, state, icur, &old);
5291 				*logflagsp = 0;
5292 				return -ENOSPC;
5293 			}
5294 			if (XFS_IS_CORRUPT(mp, i != 1)) {
5295 				xfs_btree_mark_sick(cur);
5296 				return -EFSCORRUPTED;
5297 			}
5298 		} else
5299 			*logflagsp |= xfs_ilog_fext(whichfork);
5300 
5301 		ifp->if_nextents++;
5302 		xfs_iext_next(ifp, icur);
5303 		xfs_iext_insert(ip, icur, &new, state);
5304 		break;
5305 	}
5306 
5307 	/* remove reverse mapping */
5308 	xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5309 
5310 	/*
5311 	 * If we need to, add the extent to the list of extents to delete.
5312 	 */
5313 	if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5314 		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5315 			xfs_refcount_decrease_extent(tp, del);
5316 		} else {
5317 			error = xfs_free_extent_later(tp, del->br_startblock,
5318 					del->br_blockcount, NULL,
5319 					XFS_AG_RESV_NONE,
5320 					((bflags & XFS_BMAPI_NODISCARD) ||
5321 					del->br_state == XFS_EXT_UNWRITTEN));
5322 			if (error)
5323 				return error;
5324 		}
5325 	}
5326 
5327 	/*
5328 	 * Adjust inode # blocks in the file.
5329 	 */
5330 	if (nblks)
5331 		ip->i_nblocks -= nblks;
5332 	/*
5333 	 * Adjust quota data.
5334 	 */
5335 	if (qfield && !(bflags & XFS_BMAPI_REMAP))
5336 		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5337 
5338 	return 0;
5339 }
5340 
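/*
 * Editor's illustration (numbers are hypothetical, not from the source):
 * how the BMAP_LEFT_FILLING/BMAP_RIGHT_FILLING state above picks a case.
 * With got = [startoff 100, 40 blocks]:
 *
 *	del = [100, 40]	-> both flags set, delete the whole record;
 *	del = [100, 10]	-> LEFT_FILLING only, trim the front of got;
 *	del = [130, 10]	-> RIGHT_FILLING only, trim the back of got;
 *	del = [110, 10]	-> neither flag, split got and insert a new
 *			   record for the tail, [120, 20 blocks].
 */
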
5341 /*
5342  * Unmap (remove) blocks from a file.
5343  * If nexts is nonzero then the number of extents to remove is limited to
5344  * that value.  If not all extents in the block range can be removed then
5345  * *rlen is set to the length that remains unmapped.
5346  */
5347 static int
5348 __xfs_bunmapi(
5349 	struct xfs_trans	*tp,		/* transaction pointer */
5350 	struct xfs_inode	*ip,		/* incore inode */
5351 	xfs_fileoff_t		start,		/* first file offset deleted */
5352 	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
5353 	uint32_t		flags,		/* misc flags */
5354 	xfs_extnum_t		nexts)		/* number of extents max */
5355 {
5356 	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
5357 	struct xfs_bmbt_irec	del;		/* extent being deleted */
5358 	int			error;		/* error return value */
5359 	xfs_extnum_t		extno;		/* extent number in list */
5360 	struct xfs_bmbt_irec	got;		/* current extent record */
5361 	struct xfs_ifork	*ifp;		/* inode fork pointer */
5362 	int			isrt;		/* freeing in rt area */
5363 	int			logflags;	/* transaction logging flags */
5364 	xfs_extlen_t		mod;		/* rt extent offset */
5365 	struct xfs_mount	*mp = ip->i_mount;
5366 	int			tmp_logflags;	/* partial logging flags */
5367 	int			wasdel;		/* was a delayed alloc extent */
5368 	int			whichfork;	/* data or attribute fork */
5369 	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
5370 	xfs_fileoff_t		end;
5371 	struct xfs_iext_cursor	icur;
5372 	bool			done = false;
5373 
5374 	trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5375 
5376 	whichfork = xfs_bmapi_whichfork(flags);
5377 	ASSERT(whichfork != XFS_COW_FORK);
5378 	ifp = xfs_ifork_ptr(ip, whichfork);
5379 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp))) {
5380 		xfs_bmap_mark_sick(ip, whichfork);
5381 		return -EFSCORRUPTED;
5382 	}
5383 	if (xfs_is_shutdown(mp))
5384 		return -EIO;
5385 
5386 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
5387 	ASSERT(len > 0);
5388 	ASSERT(nexts >= 0);
5389 
5390 	error = xfs_iread_extents(tp, ip, whichfork);
5391 	if (error)
5392 		return error;
5393 
5394 	if (xfs_iext_count(ifp) == 0) {
5395 		*rlen = 0;
5396 		return 0;
5397 	}
5398 	XFS_STATS_INC(mp, xs_blk_unmap);
5399 	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5400 	end = start + len;
5401 
5402 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5403 		*rlen = 0;
5404 		return 0;
5405 	}
5406 	end--;
5407 
5408 	logflags = 0;
5409 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5411 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5412 	} else
5413 		cur = NULL;
5414 
5415 	if (isrt) {
5416 		/*
5417 		 * Synchronize by locking the realtime bitmap and summary inodes.
5418 		 */
5419 		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5420 		xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5421 		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5422 		xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5423 	}
5424 
5425 	extno = 0;
5426 	while (end != (xfs_fileoff_t)-1 && end >= start &&
5427 	       (nexts == 0 || extno < nexts)) {
5428 		/*
5429 		 * Is the found extent after a hole in which end lives?
5430 		 * Just back up to the previous extent, if so.
5431 		 */
5432 		if (got.br_startoff > end &&
5433 		    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5434 			done = true;
5435 			break;
5436 		}
5437 		/*
5438 		 * Is the last block of this extent before the range
5439 		 * we're supposed to delete?  If so, we're done.
5440 		 */
5441 		end = XFS_FILEOFF_MIN(end,
5442 			got.br_startoff + got.br_blockcount - 1);
5443 		if (end < start)
5444 			break;
5445 		/*
5446 		 * Then deal with the (possibly delayed) allocated space
5447 		 * we found.
5448 		 */
5449 		del = got;
5450 		wasdel = isnullstartblock(del.br_startblock);
5451 
5452 		if (got.br_startoff < start) {
5453 			del.br_startoff = start;
5454 			del.br_blockcount -= start - got.br_startoff;
5455 			if (!wasdel)
5456 				del.br_startblock += start - got.br_startoff;
5457 		}
5458 		if (del.br_startoff + del.br_blockcount > end + 1)
5459 			del.br_blockcount = end + 1 - del.br_startoff;
5460 
5461 		if (!isrt)
5462 			goto delete;
5463 
5464 		mod = xfs_rtb_to_rtxoff(mp,
5465 				del.br_startblock + del.br_blockcount);
5466 		if (mod) {
5467 			/*
5468 			 * Realtime extent not lined up at the end.
5469 			 * The extent could have been split into written
5470 			 * and unwritten pieces, or we could just be
5471 			 * unmapping part of it.  But we can't really
5472 			 * get rid of part of a realtime extent.
5473 			 */
5474 			if (del.br_state == XFS_EXT_UNWRITTEN) {
5475 				/*
5476 				 * This piece is already unwritten.
5477 				 * Skip over it.
5478 				 */
5479 				ASSERT(end >= mod);
5480 				end -= mod > del.br_blockcount ?
5481 					del.br_blockcount : mod;
5482 				if (end < got.br_startoff &&
5483 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5484 					done = true;
5485 					break;
5486 				}
5487 				continue;
5488 			}
5489 			/*
5490 			 * It's written, turn it unwritten.
5491 			 * This is better than zeroing it.
5492 			 */
5493 			ASSERT(del.br_state == XFS_EXT_NORM);
5494 			ASSERT(tp->t_blk_res > 0);
5495 			/*
5496 			 * If this spans a realtime extent boundary,
5497 			 * chop it back to the start of the one we end at.
5498 			 */
5499 			if (del.br_blockcount > mod) {
5500 				del.br_startoff += del.br_blockcount - mod;
5501 				del.br_startblock += del.br_blockcount - mod;
5502 				del.br_blockcount = mod;
5503 			}
5504 			del.br_state = XFS_EXT_UNWRITTEN;
5505 			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5506 					whichfork, &icur, &cur, &del,
5507 					&logflags);
5508 			if (error)
5509 				goto error0;
5510 			goto nodelete;
5511 		}
5512 
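		/*
		 * Editor's illustration (hypothetical numbers): with
		 * sb_rextsize = 8 and del spanning rt blocks [16, 27), the
		 * end block 27 sits 3 blocks into an rt extent, so mod = 3
		 * above.  A written mapping is then chopped back to the
		 * trailing partial rt extent [24, 27) and converted to
		 * unwritten rather than freed, since the rt allocator only
		 * frees whole rt extents.
		 */
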
5513 		mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
5514 		if (mod) {
5515 			xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
5516 
5517 			/*
5518 			 * Realtime extent is lined up at the end but not
5519 			 * at the front.  We'll get rid of full extents if
5520 			 * we can.
5521 			 */
5522 			if (del.br_blockcount > off) {
5523 				del.br_blockcount -= off;
5524 				del.br_startoff += off;
5525 				del.br_startblock += off;
5526 			} else if (del.br_startoff == start &&
5527 				   (del.br_state == XFS_EXT_UNWRITTEN ||
5528 				    tp->t_blk_res == 0)) {
5529 				/*
5530 				 * Can't make it unwritten.  There isn't
5531 				 * a full extent here so just skip it.
5532 				 */
5533 				ASSERT(end >= del.br_blockcount);
5534 				end -= del.br_blockcount;
5535 				if (got.br_startoff > end &&
5536 				    !xfs_iext_prev_extent(ifp, &icur, &got)) {
5537 					done = true;
5538 					break;
5539 				}
5540 				continue;
5541 			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
5542 				struct xfs_bmbt_irec	prev;
5543 				xfs_fileoff_t		unwrite_start;
5544 
5545 				/*
5546 				 * This one is already unwritten.
5547 				 * It must have a written left neighbor.
5548 				 * Unwrite the killed part of that one and
5549 				 * try again.
5550 				 */
5551 				if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5552 					ASSERT(0);
5553 				ASSERT(prev.br_state == XFS_EXT_NORM);
5554 				ASSERT(!isnullstartblock(prev.br_startblock));
5555 				ASSERT(del.br_startblock ==
5556 				       prev.br_startblock + prev.br_blockcount);
5557 				unwrite_start = max3(start,
5558 						     del.br_startoff - mod,
5559 						     prev.br_startoff);
5560 				mod = unwrite_start - prev.br_startoff;
5561 				prev.br_startoff = unwrite_start;
5562 				prev.br_startblock += mod;
5563 				prev.br_blockcount -= mod;
5564 				prev.br_state = XFS_EXT_UNWRITTEN;
5565 				error = xfs_bmap_add_extent_unwritten_real(tp,
5566 						ip, whichfork, &icur, &cur,
5567 						&prev, &logflags);
5568 				if (error)
5569 					goto error0;
5570 				goto nodelete;
5571 			} else {
5572 				ASSERT(del.br_state == XFS_EXT_NORM);
5573 				del.br_state = XFS_EXT_UNWRITTEN;
5574 				error = xfs_bmap_add_extent_unwritten_real(tp,
5575 						ip, whichfork, &icur, &cur,
5576 						&del, &logflags);
5577 				if (error)
5578 					goto error0;
5579 				goto nodelete;
5580 			}
5581 		}
5582 
5583 delete:
5584 		if (wasdel) {
5585 			error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
5586 					&got, &del);
5587 		} else {
5588 			error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5589 					&del, &tmp_logflags, whichfork,
5590 					flags);
5591 			logflags |= tmp_logflags;
5592 		}
5593 
5594 		if (error)
5595 			goto error0;
5596 
5597 		end = del.br_startoff - 1;
5598 nodelete:
5599 		/*
5600 		 * If not done, go on to the next (previous) record.
5601 		 */
5602 		if (end != (xfs_fileoff_t)-1 && end >= start) {
5603 			if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5604 			    (got.br_startoff > end &&
5605 			     !xfs_iext_prev_extent(ifp, &icur, &got))) {
5606 				done = true;
5607 				break;
5608 			}
5609 			extno++;
5610 		}
5611 	}
5612 	if (done || end == (xfs_fileoff_t)-1 || end < start)
5613 		*rlen = 0;
5614 	else
5615 		*rlen = end - start + 1;
5616 
5617 	/*
5618 	 * Convert to a btree if necessary.
5619 	 */
5620 	if (xfs_bmap_needs_btree(ip, whichfork)) {
5621 		ASSERT(cur == NULL);
5622 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5623 				&tmp_logflags, whichfork);
5624 		logflags |= tmp_logflags;
5625 	} else {
5626 		error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
5627 			whichfork);
5628 	}
5629 
5630 error0:
5631 	/*
5632 	 * Log everything.  Do this after conversion, there's no point in
5633 	 * logging the extent records if we've converted to btree format.
5634 	 */
5635 	if ((logflags & xfs_ilog_fext(whichfork)) &&
5636 	    ifp->if_format != XFS_DINODE_FMT_EXTENTS)
5637 		logflags &= ~xfs_ilog_fext(whichfork);
5638 	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5639 		 ifp->if_format != XFS_DINODE_FMT_BTREE)
5640 		logflags &= ~xfs_ilog_fbroot(whichfork);
5641 	/*
5642 	 * Log the inode even in the error case; if the transaction
5643 	 * is dirty we'll need to shut down the filesystem.
5644 	 */
5645 	if (logflags)
5646 		xfs_trans_log_inode(tp, ip, logflags);
5647 	if (cur) {
5648 		if (!error)
5649 			cur->bc_bmap.allocated = 0;
5650 		xfs_btree_del_cursor(cur, error);
5651 	}
5652 	return error;
5653 }
5654 
5655 /* Unmap a range of a file. */
5656 int
5657 xfs_bunmapi(
5658 	xfs_trans_t		*tp,
5659 	struct xfs_inode	*ip,
5660 	xfs_fileoff_t		bno,
5661 	xfs_filblks_t		len,
5662 	uint32_t		flags,
5663 	xfs_extnum_t		nexts,
5664 	int			*done)
5665 {
5666 	int			error;
5667 
5668 	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
5669 	*done = (len == 0);
5670 	return error;
5671 }
5672 
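/*
 * Illustrative caller sketch (editor's addition; the locals and the loop
 * shape are hypothetical, modeled on the historic truncate loop and on
 * xfs_bunmapi_range() below):
 *
 *	int done = 0;
 *
 *	while (!done) {
 *		error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len,
 *				0, XFS_ITRUNC_MAX_EXTENTS, &done);
 *		if (error)
 *			break;
 *		error = xfs_defer_finish(&tp);
 *		if (error)
 *			break;
 *	}
 *
 * Finishing the deferred work between calls is what actually frees the
 * unmapped blocks.
 */
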
5673 /*
5674  * Determine whether an extent shift can be accomplished by a merge with the
5675  * extent that precedes the target hole of the shift.
5676  */
5677 STATIC bool
5678 xfs_bmse_can_merge(
5679 	struct xfs_bmbt_irec	*left,	/* preceding extent */
5680 	struct xfs_bmbt_irec	*got,	/* current extent to shift */
5681 	xfs_fileoff_t		shift)	/* shift fsb */
5682 {
5683 	xfs_fileoff_t		startoff;
5684 
5685 	startoff = got->br_startoff - shift;
5686 
5687 	/*
5688 	 * The extent, once shifted, must be adjacent in-file and on-disk with
5689 	 * the preceding extent.
5690 	 */
5691 	if ((left->br_startoff + left->br_blockcount != startoff) ||
5692 	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5693 	    (left->br_state != got->br_state) ||
5694 	    (left->br_blockcount + got->br_blockcount > XFS_MAX_BMBT_EXTLEN))
5695 		return false;
5696 
5697 	return true;
5698 }
5699 
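/*
 * Worked example for the predicate above (editor's addition, hypothetical
 * numbers): left = [startoff 0, startblock 100, 10 blocks] and
 * got = [startoff 15, startblock 110, 5 blocks] with shift = 5 gives
 * startoff = 10.  The shifted extent is contiguous with left both in-file
 * (0 + 10 == 10) and on-disk (100 + 10 == 110); if the states match and
 * the combined length does not exceed XFS_MAX_BMBT_EXTLEN, the shift can
 * be done as a merge.
 */
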
5700 /*
5701  * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5702  * hole in the file. If an extent shift would result in the extent being fully
5703  * adjacent to the extent that currently precedes the hole, we can merge with
5704  * the preceding extent rather than do the shift.
5705  *
5706  * This function assumes the caller has verified a shift-by-merge is possible
5707  * with the provided extents via xfs_bmse_can_merge().
5708  */
5709 STATIC int
5710 xfs_bmse_merge(
5711 	struct xfs_trans		*tp,
5712 	struct xfs_inode		*ip,
5713 	int				whichfork,
5714 	xfs_fileoff_t			shift,		/* shift fsb */
5715 	struct xfs_iext_cursor		*icur,
5716 	struct xfs_bmbt_irec		*got,		/* extent to shift */
5717 	struct xfs_bmbt_irec		*left,		/* preceding extent */
5718 	struct xfs_btree_cur		*cur,
5719 	int				*logflags)	/* output */
5720 {
5721 	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
5722 	struct xfs_bmbt_irec		new;
5723 	xfs_filblks_t			blockcount;
5724 	int				error, i;
5725 	struct xfs_mount		*mp = ip->i_mount;
5726 
5727 	blockcount = left->br_blockcount + got->br_blockcount;
5728 
5729 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5730 	ASSERT(xfs_bmse_can_merge(left, got, shift));
5731 
5732 	new = *left;
5733 	new.br_blockcount = blockcount;
5734 
5735 	/*
5736 	 * Update the on-disk extent count, the btree if necessary and log the
5737 	 * inode.
5738 	 */
5739 	ifp->if_nextents--;
5740 	*logflags |= XFS_ILOG_CORE;
5741 	if (!cur) {
5742 		*logflags |= XFS_ILOG_DEXT;
5743 		goto done;
5744 	}
5745 
5746 	/* lookup and remove the extent to merge */
5747 	error = xfs_bmbt_lookup_eq(cur, got, &i);
5748 	if (error)
5749 		return error;
5750 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5751 		xfs_btree_mark_sick(cur);
5752 		return -EFSCORRUPTED;
5753 	}
5754 
5755 	error = xfs_btree_delete(cur, &i);
5756 	if (error)
5757 		return error;
5758 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5759 		xfs_btree_mark_sick(cur);
5760 		return -EFSCORRUPTED;
5761 	}
5762 
5763 	/* lookup and update size of the previous extent */
5764 	error = xfs_bmbt_lookup_eq(cur, left, &i);
5765 	if (error)
5766 		return error;
5767 	if (XFS_IS_CORRUPT(mp, i != 1)) {
5768 		xfs_btree_mark_sick(cur);
5769 		return -EFSCORRUPTED;
5770 	}
5771 
5772 	error = xfs_bmbt_update(cur, &new);
5773 	if (error)
5774 		return error;
5775 
5776 	/* change to extent format if required after extent removal */
5777 	error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5778 	if (error)
5779 		return error;
5780 
5781 done:
5782 	xfs_iext_remove(ip, icur, 0);
5783 	xfs_iext_prev(ifp, icur);
5784 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5785 			&new);
5786 
5787 	/* update reverse mapping. rmap functions merge the rmaps for us */
5788 	xfs_rmap_unmap_extent(tp, ip, whichfork, got);
5789 	memcpy(&new, got, sizeof(new));
5790 	new.br_startoff = left->br_startoff + left->br_blockcount;
5791 	xfs_rmap_map_extent(tp, ip, whichfork, &new);
5792 	return 0;
5793 }
5794 
5795 static int
5796 xfs_bmap_shift_update_extent(
5797 	struct xfs_trans	*tp,
5798 	struct xfs_inode	*ip,
5799 	int			whichfork,
5800 	struct xfs_iext_cursor	*icur,
5801 	struct xfs_bmbt_irec	*got,
5802 	struct xfs_btree_cur	*cur,
5803 	int			*logflags,
5804 	xfs_fileoff_t		startoff)
5805 {
5806 	struct xfs_mount	*mp = ip->i_mount;
5807 	struct xfs_bmbt_irec	prev = *got;
5808 	int			error, i;
5809 
5810 	*logflags |= XFS_ILOG_CORE;
5811 
5812 	got->br_startoff = startoff;
5813 
5814 	if (cur) {
5815 		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5816 		if (error)
5817 			return error;
5818 		if (XFS_IS_CORRUPT(mp, i != 1)) {
5819 			xfs_btree_mark_sick(cur);
5820 			return -EFSCORRUPTED;
5821 		}
5822 
5823 		error = xfs_bmbt_update(cur, got);
5824 		if (error)
5825 			return error;
5826 	} else {
5827 		*logflags |= XFS_ILOG_DEXT;
5828 	}
5829 
5830 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5831 			got);
5832 
5833 	/* update reverse mapping */
5834 	xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5835 	xfs_rmap_map_extent(tp, ip, whichfork, got);
5836 	return 0;
5837 }
5838 
5839 int
5840 xfs_bmap_collapse_extents(
5841 	struct xfs_trans	*tp,
5842 	struct xfs_inode	*ip,
5843 	xfs_fileoff_t		*next_fsb,
5844 	xfs_fileoff_t		offset_shift_fsb,
5845 	bool			*done)
5846 {
5847 	int			whichfork = XFS_DATA_FORK;
5848 	struct xfs_mount	*mp = ip->i_mount;
5849 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
5850 	struct xfs_btree_cur	*cur = NULL;
5851 	struct xfs_bmbt_irec	got, prev;
5852 	struct xfs_iext_cursor	icur;
5853 	xfs_fileoff_t		new_startoff;
5854 	int			error = 0;
5855 	int			logflags = 0;
5856 
5857 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5858 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5859 		xfs_bmap_mark_sick(ip, whichfork);
5860 		return -EFSCORRUPTED;
5861 	}
5862 
5863 	if (xfs_is_shutdown(mp))
5864 		return -EIO;
5865 
5866 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5867 
5868 	error = xfs_iread_extents(tp, ip, whichfork);
5869 	if (error)
5870 		return error;
5871 
5872 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
5873 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5874 
5875 	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5876 		*done = true;
5877 		goto del_cursor;
5878 	}
5879 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5880 		xfs_bmap_mark_sick(ip, whichfork);
5881 		error = -EFSCORRUPTED;
5882 		goto del_cursor;
5883 	}
5884 
5885 	new_startoff = got.br_startoff - offset_shift_fsb;
5886 	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5887 		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5888 			error = -EINVAL;
5889 			goto del_cursor;
5890 		}
5891 
5892 		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5893 			error = xfs_bmse_merge(tp, ip, whichfork,
5894 					offset_shift_fsb, &icur, &got, &prev,
5895 					cur, &logflags);
5896 			if (error)
5897 				goto del_cursor;
5898 			goto done;
5899 		}
5900 	} else {
5901 		if (got.br_startoff < offset_shift_fsb) {
5902 			error = -EINVAL;
5903 			goto del_cursor;
5904 		}
5905 	}
5906 
5907 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5908 			cur, &logflags, new_startoff);
5909 	if (error)
5910 		goto del_cursor;
5911 
5912 done:
5913 	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5914 		*done = true;
5915 		goto del_cursor;
5916 	}
5917 
5918 	*next_fsb = got.br_startoff;
5919 del_cursor:
5920 	if (cur)
5921 		xfs_btree_del_cursor(cur, error);
5922 	if (logflags)
5923 		xfs_trans_log_inode(tp, ip, logflags);
5924 	return error;
5925 }
5926 
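/*
 * Illustrative caller sketch (editor's addition, hypothetical locals):
 * collapse walks the fork one extent per call, so callers loop until
 * *done is set, finishing deferred work as they go:
 *
 *	bool done = false;
 *	xfs_fileoff_t next_fsb = start_fsb;
 *
 *	while (!done) {
 *		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb,
 *				shift_fsb, &done);
 *		if (error)
 *			break;
 *		error = xfs_defer_finish(&tp);
 *		if (error)
 *			break;
 *	}
 */
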
5927 /* Make sure we won't be right-shifting an extent past the maximum bound. */
5928 int
5929 xfs_bmap_can_insert_extents(
5930 	struct xfs_inode	*ip,
5931 	xfs_fileoff_t		off,
5932 	xfs_fileoff_t		shift)
5933 {
5934 	struct xfs_bmbt_irec	got;
5935 	int			is_empty;
5936 	int			error = 0;
5937 
5938 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
5939 
5940 	if (xfs_is_shutdown(ip->i_mount))
5941 		return -EIO;
5942 
5943 	xfs_ilock(ip, XFS_ILOCK_EXCL);
5944 	error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5945 	if (!error && !is_empty && got.br_startoff >= off &&
5946 	    ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
5947 		error = -EINVAL;
5948 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
5949 
5950 	return error;
5951 }
5952 
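/*
 * Editor's note on the check above: br_startoff is stored in a 54-bit
 * on-disk field, so the sum is masked with BMBT_STARTOFF_MASK to mimic
 * on-disk truncation.  If the masked result wraps below the original
 * startoff, the shifted extent could not be represented on disk and the
 * shift is rejected with -EINVAL.
 */
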
5953 int
5954 xfs_bmap_insert_extents(
5955 	struct xfs_trans	*tp,
5956 	struct xfs_inode	*ip,
5957 	xfs_fileoff_t		*next_fsb,
5958 	xfs_fileoff_t		offset_shift_fsb,
5959 	bool			*done,
5960 	xfs_fileoff_t		stop_fsb)
5961 {
5962 	int			whichfork = XFS_DATA_FORK;
5963 	struct xfs_mount	*mp = ip->i_mount;
5964 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
5965 	struct xfs_btree_cur	*cur = NULL;
5966 	struct xfs_bmbt_irec	got, next;
5967 	struct xfs_iext_cursor	icur;
5968 	xfs_fileoff_t		new_startoff;
5969 	int			error = 0;
5970 	int			logflags = 0;
5971 
5972 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5973 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5974 		xfs_bmap_mark_sick(ip, whichfork);
5975 		return -EFSCORRUPTED;
5976 	}
5977 
5978 	if (xfs_is_shutdown(mp))
5979 		return -EIO;
5980 
5981 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
5982 
5983 	error = xfs_iread_extents(tp, ip, whichfork);
5984 	if (error)
5985 		return error;
5986 
5987 	if (ifp->if_format == XFS_DINODE_FMT_BTREE)
5988 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5989 
5990 	if (*next_fsb == NULLFSBLOCK) {
5991 		xfs_iext_last(ifp, &icur);
5992 		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5993 		    stop_fsb > got.br_startoff) {
5994 			*done = true;
5995 			goto del_cursor;
5996 		}
5997 	} else {
5998 		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5999 			*done = true;
6000 			goto del_cursor;
6001 		}
6002 	}
6003 	if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
6004 		xfs_bmap_mark_sick(ip, whichfork);
6005 		error = -EFSCORRUPTED;
6006 		goto del_cursor;
6007 	}
6008 
6009 	if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
6010 		xfs_bmap_mark_sick(ip, whichfork);
6011 		error = -EFSCORRUPTED;
6012 		goto del_cursor;
6013 	}
6014 
6015 	new_startoff = got.br_startoff + offset_shift_fsb;
6016 	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
6017 		if (new_startoff + got.br_blockcount > next.br_startoff) {
6018 			error = -EINVAL;
6019 			goto del_cursor;
6020 		}
6021 
6022 		/*
6023 		 * Unlike a left shift (which involves a hole punch), a right
6024 		 * shift does not modify extent neighbors in any way.  We should
6025 		 * never find mergeable extents in this scenario.  Check anyway
6026 		 * and warn if we encounter two extents that could be one.
6027 		 */
6028 		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
6029 			WARN_ON_ONCE(1);
6030 	}
6031 
6032 	error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
6033 			cur, &logflags, new_startoff);
6034 	if (error)
6035 		goto del_cursor;
6036 
6037 	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
6038 	    stop_fsb >= got.br_startoff + got.br_blockcount) {
6039 		*done = true;
6040 		goto del_cursor;
6041 	}
6042 
6043 	*next_fsb = got.br_startoff;
6044 del_cursor:
6045 	if (cur)
6046 		xfs_btree_del_cursor(cur, error);
6047 	if (logflags)
6048 		xfs_trans_log_inode(tp, ip, logflags);
6049 	return error;
6050 }
6051 
6052 /*
6053  * Split the extent containing split_fsb into two extents, such that
6054  * split_fsb becomes the first block of the second extent.  If split_fsb
6055  * lies in a hole or at the first block of an extent, there is nothing to
6056  * split, so just return 0.
6057  */
6058 int
6059 xfs_bmap_split_extent(
6060 	struct xfs_trans	*tp,
6061 	struct xfs_inode	*ip,
6062 	xfs_fileoff_t		split_fsb)
6063 {
6064 	int				whichfork = XFS_DATA_FORK;
6065 	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, whichfork);
6066 	struct xfs_btree_cur		*cur = NULL;
6067 	struct xfs_bmbt_irec		got;
6068 	struct xfs_bmbt_irec		new; /* split extent */
6069 	struct xfs_mount		*mp = ip->i_mount;
6070 	xfs_fsblock_t			gotblkcnt; /* new block count for got */
6071 	struct xfs_iext_cursor		icur;
6072 	int				error = 0;
6073 	int				logflags = 0;
6074 	int				i = 0;
6075 
6076 	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
6077 	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
6078 		xfs_bmap_mark_sick(ip, whichfork);
6079 		return -EFSCORRUPTED;
6080 	}
6081 
6082 	if (xfs_is_shutdown(mp))
6083 		return -EIO;
6084 
6085 	/* Read in all the extents */
6086 	error = xfs_iread_extents(tp, ip, whichfork);
6087 	if (error)
6088 		return error;
6089 
6090 	/*
6091 	 * If there are no extents, or split_fsb lies in a hole, we are done.
6092 	 */
6093 	if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
6094 	    got.br_startoff >= split_fsb)
6095 		return 0;
6096 
6097 	gotblkcnt = split_fsb - got.br_startoff;
6098 	new.br_startoff = split_fsb;
6099 	new.br_startblock = got.br_startblock + gotblkcnt;
6100 	new.br_blockcount = got.br_blockcount - gotblkcnt;
6101 	new.br_state = got.br_state;
6102 
6103 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
6104 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6105 		error = xfs_bmbt_lookup_eq(cur, &got, &i);
6106 		if (error)
6107 			goto del_cursor;
6108 		if (XFS_IS_CORRUPT(mp, i != 1)) {
6109 			xfs_btree_mark_sick(cur);
6110 			error = -EFSCORRUPTED;
6111 			goto del_cursor;
6112 		}
6113 	}
6114 
6115 	got.br_blockcount = gotblkcnt;
6116 	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
6117 			&got);
6118 
6119 	logflags = XFS_ILOG_CORE;
6120 	if (cur) {
6121 		error = xfs_bmbt_update(cur, &got);
6122 		if (error)
6123 			goto del_cursor;
6124 	} else
6125 		logflags |= XFS_ILOG_DEXT;
6126 
6127 	/* Add new extent */
6128 	xfs_iext_next(ifp, &icur);
6129 	xfs_iext_insert(ip, &icur, &new, 0);
6130 	ifp->if_nextents++;
6131 
6132 	if (cur) {
6133 		error = xfs_bmbt_lookup_eq(cur, &new, &i);
6134 		if (error)
6135 			goto del_cursor;
6136 		if (XFS_IS_CORRUPT(mp, i != 0)) {
6137 			xfs_btree_mark_sick(cur);
6138 			error = -EFSCORRUPTED;
6139 			goto del_cursor;
6140 		}
6141 		error = xfs_btree_insert(cur, &i);
6142 		if (error)
6143 			goto del_cursor;
6144 		if (XFS_IS_CORRUPT(mp, i != 1)) {
6145 			xfs_btree_mark_sick(cur);
6146 			error = -EFSCORRUPTED;
6147 			goto del_cursor;
6148 		}
6149 	}
6150 
6151 	/*
6152 	 * Convert to a btree if necessary.
6153 	 */
6154 	if (xfs_bmap_needs_btree(ip, whichfork)) {
6155 		int tmp_logflags; /* partial log flag return val */
6156 
6157 		ASSERT(cur == NULL);
6158 		error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
6159 				&tmp_logflags, whichfork);
6160 		logflags |= tmp_logflags;
6161 	}
6162 
6163 del_cursor:
6164 	if (cur) {
6165 		cur->bc_bmap.allocated = 0;
6166 		xfs_btree_del_cursor(cur, error);
6167 	}
6168 
6169 	if (logflags)
6170 		xfs_trans_log_inode(tp, ip, logflags);
6171 	return error;
6172 }
6173 
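/*
 * Worked example (editor's addition, hypothetical numbers): splitting
 * got = [startoff 100, startblock 500, 50 blocks] at split_fsb = 120
 * gives gotblkcnt = 20.  got is trimmed to [100, 500, 20] and a new
 * record [120, 520, 30] with the same state is inserted after it, with
 * the fork converted to btree format afterwards if it no longer fits
 * in extents format.
 */
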
6174 /* Deferred mapping is only for real extents in the data fork. */
6175 static bool
6176 xfs_bmap_is_update_needed(
6177 	struct xfs_bmbt_irec	*bmap)
6178 {
6179 	return  bmap->br_startblock != HOLESTARTBLOCK &&
6180 		bmap->br_startblock != DELAYSTARTBLOCK;
6181 }
6182 
6183 /* Record a bmap intent. */
6184 static int
6185 __xfs_bmap_add(
6186 	struct xfs_trans		*tp,
6187 	enum xfs_bmap_intent_type	type,
6188 	struct xfs_inode		*ip,
6189 	int				whichfork,
6190 	struct xfs_bmbt_irec		*bmap)
6191 {
6192 	struct xfs_bmap_intent		*bi;
6193 
6194 	trace_xfs_bmap_defer(tp->t_mountp,
6195 			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
6196 			type,
6197 			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
6198 			ip->i_ino, whichfork,
6199 			bmap->br_startoff,
6200 			bmap->br_blockcount,
6201 			bmap->br_state);
6202 
6203 	bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
6204 	INIT_LIST_HEAD(&bi->bi_list);
6205 	bi->bi_type = type;
6206 	bi->bi_owner = ip;
6207 	bi->bi_whichfork = whichfork;
6208 	bi->bi_bmap = *bmap;
6209 
6210 	xfs_bmap_update_get_group(tp->t_mountp, bi);
6211 	xfs_defer_add(tp, &bi->bi_list, &xfs_bmap_update_defer_type);
6212 	return 0;
6213 }
6214 
6215 /* Map an extent into a file. */
6216 void
6217 xfs_bmap_map_extent(
6218 	struct xfs_trans	*tp,
6219 	struct xfs_inode	*ip,
6220 	struct xfs_bmbt_irec	*PREV)
6221 {
6222 	if (!xfs_bmap_is_update_needed(PREV))
6223 		return;
6224 
6225 	__xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
6226 }
6227 
6228 /* Unmap an extent out of a file. */
6229 void
6230 xfs_bmap_unmap_extent(
6231 	struct xfs_trans	*tp,
6232 	struct xfs_inode	*ip,
6233 	struct xfs_bmbt_irec	*PREV)
6234 {
6235 	if (!xfs_bmap_is_update_needed(PREV))
6236 		return;
6237 
6238 	__xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
6239 }
6240 
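/*
 * Illustrative sketch (editor's addition; the extent values are
 * hypothetical): defer an unmap by logging an intent now and letting
 * the deferred-ops machinery call xfs_bmap_finish_one() later.  Note
 * that the UNMAP intent runs __xfs_bunmapi() with XFS_BMAPI_REMAP, so
 * the blocks are unmapped but not freed:
 *
 *	struct xfs_bmbt_irec irec = {
 *		.br_startoff	= 0,
 *		.br_startblock	= 500,
 *		.br_blockcount	= 8,
 *		.br_state	= XFS_EXT_NORM,
 *	};
 *
 *	xfs_bmap_unmap_extent(tp, ip, &irec);
 *	error = xfs_defer_finish(&tp);
 */
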
6241 /*
6242  * Process one of the deferred bmap operations.
6244  */
6245 int
6246 xfs_bmap_finish_one(
6247 	struct xfs_trans		*tp,
6248 	struct xfs_bmap_intent		*bi)
6249 {
6250 	struct xfs_bmbt_irec		*bmap = &bi->bi_bmap;
6251 	int				error = 0;
6252 
6253 	ASSERT(tp->t_highest_agno == NULLAGNUMBER);
6254 
6255 	trace_xfs_bmap_deferred(tp->t_mountp,
6256 			XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
6257 			bi->bi_type,
6258 			XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
6259 			bi->bi_owner->i_ino, bi->bi_whichfork,
6260 			bmap->br_startoff, bmap->br_blockcount,
6261 			bmap->br_state);
6262 
6263 	if (WARN_ON_ONCE(bi->bi_whichfork != XFS_DATA_FORK)) {
6264 		xfs_bmap_mark_sick(bi->bi_owner, bi->bi_whichfork);
6265 		return -EFSCORRUPTED;
6266 	}
6267 
6268 	if (XFS_TEST_ERROR(false, tp->t_mountp,
6269 			XFS_ERRTAG_BMAP_FINISH_ONE))
6270 		return -EIO;
6271 
6272 	switch (bi->bi_type) {
6273 	case XFS_BMAP_MAP:
6274 		error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff,
6275 				bmap->br_blockcount, bmap->br_startblock, 0);
6276 		bmap->br_blockcount = 0;
6277 		break;
6278 	case XFS_BMAP_UNMAP:
6279 		error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff,
6280 				&bmap->br_blockcount, XFS_BMAPI_REMAP, 1);
6281 		break;
6282 	default:
6283 		ASSERT(0);
6284 		xfs_bmap_mark_sick(bi->bi_owner, bi->bi_whichfork);
6285 		error = -EFSCORRUPTED;
6286 	}
6287 
6288 	return error;
6289 }
6290 
6291 /* Check that an extent does not have invalid flags or bad ranges. */
6292 xfs_failaddr_t
6293 xfs_bmap_validate_extent_raw(
6294 	struct xfs_mount	*mp,
6295 	bool			rtfile,
6296 	int			whichfork,
6297 	struct xfs_bmbt_irec	*irec)
6298 {
6299 	if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
6300 		return __this_address;
6301 
6302 	if (rtfile && whichfork == XFS_DATA_FORK) {
6303 		if (!xfs_verify_rtbext(mp, irec->br_startblock,
6304 					   irec->br_blockcount))
6305 			return __this_address;
6306 	} else {
6307 		if (!xfs_verify_fsbext(mp, irec->br_startblock,
6308 					   irec->br_blockcount))
6309 			return __this_address;
6310 	}
6311 	if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
6312 		return __this_address;
6313 	return NULL;
6314 }
6315 
6316 int __init
6317 xfs_bmap_intent_init_cache(void)
6318 {
6319 	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
6320 			sizeof(struct xfs_bmap_intent),
6321 			0, 0, NULL);
6322 
6323 	return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
6324 }
6325 
6326 void
6327 xfs_bmap_intent_destroy_cache(void)
6328 {
6329 	kmem_cache_destroy(xfs_bmap_intent_cache);
6330 	xfs_bmap_intent_cache = NULL;
6331 }
6332 
6333 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6334 xfs_failaddr_t
6335 xfs_bmap_validate_extent(
6336 	struct xfs_inode	*ip,
6337 	int			whichfork,
6338 	struct xfs_bmbt_irec	*irec)
6339 {
6340 	return xfs_bmap_validate_extent_raw(ip->i_mount,
6341 			XFS_IS_REALTIME_INODE(ip), whichfork, irec);
6342 }
6343 
6344 /*
6345  * Used in xfs_itruncate_extents().  This is the maximum number of extents
6346  * freed from a file in a single transaction.
6347  */
6348 #define	XFS_ITRUNC_MAX_EXTENTS	2
6349 
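/*
 * Editor's note: capping the number of extents unmapped per transaction
 * bounds the deferred free work (and thus the log reservation) that any
 * single transaction has to carry; xfs_bunmapi_range() below finishes
 * the deferred ops between batches.
 */
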
6350 /*
6351  * Unmap every extent in part of an inode's fork.  We don't do any higher level
6352  * invalidation work at all.
6353  */
6354 int
6355 xfs_bunmapi_range(
6356 	struct xfs_trans	**tpp,
6357 	struct xfs_inode	*ip,
6358 	uint32_t		flags,
6359 	xfs_fileoff_t		startoff,
6360 	xfs_fileoff_t		endoff)
6361 {
6362 	xfs_filblks_t		unmap_len = endoff - startoff + 1;
6363 	int			error = 0;
6364 
6365 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
6366 
6367 	while (unmap_len > 0) {
6368 		ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);
6369 		error = __xfs_bunmapi(*tpp, ip, startoff, &unmap_len, flags,
6370 				XFS_ITRUNC_MAX_EXTENTS);
6371 		if (error)
6372 			goto out;
6373 
6374 		/* free the just unmapped extents */
6375 		error = xfs_defer_finish(tpp);
6376 		if (error)
6377 			goto out;
6378 	}
6379 out:
6380 	return error;
6381 }
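/*
 * Illustrative caller sketch (editor's addition, hypothetical offsets):
 * truncate-style removal of every mapping from first_unmap_block to the
 * largest supported file offset:
 *
 *	error = xfs_bunmapi_range(&tp, ip, 0, first_unmap_block,
 *			XFS_MAX_FILEOFF);
 */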
6382